Dataset schema: text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars). Each record below is one source file followed by its metadata.
"""
A simple VTK widget for wxPython. Note that wxPython comes
with its own wxVTKRenderWindow in wxPython.lib.vtk. Try both
and see which one works better for you.
Find wxPython info at http://wxPython.org
Created by David Gobbi, December 2001
Based on vtkTkRenderWidget.py
"""
"""
Please see the example at the end of this file.
----------------------------------------
Creation:
wxVTKRenderWindow(parent, ID, stereo=0, [wx keywords]):
You should create a wxPySimpleApp() or some other wx**App
before creating the window.
----------------------------------------
Methods:
Render()
AddRenderer(ren)
GetRenderers()
GetRenderWindow()
----------------------------------------
Methods to override (all take a wxEvent):
OnButtonDown(event) default: propagate event to Left, Right, Middle
OnLeftDown(event) default: set _Mode to 'Rotate'
OnRightDown(event) default: set _Mode to 'Zoom'
OnMiddleDown(event) default: set _Mode to 'Pan'
OnButtonUp(event) default: propagate event to L, R, M and unset _Mode
OnLeftUp(event)
OnRightUp(event)
OnMiddleUp(event)
OnMotion(event) default: call appropriate handler for _Mode
OnEnterWindow(event) default: set focus to this window
OnLeaveWindow(event) default: release focus
OnKeyDown(event) default: [R]eset, [W]ireframe, [S]olid, [P]ick
OnKeyUp(event)
OnChar(event)
OnSetFocus(event)
OnKillFocus(event)
OnSize(event)
OnMove(event)
OnPaint(event) default: Render()
----------------------------------------
Protected Members:
_Mode: Current mode: 'Rotate', 'Zoom', 'Pan'
_LastX, _LastY: The (x,y) coordinates of the previous event
_CurrentRenderer: The renderer that was most recently clicked in
_CurrentCamera: The camera for the current renderer
----------------------------------------
Private Members:
__handle: Handle to the window containing the vtkRenderWindow
"""
# import usual libraries
import math, os, sys
from wxPython.wx import *
import vtk
# a few configuration items, see what works best on your system
# Use wxGLCanvas as base class instead of wxWindow.
# This is sometimes necessary under wxGTK or the image is blank.
# (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
try:
    WX_USE_GLCANVAS
except NameError:
if wxPlatform == '__WXMSW__':
WX_USE_GLCANVAS = 0
else:
WX_USE_GLCANVAS = 1
# Keep capturing mouse after mouse is dragged out of window
# (in wxGTK 2.3.2 there is a bug that keeps this from working,
# but it is only relevant in wxGTK if there are multiple windows)
try:
WX_USE_X_CAPTURE
except NameError:
if wxPlatform == '__WXMSW__':
WX_USE_X_CAPTURE = 1
else:
WX_USE_X_CAPTURE = 0
# end of configuration items
if WX_USE_GLCANVAS:
from wxPython.glcanvas import *
baseClass = wxGLCanvas
else:
baseClass = wxWindow
class wxVTKRenderWindow(baseClass):
"""
A wxRenderWindow for wxPython.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
def __init__(self, parent, ID, *args, **kw):
# miscellaneous protected variables
self._CurrentRenderer = None
self._CurrentCamera = None
self._CurrentZoom = 1.0
self._CurrentLight = None
self._ViewportCenterX = 0
self._ViewportCenterY = 0
self._Picker = vtk.vtkCellPicker()
self._PickedActor = None
self._PickedProperty = vtk.vtkProperty()
self._PickedProperty.SetColor(1,0,0)
self._PrePickedProperty = None
# these record the previous mouse position
self._LastX = 0
self._LastY = 0
# the current interaction mode (Rotate, Pan, Zoom, etc)
self._Mode = None
self._ActiveButton = None
# private attributes
self.__OldFocus = None
# used by the LOD actors
self._DesiredUpdateRate = 15
self._StillUpdateRate = 0.0001
# First do special handling of some keywords:
# stereo, position, size, width, height, style
stereo = 0
if kw.has_key('stereo'):
if kw['stereo']:
stereo = 1
del kw['stereo']
position = wxDefaultPosition
if kw.has_key('position'):
position = kw['position']
del kw['position']
try:
size = parent.GetSize()
except AttributeError:
size = wxDefaultSize
if kw.has_key('size'):
size = kw['size']
del kw['size']
if kw.has_key('width') and kw.has_key('height'):
size = (kw['width'], kw['height'])
del kw['width']
del kw['height']
# wxWANTS_CHARS says to give us e.g. TAB
# wxNO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK
style = wxWANTS_CHARS | wxNO_FULL_REPAINT_ON_RESIZE
if kw.has_key('style'):
style = style | kw['style']
del kw['style']
# the enclosing frame must be shown under GTK or the windows
# don't connect together properly
l = []
p = parent
while p: # make a list of all parents
l.append(p)
p = p.GetParent()
        l.reverse() # reverse so the topmost parent comes first
for p in l:
p.Show(1)
# initialize the wxWindow
baseClass.__init__(self, parent, ID, position, size, style)
# create the RenderWindow and initialize it
self._RenderWindow = vtk.vtkRenderWindow()
try:
self._RenderWindow.SetSize(size.width, size.height)
except AttributeError:
self._RenderWindow.SetSize(size[0], size[1])
if stereo:
self._RenderWindow.StereoCapableWindowOn()
self._RenderWindow.SetStereoTypeToCrystalEyes()
self.__handle = None
# refresh window by doing a Render
EVT_PAINT(self, self.OnPaint)
# turn off background erase to reduce flicker
EVT_ERASE_BACKGROUND(self, lambda e: None)
# Bind the events to the event converters
EVT_RIGHT_DOWN(self, self._OnButtonDown)
EVT_LEFT_DOWN(self, self._OnButtonDown)
EVT_MIDDLE_DOWN(self, self._OnButtonDown)
EVT_RIGHT_UP(self, self._OnButtonUp)
EVT_LEFT_UP(self, self._OnButtonUp)
EVT_MIDDLE_UP(self, self._OnButtonUp)
EVT_MOTION(self, self.OnMotion)
EVT_ENTER_WINDOW(self, self._OnEnterWindow)
EVT_LEAVE_WINDOW(self, self._OnLeaveWindow)
EVT_CHAR(self, self.OnChar)
# If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
# of all characters are always returned. EVT_CHAR also performs
# other necessary keyboard-dependent translations.
EVT_CHAR(self, self.OnKeyDown)
EVT_KEY_UP(self, self.OnKeyUp)
EVT_SIZE(self, self._OnSize)
EVT_MOVE(self, self.OnMove)
EVT_SET_FOCUS(self, self.OnSetFocus)
EVT_KILL_FOCUS(self, self.OnKillFocus)
def SetDesiredUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._DesiredUpdateRate = rate
def GetDesiredUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._DesiredUpdateRate
def SetStillUpdateRate(self, rate):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
self._StillUpdateRate = rate
def GetStillUpdateRate(self):
"""Mirrors the method with the same name in
vtkRenderWindowInteractor."""
return self._StillUpdateRate
def OnPaint(self,event):
dc = wxPaintDC(self)
self.Render()
def _OnSize(self,event):
if wxPlatform != '__WXMSW__':
try:
width, height = event.GetSize()
except:
width = event.GetSize().width
height = event.GetSize().height
self._RenderWindow.SetSize(width, height)
self.OnSize(event)
self.Render()
def OnSize(self, event):
pass
def OnMove(self,event):
pass
def _OnEnterWindow(self,event):
self.UpdateRenderer(event)
self.OnEnterWindow(event)
def OnEnterWindow(self,event):
if self.__OldFocus == None:
self.__OldFocus = wxWindow_FindFocus()
self.SetFocus()
def _OnLeaveWindow(self,event):
self.OnLeaveWindow(event)
def OnLeaveWindow(self,event):
if self.__OldFocus:
self.__OldFocus.SetFocus()
self.__OldFocus = None
def OnSetFocus(self,event):
pass
def OnKillFocus(self,event):
pass
def _OnButtonDown(self,event):
# helper function for capturing mouse until button released
self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
if event.RightDown():
button = "Right"
elif event.LeftDown():
button = "Left"
elif event.MiddleDown():
button = "Middle"
else:
button = None
# save the button and capture mouse until the button is released
if button and not self._ActiveButton:
self._ActiveButton = button
if WX_USE_X_CAPTURE:
self.CaptureMouse()
self.OnButtonDown(event)
def OnButtonDown(self,event):
if not self._Mode:
# figure out what renderer the mouse is over
self.UpdateRenderer(event)
if event.LeftDown():
self.OnLeftDown(event)
elif event.RightDown():
self.OnRightDown(event)
elif event.MiddleDown():
self.OnMiddleDown(event)
def OnLeftDown(self,event):
if not self._Mode:
if event.ControlDown():
self._Mode = "Zoom"
elif event.ShiftDown():
self._Mode = "Pan"
else:
self._Mode = "Rotate"
def OnRightDown(self,event):
if not self._Mode:
self._Mode = "Zoom"
def OnMiddleDown(self,event):
if not self._Mode:
self._Mode = "Pan"
def _OnButtonUp(self,event):
# helper function for releasing mouse capture
self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
if event.RightUp():
button = "Right"
elif event.LeftUp():
button = "Left"
elif event.MiddleUp():
button = "Middle"
else:
button = None
        # if the ActiveButton is released, then release mouse capture
if self._ActiveButton and button == self._ActiveButton:
if WX_USE_X_CAPTURE:
self.ReleaseMouse()
self._ActiveButton = None
self.OnButtonUp(event)
def OnButtonUp(self,event):
if event.LeftUp():
self.OnLeftUp(event)
elif event.RightUp():
self.OnRightUp(event)
elif event.MiddleUp():
self.OnMiddleUp(event)
# if not interacting, then do nothing more
if self._Mode:
if self._CurrentRenderer:
self.Render()
self._Mode = None
def OnLeftUp(self,event):
pass
def OnRightUp(self,event):
pass
def OnMiddleUp(self,event):
pass
def OnMotion(self,event):
if self._Mode == "Pan":
self.Pan(event)
elif self._Mode == "Rotate":
self.Rotate(event)
elif self._Mode == "Zoom":
self.Zoom(event)
def OnChar(self,event):
pass
def OnKeyDown(self,event):
if event.GetKeyCode() == ord('r'):
self.Reset(event)
if event.GetKeyCode() == ord('w'):
self.Wireframe()
if event.GetKeyCode() == ord('s'):
self.Surface()
if event.GetKeyCode() == ord('p'):
self.PickActor(event)
if event.GetKeyCode() < 256:
self.OnChar(event)
def OnKeyUp(self,event):
pass
def GetZoomFactor(self):
return self._CurrentZoom
def GetRenderWindow(self):
return self._RenderWindow
def GetPicker(self):
return self._Picker
def Render(self):
if self._CurrentLight:
light = self._CurrentLight
light.SetPosition(self._CurrentCamera.GetPosition())
light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
        if (not self.GetUpdateRegion().IsEmpty()) or self.__handle:
if self.__handle and self.__handle == self.GetHandle():
self._RenderWindow.Render()
elif self.GetHandle():
# this means the user has reparented us
# let's adapt to the new situation by doing the WindowRemap
# dance
self._RenderWindow.SetNextWindowInfo(str(self.GetHandle()))
self._RenderWindow.WindowRemap()
# store the new situation
self.__handle = self.GetHandle()
self._RenderWindow.Render()
def UpdateRenderer(self,event):
"""
UpdateRenderer will identify the renderer under the mouse and set
up _CurrentRenderer, _CurrentCamera, and _CurrentLight.
"""
x = event.GetX()
y = event.GetY()
windowX, windowY = self._RenderWindow.GetSize()
renderers = self._RenderWindow.GetRenderers()
numRenderers = renderers.GetNumberOfItems()
self._CurrentRenderer = None
renderers.InitTraversal()
for i in range(0,numRenderers):
renderer = renderers.GetNextItem()
vx,vy = (0,0)
if (windowX > 1):
vx = float(x)/(windowX-1)
if (windowY > 1):
vy = (windowY-float(y)-1)/(windowY-1)
(vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
if (vx >= vpxmin and vx <= vpxmax and
vy >= vpymin and vy <= vpymax):
self._CurrentRenderer = renderer
self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
+vpxmin
self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
+vpymin
self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
lights = self._CurrentRenderer.GetLights()
lights.InitTraversal()
self._CurrentLight = lights.GetNextItem()
break
self._LastX = x
self._LastY = y
def GetCurrentRenderer(self):
return self._CurrentRenderer
def Rotate(self,event):
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
def Pan(self,event):
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
(pPoint0,pPoint1,pPoint2) = camera.GetPosition()
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
if (camera.GetParallelProjection()):
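                # parallel projection: translate both the focal point and the
                # camera position by the drag vector, converted via display coords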
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetFocalPoint(fx,fy,fz)
renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
renderer.WorldToDisplay()
fx,fy,fz = renderer.GetDisplayPoint()
renderer.SetDisplayPoint(fx-x+self._LastX,
fy+y-self._LastY,
fz)
renderer.DisplayToWorld()
fx,fy,fz,fw = renderer.GetWorldPoint()
camera.SetPosition(fx,fy,fz)
else:
(fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
# Specify a point location in world coordinates
renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
renderer.WorldToDisplay()
# Convert world point coordinates to display coordinates
dPoint = renderer.GetDisplayPoint()
focalDepth = dPoint[2]
aPoint0 = self._ViewportCenterX + (x - self._LastX)
aPoint1 = self._ViewportCenterY - (y - self._LastY)
renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
renderer.DisplayToWorld()
(rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
if (rPoint3 != 0.0):
rPoint0 = rPoint0/rPoint3
rPoint1 = rPoint1/rPoint3
rPoint2 = rPoint2/rPoint3
camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
(fPoint1 - rPoint1) + fPoint1,
(fPoint2 - rPoint2) + fPoint2)
camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
(fPoint1 - rPoint1) + pPoint1,
(fPoint2 - rPoint2) + pPoint2)
self._LastX = x
self._LastY = y
self.Render()
def Zoom(self,event):
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
camera = self._CurrentCamera
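            # exponential zoom: dragging up (y decreasing) gives a factor > 1
            # (zoom in); each pixel of drag scales by about 1% (1.02**0.5)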
zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
self._CurrentZoom = self._CurrentZoom * zoomFactor
if camera.GetParallelProjection():
parallelScale = camera.GetParallelScale()/zoomFactor
camera.SetParallelScale(parallelScale)
else:
camera.Dolly(zoomFactor)
renderer.ResetCameraClippingRange()
self._LastX = x
self._LastY = y
self.Render()
def Reset(self,event=None):
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
def PickActor(self,event):
if self._CurrentRenderer:
x = event.GetX()
y = event.GetY()
renderer = self._CurrentRenderer
picker = self._Picker
windowX, windowY = self._RenderWindow.GetSize()
picker.Pick(x,(windowY - y - 1),0.0,renderer)
actor = picker.GetActor()
if (self._PickedActor != None and
self._PrePickedProperty != None):
self._PickedActor.SetProperty(self._PrePickedProperty)
# release hold of the property
self._PrePickedProperty.UnRegister(self._PrePickedProperty)
self._PrePickedProperty = None
if (actor != None):
self._PickedActor = actor
self._PrePickedProperty = self._PickedActor.GetProperty()
# hold onto the property
self._PrePickedProperty.Register(self._PrePickedProperty)
self._PickedActor.SetProperty(self._PickedProperty)
self.Render()
#----------------------------------------------------------------------------
def wxVTKRenderWindowConeExample():
"""Like it says, just a simple example
"""
# every wx app needs an app
app = wxPySimpleApp()
# create the widget
frame = wxFrame(None, -1, "wxRenderWindow", size=wxSize(400,400))
widget = wxVTKRenderWindow(frame, -1)
ren = vtk.vtkRenderer()
widget.GetRenderWindow().AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(8)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# show the window
frame.Show(1)
app.MainLoop()
if __name__ == "__main__":
wxVTKRenderWindowConeExample()
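# Creating a stereo-capable window instead is a one-liner (a sketch using the
# stereo keyword documented at the top of this file):
#
#   widget = wxVTKRenderWindow(frame, -1, stereo=1)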
----------------------------------------
repo_name: sgh/vtk
path: Wrapping/Python/vtk/wx/wxVTKRenderWindow.py
language: Python
license: bsd-3-clause
size: 21609
keyword: ["VTK"]
text_hash: 773da7814b0a9ebf5a280b330e4689e83ea1bca74d3c11b681c1387176b4683f
----------------------------------------
from ast import parse, walk, iter_fields, dump, NodeVisitor, get_docstring
import sys
def interpret_async(is_async):
return "an async" if is_async else "a"
class PrettyReader(NodeVisitor):
def visit_list(self, xs):
if len(xs) <= 1:
return ", ".join([self.visit(i) for i in xs ])
else:
return ", ".join([self.visit(i) for i in xs[:-1]]) + f" and {self.visit(xs[-1])}"
def visit_optional_list(self, xs, format_string="{}"):
if len(xs) == 0:
return ""
else:
return format_string.format(self.visit_list(xs))
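    # For example (a sketch): visit_list over three Name nodes renders as
    # '"a", "b" and "c"', while a single-element list renders as just '"a"'.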
"""
mod = Module(stmt* body)
| Interactive(stmt* body)
| Expression(expr body)
"""
def visit_Module(self, node):
return self.visit_list(node.body)
def visit_Expression(self, node):
return self.visit(node.body)
"""
stmt = FunctionDef(identifier name, arguments args,
stmt* body, expr* decorator_list, expr? returns)
| AsyncFunctionDef(identifier name, arguments args,
stmt* body, expr* decorator_list, expr? returns)
| ClassDef(identifier name,
expr* bases,
keyword* keywords,
stmt* body,
expr* decorator_list)
| Return(expr? value)
| Delete(expr* targets)
| Assign(expr* targets, expr value)
| AugAssign(expr target, operator op, expr value)
-- 'simple' indicates that we annotate simple name without parens
| AnnAssign(expr target, expr annotation, expr? value, int simple)
-- use 'orelse' because else is a keyword in target languages
| For(expr target, expr iter, stmt* body, stmt* orelse)
| AsyncFor(expr target, expr iter, stmt* body, stmt* orelse)
| While(expr test, stmt* body, stmt* orelse)
| If(expr test, stmt* body, stmt* orelse)
| With(withitem* items, stmt* body)
| AsyncWith(withitem* items, stmt* body)
| Raise(expr? exc, expr? cause)
| Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)
| Assert(expr test, expr? msg)
| Import(alias* names)
| ImportFrom(identifier? module, alias* names, int? level)
| Global(identifier* names)
| Nonlocal(identifier* names)
| Expr(expr value)
| Pass | Break | Continue
"""
def visit_FunctionDef(self, node, is_async=False):
        doc = get_docstring(node, True)
        docstring = f"\"{doc}\"" if doc else None
        body = node.body
        if docstring:
            body = body[1:] # don't repeat the docstring in the body summary
summary = ""\
+ f"{interpret_async(is_async)} function called \"{node.name}\""\
+ f", taking {self.visit(node.args)}"\
+ (f", and returning a value of {self.visit(node.returns)}" if node.returns else "")\
+ (f", with the docstring of {docstring}" if docstring else "")\
+ f", with a body of {self.visit(body)}"
return summary
def visit_AsyncFunctionDef(self, node):
        return self.visit_FunctionDef(node, is_async=True)
def visit_ClassDef(self, node):
summary = (
f"a class called \"{node.name}\""
f", which extends {self.visit_list(node.bases)}"
f", and defines {self.visit_list(node.body)}"
)
return summary
def visit_Return(self, node):
if node.value:
return f"a return statement returning {self.visit(node.value)}"
else:
return "a return statement"
def visit_Delete(self, node):
return f"a delete statement, deleting {self.visit_list(node.targets)}"
def visit_Assign(self, node):
return f"an L-value {self.visit_list(node.targets)} assigned {self.visit(node.value)}"
def visit_AugAssign(self, node):
return f"an L-value {self.visit(node.target)} augmented with {self.visit(node.operator)} and the value {self.visit(node.value)}"
def visit_AnnAssign(self, node):
return "TODO"
def visit_For(self, node, is_async=False):
summary = (
f"{interpret_async(is_async)} for loop"
f", using {self.visit(node.target)} as an iterator"
f", looping through {self.visit(node.iter)}"
f", with a body of {self.visit_list(node.body)}"
# TODO: orelse
)
return summary
def visit_AsyncFor(self, node):
        return self.visit_For(node, is_async=True)
def visit_While(self, node):
summary = (
"a while loop"
f", using {self.visit(node.test)} as the test"
f", with a body of {self.visit_list(node.body)}"
# TODO: orelse
)
return summary
def visit_If(self, node):
false_branch = self.visit_list(node.orelse)
summary = "an if block"\
+ f", testing {self.visit(node.test)}"\
+ f", with a True branch of {self.visit_list(node.body)}"\
+ (f", and an False branch of {false_branch}" if len(false_branch) != 0 else "")
return summary
def visit_With(self, node, is_async=False):
summary = (
f"{interpret_async(is_async)} with block"
f", using {self.visit_list(node.withitem)}"
f", with a body of {self.visit_list(node.body)}"
# TODO: orelse
)
return summary
def visit_AsyncWith(self, node):
        return self.visit_With(node, is_async=True)
def visit_Raise(self, node):
summary = ""\
+ "a raise statement"\
+ (f", raising an exception {self.visit(node.exc)}" if node.exc else "")\
+ (f", with a cause of {self.visit(node.cause)}" if node.cause else "")
return summary
def visit_Try(self, node):
summary = (
"a try block"
f", using {self.visit_list(node.handlers)} as the exception handlers"
f", with a body of {self.visit_list(node.body)}"
f",and a final body of {self.visit_list(node.finalbody)}"
# TODO: orelse
)
return summary
def visit_Assert(self, node):
return "TODO"
def visit_Import(self, node):
return "TODO"
def visit_ImportFrom(self, node):
return "TODO"
def visit_Global(self, node):
return "TODO"
def visit_Nonlocal(self, node):
return "TODO"
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Pass(self, node):
return "pass"
def visit_Break(self, node):
return "break"
def visit_Continue(self, node):
return "continue"
"""
expr = BoolOp(boolop op, expr* values)
| BinOp(expr left, operator op, expr right)
| UnaryOp(unaryop op, expr operand)
| Lambda(arguments args, expr body)
| IfExp(expr test, expr body, expr orelse)
| Dict(expr* keys, expr* values)
| Set(expr* elts)
| ListComp(expr elt, comprehension* generators)
| SetComp(expr elt, comprehension* generators)
| DictComp(expr key, expr value, comprehension* generators)
| GeneratorExp(expr elt, comprehension* generators)
-- the grammar constrains where yield expressions can occur
| Await(expr value)
| Yield(expr? value)
| YieldFrom(expr value)
-- need sequences for compare to distinguish between
-- x < 4 < 3 and (x < 4) < 3
| Compare(expr left, cmpop* ops, expr* comparators)
| Call(expr func, expr* args, keyword* keywords)
| Num(object n) -- a number as a PyObject.
| Str(string s) -- need to specify raw, unicode, etc?
| FormattedValue(expr value, int? conversion, expr? format_spec)
| JoinedStr(expr* values)
| Bytes(bytes s)
| NameConstant(singleton value)
| Ellipsis
| Constant(constant value)
-- the following expression can appear in assignment context
| Attribute(expr value, identifier attr, expr_context ctx)
| Subscript(expr value, slice slice, expr_context ctx)
| Starred(expr value, expr_context ctx)
| Name(identifier id, expr_context ctx)
| List(expr* elts, expr_context ctx)
| Tuple(expr* elts, expr_context ctx)
"""
def visit_BinOp(self, node):
return f"{self.visit(node.left)} {self.visit(node.op)} {self.visit(node.right)}"
def visit_UnaryOp(self, node):
return f"{self.visit(node.op)} {self.visit(node.operand)}"
def visit_Lambda(self, node):
summary = (
f"an anonymous function taking {self.visit(node.args)}"
f", and returning {self.visit(node.body)}"
)
return summary
    def visit_IfExp(self, node):
return f"if {self.visit(node.test)} then {self.visit(node.body)} else {self.visit(node.orelse)}"
def visit_Dict(self, node):
return f"a dict of keys {self.visit_list(node.keys)}, and values {self.visit_list(node.values)}"
def visit_Set(self, node):
return f"a set of keys {self.visit_list(node.elts)}"
def visit_ListComp(self, node):
summary = (
f"a list comprehension of {self.visit(node.elt)}"
f", from {self.visit_list(node.generators)}"
)
return summary
def visit_SetComp(self, node):
summary = (
f"a set comprehension of {self.visit(node.elt)}"
f", from {self.visit_list(node.generators)}"
)
return summary
def visit_DictComp(self, node):
summary = (
f"a dict comprehension of the {self.visit(node.key)} {self.visit(node.value)} key-value pair"
f", from {self.visit_list(node.generators)}"
)
return summary
def visit_GeneratorExp(self, node):
summary = (
f"a generator expression of {self.visit(node.elt)}"
f", from {self.visit_list(node.generators)}"
)
return summary
def visit_Await(self, node):
return f"await {self.visit(node.value)}"
def visit_Yield(self, node):
return f"yield {self.visit(node.value) if node.value else ''}"
def visit_YieldFrom(self, node):
return f"yield from {self.visit(node.value)}"
def visit_Compare(self, node):
left = self.visit(node.left)
ops = map(self.visit, node.ops)
comp = map(self.visit, node.comparators)
return left + ' ' + ' '.join([f'{op} {val}' for op, val in zip(ops, comp)])
def visit_Call(self, node):
# XXX: Hack - forcing call to visit_arguments
return f"{self.visit(node.func)} called with {self.visit_arguments(node)}"
# TODO: Optional keywords (node.keywords)
def visit_Num(self, node):
return str(node.n)
def visit_Str(self, node):
return f"\"{node.s}\""
def visit_FormattedValue(self, node):
return "TODO"
def visit_JoinedStr(self, node):
return "TODO"
def visit_Bytes(self, node):
return "TODO"
def visit_NameConstant(self, node):
return str(node.value)
def visit_Ellipsis(self, node):
return "ellipsis"
    def visit_Constant(self, node):
        # Constant wraps a raw Python value, not an AST node, so don't recurse
        if isinstance(node.value, str):
            return f"\"{node.value}\""
        return str(node.value)
def visit_Attribute(self, node):
return f"{self.visit(node.value)} \"dot\" {node.attr}"
def visit_Subscript(self, node):
return f"the slice {self.visit(node.slice)} of {self.visit(node.value)}"
def visit_Starred(self, node):
return f"splat {self.visit(node.value)}"
def visit_Name(self, node):
return f"\"{node.id}\""
def visit_List(self, node):
if len(node.elts) == 0:
return "an empty list"
else:
return f"a list of {self.visit_list(node.elts)}"
def visit_Tuple(self, node):
return f"a tuple of {self.visit_list(node.elts)}"
"""
slice = Slice(expr? lower, expr? upper, expr? step)
| ExtSlice(slice* dims)
| Index(expr value)
"""
def visit_Slice(self, node):
summary = "A slice"\
+ (f"from {self.visit(node.lower)}" if node.lower else "")\
+ (f"to {self.visit(node.upper)}" if node.upper else "")\
+ (f"with stepping {self.visit(node.step)}" if node.step else "")
return summary
def visit_ExtSlice(self, node):
return "TODO"
def visit_Index(self, node):
return f"an index of {self.visit(node.value)}"
def visit_And(self, node):
return "and"
def visit_Or(self, node):
return "or"
def visit_Add(self, node):
return "plus"
def visit_Sub(self, node):
return "minus"
def visit_Mult(self, node):
return "times"
def visit_MatMult(self, node):
return "matrix times"
def visit_Div(self, node):
return "divided by"
def visit_Mod(self, node):
return "modulo"
def visit_Pow(self, node):
return "to the power of"
def visit_LShift(self, node):
return "left shifted by"
def visit_RShift(self, node):
return "right shifted by"
def visit_BitOr(self, node):
return "bitwise or"
def visit_BitXor(self, node):
return "bitwise exclusive or"
def visit_BitAnd(self, node):
return "and"
def visit_FloorDiv(self, node):
return "integer divided by"
def visit_Invert(self, node):
return "inverted"
def visit_Not(self, node):
return "not"
def visit_UAdd(self, node):
return "positive"
def visit_USub(self, node):
return "negative"
def visit_Eq(self, node):
return "is equal to"
def visit_NotEq(self, node):
return "is not equal to"
def visit_Lt(self, node):
return "is less than"
def visit_LtE(self, node):
return "is less than or equal to"
def visit_Gt(self, node):
return "is greater than"
def visit_GtE(self, node):
return "is greater than or equal to"
def visit_Is(self, node):
return "is"
def visit_IsNot(self, node):
return "is not"
def visit_In(self, node):
return "in"
def visit_NotIn(self, node):
return "not in"
"""
comprehension = (expr target, expr iter, expr* ifs, int is_async)
excepthandler = ExceptHandler(expr? type, identifier? name, stmt* body)
attributes (int lineno, int col_offset)
arguments = (arg* args, arg? vararg, arg* kwonlyargs, expr* kw_defaults,
arg? kwarg, expr* defaults)
arg = (identifier arg, expr? annotation)
attributes (int lineno, int col_offset)
-- keyword arguments supplied to call (NULL identifier for **kwargs)
keyword = (identifier? arg, expr value)
-- import name with optional 'as' alias.
alias = (identifier name, identifier? asname)
withitem = (expr context_expr, expr? optional_vars)
"""
def visit_comprehension(self, node):
guards = self.visit_list(node.ifs)
summary = ""\
+ f"{'an async' if node.is_async else 'a'}"\
+ f" generator using {self.visit(node.target)} as an iterator"\
+ f", looping through {self.visit(node.iter)}"\
+ (f", guarded by {guards}" if len(guards) != 0 else "")
return summary
def visit_excepthandler(self, node):
return "TODO"
def visit_arguments(self, node):
grammar_suffix = "s" if len(node.args) == 0 else\
": " if len(node.args) == 1 else\
"s: "
return f"{len(node.args)} argument{grammar_suffix}{self.visit_list(node.args)}"
def visit_arg(self, node):
return f"\"{node.arg}\"" + (f" of type {self.visit(node.annotation)}" if node.annotation else "")
def visit_keyword(self, node):
return "TODO"
def visit_alias(self, node):
return "TODO"
def visit_withitem(self, node):
return "TODO"
if __name__ == "__main__":
raw = """
class SomeClass(object):
def add(x: int, y: int) -> int:
x = [ i + 1 for i in range(1, 10) if i % 2 == 0 ]
("hello", True)
if True:
return False
return x + y
"""
if len(sys.argv) < 2:
print("Need filename")
code = raw
else:
file_name = sys.argv[1]
with open(file_name, 'r') as f:
code = f.read()
t = parse(code)
print(dump(t))
print(PrettyReader().visit(t))
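# A quick sanity check (a sketch; exact phrasing depends on the visit_* methods
# above): parsing "x = 1" and visiting the module yields
#   an L-value "x" assigned 1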
----------------------------------------
repo_name: MaxwellBo/neoreader
path: rplugin/python3/neoreader/py_ast.py
language: Python
license: gpl-3.0
size: 16655
keyword: ["VisIt"]
text_hash: 875094e945d453c62a9c2fb1ff9f06f90aa8887abf650c285e05b5cb1018eb7d
----------------------------------------
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2014 by the mediaTUM authors
:license: GPL3, see COPYING for details
Web API node iteration functions.
"""
import logging
from itertools import islice, tee
from utils.compat import iteritems
logg = logging.getLogger(__name__)
global all_node_ids_cached
all_node_ids_cached = None
# id of the collections node
COLLECTIONS_NODE_ID = 10
def all_node_id_gen(api, start_node_id=COLLECTIONS_NODE_ID):
"""Generates all node ids in the tree starting from `start_node_id`"""
top_collections = iteritems(api.children_shortlist(start_node_id))
    it = None
    while True:
        if not it:
            try:
                collection_nid, collection_name, _ = next(top_collections)
            except StopIteration:
                # no more top collections: we're done
                return
            logg.info("fetching all children of top collection %s (%s)", collection_name, collection_nid)
            it = api.allchildren_shortlist(collection_nid).iterids()
        try:
            nid = next(it)
        except StopIteration:
            # current collection exhausted; move on without re-yielding the last nid
            it = None
            continue
        yield nid
def visit_all_nodes(api, get_func, check_func=None, start=None, use_id_cache=False):
"""Visits all data nodes in the tree by running a function given by `get_func`.
:param api: API instance to use
:param get_func: the function that will be called for each data node id
:param check_func: function that will be called on the result of `get_func`.
This function should raise an Exception if something is wrong with the result.
:param start: number of nodes to skip at the beginning
:param use_id_cache: if True, use cached node ids from a previous visit run. This is much faster.
Can be used if your database doesn't change between runs.
"""
global all_node_ids_cached
if use_id_cache:
        if all_node_ids_cached:
            logg.info("using cached node ids")
            # tee() replays the cached id stream and keeps a fresh copy for next time
            all_node_ids, all_node_ids_cached = tee(all_node_ids_cached)
        else:
            logg.info("caching node ids for later runs")
            all_node_ids, all_node_ids_cached = tee(all_node_id_gen(api))
else:
all_node_ids = all_node_id_gen(api)
    if start:
        # skip the first `start` node ids
        all_node_ids = islice(all_node_ids, start, None)
visited_nids = set()
exceptions = {}
for nid in all_node_ids:
if nid not in visited_nids:
try:
node = get_func(nid)
if check_func:
check_func(node)
except Exception as e:
logg.warn("node %s failed", nid)
exceptions[nid] = e
visited_nids.add(nid)
visited_count = len(visited_nids)
if visited_count % 1000 == 0:
logg.info("%s nodes visited, %s exceptions", visited_count, len(exceptions))
return visited_nids, exceptions
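# A minimal usage sketch (hypothetical names; a real run needs a mediaTUM API
# client exposing children_shortlist() / allchildren_shortlist()):
#
#   def check_has_name(node):
#       assert node["name"], "node without a name"
#
#   visited, errors = visit_all_nodes(api, get_func=api.node,
#                                     check_func=check_has_name,
#                                     use_id_cache=True)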
----------------------------------------
repo_name: mediatum/mediatum
path: web/services/test/nodeiteration.py
language: Python
license: gpl-3.0
size: 2712
keyword: ["VisIt"]
text_hash: 5621c8f1b435e6d1368bfc036bcf1320a13ac45152a7a417845372fbada04545
----------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
.. note:: Experimental
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
HasProbabilityCol, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
    specifying each distribution's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. note:: For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001,
... maxIter=10, seed=10)
>>> model = gm.fit(df)
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> summary.logLikelihood
8.14636...
>>> weights = model.weights
>>> len(weights)
3
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[4].prediction == rows[5].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
self._setDefault(k=2, tol=0.01, maxIter=100)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureSummary(ClusteringSummary):
"""
.. note:: Experimental
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Summary of KMeans.
.. versionadded:: 2.1.0
"""
pass
class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Return the K-means cost (sum of squared distances of points to their nearest center)
for this model on the given data.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class KMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol, HasMaxIter,
HasTol, HasSeed, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean")
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure`
"""
return self.getOrDefault(self.distanceMeasure)
class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
"""
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model instance.
"""
return self._call_java("hasSummary")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(self._call_java("summary"))
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@inherit_doc
class BisectingKMeans(JavaEstimator, HasDistanceMeasure, HasFeaturesCol, HasPredictionCol,
HasMaxIter, HasSeed, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> model = bkm.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.000...
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
__init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean"):
"""
setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean")
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.4.0")
def getDistanceMeasure(self):
"""
Gets the value of `distanceMeasure` or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
.. note:: Experimental
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
pass
@inherit_doc
class LDAModel(JavaModel):
"""
Latent Dirichlet Allocation (LDA) model.
This abstraction permits for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
the Expectation-Maximization ("em") `optimizer`, then this method could involve
collecting a large amount of data to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes:
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
@since("2.0.0")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. note:: Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
:return List of checkpoint files from training
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
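    For example, count vectors can be produced along these lines (an illustrative
    sketch, not a doctest; ``textDF`` here is a hypothetical DataFrame with a
    string "text" column):
        from pyspark.ml.feature import Tokenizer, CountVectorizer
        tokens = Tokenizer(inputCol="text", outputCol="words").transform(textDF)
        cvModel = CountVectorizer(inputCol="words", outputCol="features").fit(tokens)
        docs = cvModel.transform(tokens)  # "features" holds the term-count vectors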
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> model = lda.fit(df)
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
.. versionadded:: 2.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
                               typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only 'em' and 'online' are supported.
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class PowerIterationClustering(HasMaxIter, HasWeightCol, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    `Lin and Cohen <http://www.icml2010.org/papers/387.pdf>`_. From the abstract:
PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.
.. seealso:: `Wikipedia on Spectral clustering \
<http://en.wikipedia.org/wiki/Spectral_clustering>`_
>>> data = [(1, 0, 0.5), \
(2, 0, 0.5), (2, 1, 0.7), \
(3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9), \
(4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1), \
(5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight")
>>> pic = PowerIterationClustering(k=2, maxIter=40, weightCol="weight")
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |1 |
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |0 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
.. versionadded:: 2.4.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@since("2.4.0")
def assignClusters(self, dataset):
"""
        Runs the PIC algorithm and returns a cluster assignment for each input vertex.
:param dataset:
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, and the weight column value is the similarity
            :math:`s_{ij}`, which must be nonnegative. This is a symmetric matrix and hence
            :math:`s_{ij} = s_{ji}`. For any (i, j) with nonzero similarity, there should be
            either (i, j, :math:`s_{ij}`) or (j, i, :math:`s_{ji}`) in the input. Rows with
            i = j are ignored, because it is assumed that :math:`s_{ij} = 0.0`.
        :return:
            A dataset that contains columns of vertex id and the corresponding cluster for
            the id. Its schema will be:
- id: Long
- cluster: Int
.. versionadded:: 2.4.0
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import pyspark.ml.clustering
from pyspark.sql import SparkSession
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
|
debugger87/spark
|
python/pyspark/ml/clustering.py
|
Python
|
apache-2.0
| 49,218
|
[
"Gaussian"
] |
1ab5671be7f667aac0344857778befe50735b8273d44a2da909206fb0b11a438
|
# sybase/base.py
# Copyright (C) 2010-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect within SQLAlchemy **is not currently supported**. The
dialect is not tested within continuous integration and is likely to have
many issues and caveats not currently handled.
"""
import re
from sqlalchemy import exc
from sqlalchemy import schema as sa_schema
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import text
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BINARY
from sqlalchemy.types import CHAR
from sqlalchemy.types import DATE
from sqlalchemy.types import DATETIME
from sqlalchemy.types import DECIMAL
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INT # noqa
from sqlalchemy.types import INTEGER
from sqlalchemy.types import NCHAR
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import NVARCHAR
from sqlalchemy.types import REAL
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.types import Unicode
from sqlalchemy.types import VARBINARY
from sqlalchemy.types import VARCHAR
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"backup",
"begin",
"between",
"bigint",
"binary",
"bit",
"bottom",
"break",
"by",
"call",
"capability",
"cascade",
"case",
"cast",
"char",
"char_convert",
"character",
"check",
"checkpoint",
"close",
"comment",
"commit",
"connect",
"constraint",
"contains",
"continue",
"convert",
"create",
"cross",
"cube",
"current",
"current_timestamp",
"current_user",
"cursor",
"date",
"dbspace",
"deallocate",
"dec",
"decimal",
"declare",
"default",
"delete",
"deleting",
"desc",
"distinct",
"do",
"double",
"drop",
"dynamic",
"else",
"elseif",
"encrypted",
"end",
"endif",
"escape",
"except",
"exception",
"exec",
"execute",
"existing",
"exists",
"externlogin",
"fetch",
"first",
"float",
"for",
"force",
"foreign",
"forward",
"from",
"full",
"goto",
"grant",
"group",
"having",
"holdlock",
"identified",
"if",
"in",
"index",
"index_lparen",
"inner",
"inout",
"insensitive",
"insert",
"inserting",
"install",
"instead",
"int",
"integer",
"integrated",
"intersect",
"into",
"iq",
"is",
"isolation",
"join",
"key",
"lateral",
"left",
"like",
"lock",
"login",
"long",
"match",
"membership",
"message",
"mode",
"modify",
"natural",
"new",
"no",
"noholdlock",
"not",
"notify",
"null",
"numeric",
"of",
"off",
"on",
"open",
"option",
"options",
"or",
"order",
"others",
"out",
"outer",
"over",
"passthrough",
"precision",
"prepare",
"primary",
"print",
"privileges",
"proc",
"procedure",
"publication",
"raiserror",
"readtext",
"real",
"reference",
"references",
"release",
"remote",
"remove",
"rename",
"reorganize",
"resource",
"restore",
"restrict",
"return",
"revoke",
"right",
"rollback",
"rollup",
"save",
"savepoint",
"scroll",
"select",
"sensitive",
"session",
"set",
"setuser",
"share",
"smallint",
"some",
"sqlcode",
"sqlstate",
"start",
"stop",
"subtrans",
"subtransaction",
"synchronize",
"syntax_error",
"table",
"temporary",
"then",
"time",
"timestamp",
"tinyint",
"to",
"top",
"tran",
"trigger",
"truncate",
"tsequal",
"unbounded",
"union",
"unique",
"unknown",
"unsigned",
"update",
"updating",
"user",
"using",
"validate",
"values",
"varbinary",
"varchar",
"variable",
"varying",
"view",
"wait",
"waitfor",
"when",
"where",
"while",
"window",
"with",
"with_cube",
"with_lparen",
"with_rollup",
"within",
"work",
"writetext",
]
)
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
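# Note (illustrative): result_processor() returns the per-column converter the
# dialect applies to each raw DBAPI value of this type, so fetched UNICHAR /
# UNIVARCHAR / UNITEXT values are coerced via str() before reaching the user.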
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNICHAR"
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = "UNIVARCHAR"
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = "UNITEXT"
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
"bigint": BIGINT,
"int": INTEGER,
"integer": INTEGER,
"smallint": SMALLINT,
"tinyint": TINYINT,
"unsigned bigint": BIGINT, # TODO: unsigned flags
"unsigned int": INTEGER, # TODO: unsigned flags
"unsigned smallint": SMALLINT, # TODO: unsigned flags
"numeric": NUMERIC,
"decimal": DECIMAL,
"dec": DECIMAL,
"float": FLOAT,
"double": NUMERIC, # TODO
"double precision": NUMERIC, # TODO
"real": REAL,
"smallmoney": SMALLMONEY,
"money": MONEY,
"smalldatetime": DATETIME,
"datetime": DATETIME,
"date": DATE,
"time": TIME,
"char": CHAR,
"character": CHAR,
"varchar": VARCHAR,
"character varying": VARCHAR,
"char varying": VARCHAR,
"unichar": UNICHAR,
"unicode character": UNIVARCHAR,
"nchar": NCHAR,
"national char": NCHAR,
"national character": NCHAR,
"nvarchar": NVARCHAR,
"nchar varying": NVARCHAR,
"national char varying": NVARCHAR,
"national character varying": NVARCHAR,
"text": TEXT,
"unitext": UNITEXT,
"binary": BINARY,
"varbinary": VARBINARY,
"image": IMAGE,
"bit": BIT,
# not in documentation for ASE 15.7
"long varchar": TEXT, # TODO
"timestamp": TIMESTAMP,
"uniqueidentifier": UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(
self.bind, table_name, schema, info_cache=self.info_cache
)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = (
seq_column.key in self.compiled_parameters[0]
)
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s ON"
% self.dialect.identifier_preparer.format_table(tbl)
)
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time."
)
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')"
)
self.set_ddl_autocommit(
self.root_connection.connection.connection, True
)
def post_exec(self):
if self.isddl:
            self.set_ddl_autocommit(
                self.root_connection.connection.connection, False
            )
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF"
% self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
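# A minimal sketch of a driver-specific execution context (illustrative only;
# the 'autocommit' attribute on the raw DBAPI connection is an assumption and
# depends on the DBAPI actually in use):
class _ExampleSybaseExecutionContext(SybaseExecutionContext):
    def set_ddl_autocommit(self, connection, value):
        # Toggle driver-level autocommit so DDL can run outside a transaction.
        connection.autocommit = value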
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{"doy": "dayofyear", "dow": "weekday", "milliseconds": "millisecond"},
)
def get_select_precolumns(self, select, **kw):
        s = "DISTINCT " if select._distinct else ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
limit = select._limit
if limit:
# if select._limit == 1:
# s += "FIRST "
# else:
# s += "TOP %s " % (select._limit,)
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
raise NotImplementedError("Sybase ASE does not support OFFSET")
return s
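    # Illustrative rendering: select(...).limit(5) emits "SELECT TOP 5 ...";
    # with DISTINCT present this becomes "SELECT DISTINCT TOP 5 ...".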
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (field, self.process(extract.expr, **kw))
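    # Illustrative: extract('dow', expr) compiles to DATEPART("weekday", expr)
    # via the extract_map above.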
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
kw["literal_binds"] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to Sybase."""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL"
)
seq_col = column.table._autoincrement_column
        # install an IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = (
isinstance(column.default, sa_schema.Sequence)
and column.default
)
if sequence:
start, increment = sequence.start or 1, sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
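    # Illustrative output: an implicit autoincrement integer primary key renders
    # as "id INTEGER IDENTITY NOT NULL"; with an explicit Sequence(start=100,
    # increment=5) it renders as "id INTEGER IDENTITY(100,5) NOT NULL".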
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element, include_schema=False),
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = "sybase"
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name").columns(username=Unicode)
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if (
self.server_version_info is not None
and self.server_version_info < (15,)
):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
        Several reflection methods require the table id. It can be fetched once
        and cached (via info_cache) for subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text(
"""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
if isinstance(table_name, unicode): # noqa
table_name = table_name.encode("ascii")
result = connection.execute(
TABLEID_SQL, schema_name=schema, table_name=table_name
)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
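    # The reflection methods below thread info_cache through to get_table_id(),
    # so the id lookup above runs at most once per table per Inspector session.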
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
COLUMN_SQL = text(
"""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (
name,
type_,
nullable,
autoincrement,
default_,
precision,
scale,
length,
) in results:
col_info = self._get_column_info(
name,
type_,
bool(nullable),
bool(autoincrement),
default_,
precision,
scale,
length,
)
columns.append(col_info)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
autoincrement,
default,
precision,
scale,
length,
):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
# is this necessary
# if is_array:
# coltype = ARRAY(coltype)
else:
util.warn(
"Did not recognize type '%s' of column '%s'" % (type_, name)
)
coltype = sqltypes.NULLTYPE
if default:
default = default.replace("DEFAULT", "").strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(
name=name,
type=coltype,
nullable=nullable,
default=default,
autoincrement=autoincrement,
)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text(
"""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
"""
)
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text(
"""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
"""
)
referential_constraints = connection.execute(
REFCONSTRAINT_SQL, table_id=table_id
).fetchall()
REFTABLE_SQL = text(
"""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
"""
)
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (
schema is not None
or reftable["schema"] != self.default_schema_name
):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"],
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
INDEX_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {
"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names,
}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
PK_SQL = text(
"""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
"""
)
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
if pks:
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {
"constrained_columns": constrained_columns,
"name": pks["name"],
}
else:
return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text(
"""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(view_name, unicode): # noqa
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text(
"""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
"""
)
if util.py2k:
if isinstance(schema, unicode): # noqa
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
|
cloudera/hue
|
desktop/core/ext-py/SQLAlchemy-1.3.17/lib/sqlalchemy/dialects/sybase/base.py
|
Python
|
apache-2.0
| 31,953
|
[
"ASE"
] |
3751b9b5428eb418de39779172264fea2ba2d4cf02616156f4a72103ea838bfb
|
"""GraphQL Language
The :mod:`graphql.language` package is responsible for parsing and operating on the
GraphQL language.
"""
from .source import Source
from .location import get_location, SourceLocation, FormattedSourceLocation
from .print_location import print_location, print_source_location
from .token_kind import TokenKind
from .lexer import Lexer
from .parser import parse, parse_type, parse_value, parse_const_value
from .printer import print_ast
from .visitor import (
visit,
Visitor,
ParallelVisitor,
VisitorAction,
VisitorKeyMap,
BREAK,
SKIP,
REMOVE,
IDLE,
)
from .ast import (
Location,
Token,
Node,
# Each kind of AST node
NameNode,
DocumentNode,
DefinitionNode,
ExecutableDefinitionNode,
OperationDefinitionNode,
OperationType,
VariableDefinitionNode,
VariableNode,
SelectionSetNode,
SelectionNode,
FieldNode,
ArgumentNode,
ConstArgumentNode,
FragmentSpreadNode,
InlineFragmentNode,
FragmentDefinitionNode,
ValueNode,
ConstValueNode,
IntValueNode,
FloatValueNode,
StringValueNode,
BooleanValueNode,
NullValueNode,
EnumValueNode,
ListValueNode,
ConstListValueNode,
ObjectValueNode,
ConstObjectValueNode,
ObjectFieldNode,
ConstObjectFieldNode,
DirectiveNode,
ConstDirectiveNode,
TypeNode,
NamedTypeNode,
ListTypeNode,
NonNullTypeNode,
TypeSystemDefinitionNode,
SchemaDefinitionNode,
OperationTypeDefinitionNode,
TypeDefinitionNode,
ScalarTypeDefinitionNode,
ObjectTypeDefinitionNode,
FieldDefinitionNode,
InputValueDefinitionNode,
InterfaceTypeDefinitionNode,
UnionTypeDefinitionNode,
EnumTypeDefinitionNode,
EnumValueDefinitionNode,
InputObjectTypeDefinitionNode,
DirectiveDefinitionNode,
TypeSystemExtensionNode,
SchemaExtensionNode,
TypeExtensionNode,
ScalarTypeExtensionNode,
ObjectTypeExtensionNode,
InterfaceTypeExtensionNode,
UnionTypeExtensionNode,
EnumTypeExtensionNode,
InputObjectTypeExtensionNode,
)
from .predicates import (
is_definition_node,
is_executable_definition_node,
is_selection_node,
is_value_node,
is_const_value_node,
is_type_node,
is_type_system_definition_node,
is_type_definition_node,
is_type_system_extension_node,
is_type_extension_node,
)
from .directive_locations import DirectiveLocation
__all__ = [
"get_location",
"SourceLocation",
"FormattedSourceLocation",
"print_location",
"print_source_location",
"TokenKind",
"Lexer",
"parse",
"parse_value",
"parse_const_value",
"parse_type",
"print_ast",
"Source",
"visit",
"Visitor",
"ParallelVisitor",
"VisitorAction",
"VisitorKeyMap",
"BREAK",
"SKIP",
"REMOVE",
"IDLE",
"Location",
"Token",
"DirectiveLocation",
"Node",
"NameNode",
"DocumentNode",
"DefinitionNode",
"ExecutableDefinitionNode",
"OperationDefinitionNode",
"OperationType",
"VariableDefinitionNode",
"VariableNode",
"SelectionSetNode",
"SelectionNode",
"FieldNode",
"ArgumentNode",
"ConstArgumentNode",
"FragmentSpreadNode",
"InlineFragmentNode",
"FragmentDefinitionNode",
"ValueNode",
"ConstValueNode",
"IntValueNode",
"FloatValueNode",
"StringValueNode",
"BooleanValueNode",
"NullValueNode",
"EnumValueNode",
"ListValueNode",
"ConstListValueNode",
"ObjectValueNode",
"ConstObjectValueNode",
"ObjectFieldNode",
"ConstObjectFieldNode",
"DirectiveNode",
"ConstDirectiveNode",
"TypeNode",
"NamedTypeNode",
"ListTypeNode",
"NonNullTypeNode",
"TypeSystemDefinitionNode",
"SchemaDefinitionNode",
"OperationTypeDefinitionNode",
"TypeDefinitionNode",
"ScalarTypeDefinitionNode",
"ObjectTypeDefinitionNode",
"FieldDefinitionNode",
"InputValueDefinitionNode",
"InterfaceTypeDefinitionNode",
"UnionTypeDefinitionNode",
"EnumTypeDefinitionNode",
"EnumValueDefinitionNode",
"InputObjectTypeDefinitionNode",
"DirectiveDefinitionNode",
"TypeSystemExtensionNode",
"SchemaExtensionNode",
"TypeExtensionNode",
"ScalarTypeExtensionNode",
"ObjectTypeExtensionNode",
"InterfaceTypeExtensionNode",
"UnionTypeExtensionNode",
"EnumTypeExtensionNode",
"InputObjectTypeExtensionNode",
"is_definition_node",
"is_executable_definition_node",
"is_selection_node",
"is_value_node",
"is_const_value_node",
"is_type_node",
"is_type_system_definition_node",
"is_type_definition_node",
"is_type_system_extension_node",
"is_type_extension_node",
]
|
graphql-python/graphql-core
|
src/graphql/language/__init__.py
|
Python
|
mit
| 4,790
|
[
"VisIt"
] |
f6da8edec57b32e433ec76eab68641c5c247af7a2fe44038ee919741b2eec862
|
#
# co_co_user_defined_function_correctly_defined.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.meta_model.ast_compound_stmt import ASTCompoundStmt
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.meta_model.ast_small_stmt import ASTSmallStmt
from pynestml.meta_model.ast_stmt import ASTStmt
from pynestml.symbols.error_type_symbol import ErrorTypeSymbol
from pynestml.symbols.predefined_types import PredefinedTypes
from pynestml.symbols.symbol import SymbolKind
from pynestml.utils.ast_helper import ASTHelper
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.utils.type_caster import TypeCaster
class CoCoUserDefinedFunctionCorrectlyDefined(CoCo):
"""
    This coco ensures that all user-defined functions that are declared with a return type
    have a return statement, and that the type of the return statement is consistent with
    the declaration.
Allowed:
function foo(...) bool:
return True
end
Not allowed:
function foo(...) bool:
return
end
Attributes:
processed_function (ast_function): A reference to the currently processed function.
"""
name = 'user defined functions correct'
description = 'TODO'
def __init__(self):
self.processed_function = None
self.neuron_name = None
def check_co_co(self, neuron):
"""
Checks the coco for the handed over neuron.
:param neuron: a single neuron instance.
:type neuron: ASTNeuron
"""
assert (neuron is not None and isinstance(neuron, ASTNeuron)), \
            '(PyNestML.CoCo.UserDefinedFunctionCorrectlyDefined) No or wrong type of neuron provided (%s)!' % type(neuron)
self.neuron_name = neuron.get_name()
for userDefinedFunction in ASTHelper.get_functions_from_neuron(neuron):
self.processed_function = userDefinedFunction
symbol = userDefinedFunction.get_scope().resolve_to_symbol(userDefinedFunction.get_name(),
SymbolKind.FUNCTION)
# first ensure that the block contains at least one statement
if symbol is not None and len(userDefinedFunction.get_block().get_stmts()) > 0:
# now check that the last statement is a return
self.__check_return_recursively(symbol.get_return_type(),
userDefinedFunction.get_block().get_stmts(), False)
# now if it does not have a statement, but uses a return type, it is an error
elif symbol is not None and userDefinedFunction.has_return_type() and \
not symbol.get_return_type().equals(PredefinedTypes.get_void_type()):
code, message = Messages.get_no_return()
Logger.log_message(neuron=neuron, code=code, message=message,
error_position=userDefinedFunction.get_source_position(),
log_level=LoggingLevel.ERROR)
return
def __check_return_recursively(self, type_symbol = None, stmts = None, ret_defined = False):
"""
        Checks recursively whether the handed-over statements contain a return statement
        and whether it is typed according to the handed-over type symbol.
        :param type_symbol: a single type symbol
        :type type_symbol: TypeSymbol
        :param stmts: a list of statements, either simple or compound
        :type stmts: list(ASTSmallStmt,ASTCompoundStmt)
        :param ret_defined: indicates whether a return has already been defined after this block
               of statements and is therefore not required here. Implies that the return has
               been defined in the higher-level block.
:type ret_defined: bool
"""
        # if the last statement in this block is a return statement, then a return in the
        # sub-blocks is optional rather than required
        if len(stmts) > 0:
            last_statement = stmts[-1]
            if (isinstance(last_statement, ASTStmt)
                    and last_statement.is_small_stmt()
                    and last_statement.small_stmt.is_return_stmt()):
                ret_defined = True
# now check that returns are there if necessary and correctly typed
for c_stmt in stmts:
if c_stmt.is_small_stmt():
stmt = c_stmt.small_stmt
else:
stmt = c_stmt.compound_stmt
# if it is a small statement, check if it is a return statement
if isinstance(stmt, ASTSmallStmt) and stmt.is_return_stmt():
# first check if the return is the last one in this block of statements
if stmts.index(c_stmt) != (len(stmts) - 1):
code, message = Messages.get_not_last_statement('Return')
Logger.log_message(error_position=stmt.get_source_position(),
code=code, message=message,
log_level=LoggingLevel.WARNING)
# now check that it corresponds to the declared type
                if stmt.get_return_stmt().has_expression() and \
                        type_symbol.equals(PredefinedTypes.get_void_type()):
code, message = Messages.get_type_different_from_expected(PredefinedTypes.get_void_type(),
stmt.get_return_stmt().get_expression().type)
Logger.log_message(error_position=stmt.get_source_position(),
message=message, code=code, log_level=LoggingLevel.ERROR)
# if it is not void check if the type corresponds to the one stated
if not stmt.get_return_stmt().has_expression() and \
not type_symbol.equals(PredefinedTypes.get_void_type()):
code, message = Messages.get_type_different_from_expected(PredefinedTypes.get_void_type(),
type_symbol)
Logger.log_message(error_position=stmt.get_source_position(),
message=message, code=code, log_level=LoggingLevel.ERROR)
if stmt.get_return_stmt().has_expression():
type_of_return = stmt.get_return_stmt().get_expression().type
if isinstance(type_of_return, ErrorTypeSymbol):
code, message = Messages.get_type_could_not_be_derived(self.processed_function.get_name())
Logger.log_message(error_position=stmt.get_source_position(),
code=code, message=message, log_level=LoggingLevel.ERROR)
elif not type_of_return.equals(type_symbol):
TypeCaster.try_to_recover_or_error(type_symbol, type_of_return,
stmt.get_return_stmt().get_expression())
elif isinstance(stmt, ASTCompoundStmt):
# otherwise it is a compound stmt, thus check recursively
if stmt.is_if_stmt():
self.__check_return_recursively(type_symbol,
stmt.get_if_stmt().get_if_clause().get_block().get_stmts(),
ret_defined)
for else_ifs in stmt.get_if_stmt().get_elif_clauses():
                        self.__check_return_recursively(type_symbol, else_ifs.get_block().get_stmts(), ret_defined)
if stmt.get_if_stmt().has_else_clause():
self.__check_return_recursively(type_symbol,
stmt.get_if_stmt().get_else_clause().get_block().get_stmts(),
ret_defined)
elif stmt.is_while_stmt():
self.__check_return_recursively(type_symbol, stmt.get_while_stmt().get_block().get_stmts(),
ret_defined)
elif stmt.is_for_stmt():
self.__check_return_recursively(type_symbol, stmt.get_for_stmt().get_block().get_stmts(),
ret_defined)
# now, if a return statement has not been defined in the corresponding higher level block, we have
# to ensure that it is defined here
elif not ret_defined and stmts.index(c_stmt) == (len(stmts) - 1):
if not (isinstance(stmt, ASTSmallStmt) and stmt.is_return_stmt()):
code, message = Messages.get_no_return()
Logger.log_message(error_position=stmt.get_source_position(), log_level=LoggingLevel.ERROR,
code=code, message=message)
return
|
kperun/nestml
|
pynestml/cocos/co_co_user_defined_function_correctly_defined.py
|
Python
|
gpl-2.0
| 9,804
|
[
"NEURON"
] |
4dd5756c2edd117af717fa5709e294459cb16ead77cd61d7b0b516331c2dee55
|
# encoding: utf-8
"""
Job and task components for writing .xml files that the Windows HPC Server
2008 can use to start jobs.
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import uuid
from xml.etree import ElementTree as ET
from traitlets.config.configurable import Configurable
from ipython_genutils.py3compat import iteritems
from traitlets import (
Unicode, Integer, List, Instance,
Enum, Bool
)
#-----------------------------------------------------------------------------
# Job and Task classes
#-----------------------------------------------------------------------------
def as_str(value):
if isinstance(value, str):
return value
elif isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
elif isinstance(value, (int, float)):
return repr(value)
else:
return value
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
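# Note: the 'for elem in elem' rebinding above is intentional - after the loop,
# 'elem' is the last child, whose tail is then adjusted so the parent's closing
# tag lands on its own line. Usage sketch (illustrative):
#     root = ET.Element('Job'); ET.SubElement(root, 'Tasks')
#     indent(root)            # mutates .text/.tail in place
#     ET.tostring(root)       # now serializes with newlines and indentation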
def find_username():
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME','')
if domain is None:
return username
else:
return '%s\\%s' % (domain, username)
class WinHPCJob(Configurable):
job_id = Unicode('')
job_name = Unicode('MyJob', config=True)
min_cores = Integer(1, config=True)
max_cores = Integer(1, config=True)
min_sockets = Integer(1, config=True)
max_sockets = Integer(1, config=True)
min_nodes = Integer(1, config=True)
max_nodes = Integer(1, config=True)
unit_type = Unicode("Core", config=True)
auto_calculate_min = Bool(True, config=True)
auto_calculate_max = Bool(True, config=True)
run_until_canceled = Bool(False, config=True)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
job_type = Unicode('Batch', config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
xmlns = Unicode('http://schemas.microsoft.com/HPCS2008/scheduler/')
version = Unicode("2.000")
tasks = List([])
@property
def owner(self):
return self.username
def _write_attr(self, root, attr, key):
s = as_str(getattr(self, attr, ''))
if s:
root.set(key, s)
def as_element(self):
        # We have to add the _A_-style prefixes to force the attribute order
        # that the MSFT XML parser expects.
root = ET.Element('Job')
self._write_attr(root, 'version', '_A_Version')
self._write_attr(root, 'job_name', '_B_Name')
self._write_attr(root, 'unit_type', '_C_UnitType')
self._write_attr(root, 'min_cores', '_D_MinCores')
self._write_attr(root, 'max_cores', '_E_MaxCores')
self._write_attr(root, 'min_sockets', '_F_MinSockets')
self._write_attr(root, 'max_sockets', '_G_MaxSockets')
self._write_attr(root, 'min_nodes', '_H_MinNodes')
self._write_attr(root, 'max_nodes', '_I_MaxNodes')
self._write_attr(root, 'run_until_canceled', '_J_RunUntilCanceled')
self._write_attr(root, 'is_exclusive', '_K_IsExclusive')
self._write_attr(root, 'username', '_L_UserName')
self._write_attr(root, 'job_type', '_M_JobType')
self._write_attr(root, 'priority', '_N_Priority')
self._write_attr(root, 'requested_nodes', '_O_RequestedNodes')
self._write_attr(root, 'auto_calculate_max', '_P_AutoCalculateMax')
self._write_attr(root, 'auto_calculate_min', '_Q_AutoCalculateMin')
self._write_attr(root, 'project', '_R_Project')
self._write_attr(root, 'owner', '_S_Owner')
self._write_attr(root, 'xmlns', '_T_xmlns')
dependencies = ET.SubElement(root, "Dependencies")
etasks = ET.SubElement(root, "Tasks")
for t in self.tasks:
etasks.append(t.as_element())
return root
def tostring(self):
"""Return the string representation of the job description XML."""
root = self.as_element()
indent(root)
txt = ET.tostring(root, encoding="utf-8").decode('utf-8')
# Now remove the tokens used to order the attributes.
txt = re.sub(r'_[A-Z]_','',txt)
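        # e.g. '_B_Name="MyJob"' becomes 'Name="MyJob"'; the alphabetic prefixes
        # existed only to force ElementTree's sorted attribute output into the
        # order the scheduler expects.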
txt = '<?xml version="1.0" encoding="utf-8"?>\n' + txt
return txt
def write(self, filename):
"""Write the XML job description to a file."""
txt = self.tostring()
with open(filename, 'w') as f:
f.write(txt)
def add_task(self, task):
"""Add a task to the job.
Parameters
----------
task : :class:`WinHPCTask`
The task object to add.
"""
self.tasks.append(task)
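# Usage sketch (illustrative; names and values are placeholders):
#     job = WinHPCJob(job_name='Demo', min_cores=1, max_cores=4)
#     task = WinHPCTask(task_name='worker', command_line='echo hello')
#     job.add_task(task)
#     job.write('demo_job.xml')  # job description consumable by the scheduler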
class WinHPCTask(Configurable):
task_id = Unicode('')
task_name = Unicode('')
version = Unicode("2.000")
min_cores = Integer(1, config=True)
max_cores = Integer(1, config=True)
min_sockets = Integer(1, config=True)
max_sockets = Integer(1, config=True)
min_nodes = Integer(1, config=True)
max_nodes = Integer(1, config=True)
unit_type = Unicode("Core", config=True)
command_line = Unicode('', config=True)
work_directory = Unicode('', config=True)
    is_rerunnable = Bool(True, config=True)
std_out_file_path = Unicode('', config=True)
std_err_file_path = Unicode('', config=True)
is_parametric = Bool(False, config=True)
environment_variables = Instance(dict, args=(), config=True)
def _write_attr(self, root, attr, key):
s = as_str(getattr(self, attr, ''))
if s:
root.set(key, s)
def as_element(self):
root = ET.Element('Task')
self._write_attr(root, 'version', '_A_Version')
self._write_attr(root, 'task_name', '_B_Name')
self._write_attr(root, 'min_cores', '_C_MinCores')
self._write_attr(root, 'max_cores', '_D_MaxCores')
self._write_attr(root, 'min_sockets', '_E_MinSockets')
self._write_attr(root, 'max_sockets', '_F_MaxSockets')
self._write_attr(root, 'min_nodes', '_G_MinNodes')
self._write_attr(root, 'max_nodes', '_H_MaxNodes')
self._write_attr(root, 'command_line', '_I_CommandLine')
self._write_attr(root, 'work_directory', '_J_WorkDirectory')
        self._write_attr(root, 'is_rerunnable', '_K_IsRerunnable')
self._write_attr(root, 'std_out_file_path', '_L_StdOutFilePath')
self._write_attr(root, 'std_err_file_path', '_M_StdErrFilePath')
self._write_attr(root, 'is_parametric', '_N_IsParametric')
self._write_attr(root, 'unit_type', '_O_UnitType')
root.append(self.get_env_vars())
return root
def get_env_vars(self):
env_vars = ET.Element('EnvironmentVariables')
for k, v in iteritems(self.environment_variables):
variable = ET.SubElement(env_vars, "Variable")
name = ET.SubElement(variable, "Name")
name.text = k
value = ET.SubElement(variable, "Value")
value.text = v
return env_vars
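    # The element built above has the shape:
    #   <EnvironmentVariables>
    #     <Variable><Name>PYTHONPATH</Name><Value>...</Value></Variable>
    #   </EnvironmentVariables>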
# By declaring these, we can configure the controller and engine separately!
class IPControllerJob(WinHPCJob):
job_name = Unicode('IPController', config=False)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
class IPEngineSetJob(WinHPCJob):
job_name = Unicode('IPEngineSet', config=False)
is_exclusive = Bool(False, config=True)
username = Unicode(find_username(), config=True)
priority = Enum(('Lowest','BelowNormal','Normal','AboveNormal','Highest'),
default_value='Highest', config=True)
requested_nodes = Unicode('', config=True)
project = Unicode('IPython', config=True)
class IPControllerTask(WinHPCTask):
task_name = Unicode('IPController', config=True)
controller_cmd = List(['ipcontroller.exe'], config=True)
controller_args = List(['--log-to-file', '--log-level=40'], config=True)
# I don't want these to be configurable
std_out_file_path = Unicode('', config=False)
std_err_file_path = Unicode('', config=False)
min_cores = Integer(1, config=False)
max_cores = Integer(1, config=False)
min_sockets = Integer(1, config=False)
max_sockets = Integer(1, config=False)
min_nodes = Integer(1, config=False)
max_nodes = Integer(1, config=False)
unit_type = Unicode("Core", config=False)
work_directory = Unicode('', config=False)
def __init__(self, **kwargs):
super(IPControllerTask, self).__init__(**kwargs)
the_uuid = uuid.uuid1()
self.std_out_file_path = os.path.join('log','ipcontroller-%s.out' % the_uuid)
self.std_err_file_path = os.path.join('log','ipcontroller-%s.err' % the_uuid)
@property
def command_line(self):
return ' '.join(self.controller_cmd + self.controller_args)
class IPEngineTask(WinHPCTask):
task_name = Unicode('IPEngine', config=True)
engine_cmd = List(['ipengine.exe'], config=True)
engine_args = List(['--log-to-file', '--log-level=40'], config=True)
# I don't want these to be configurable
std_out_file_path = Unicode('', config=False)
std_err_file_path = Unicode('', config=False)
min_cores = Integer(1, config=False)
max_cores = Integer(1, config=False)
min_sockets = Integer(1, config=False)
max_sockets = Integer(1, config=False)
min_nodes = Integer(1, config=False)
max_nodes = Integer(1, config=False)
unit_type = Unicode("Core", config=False)
work_directory = Unicode('', config=False)
def __init__(self, **kwargs):
super(IPEngineTask,self).__init__(**kwargs)
the_uuid = uuid.uuid1()
self.std_out_file_path = os.path.join('log','ipengine-%s.out' % the_uuid)
self.std_err_file_path = os.path.join('log','ipengine-%s.err' % the_uuid)
@property
def command_line(self):
return ' '.join(self.engine_cmd + self.engine_args)
# j = WinHPCJob(None)
# j.job_name = 'IPCluster'
# j.username = 'GNET\\bgranger'
# j.requested_nodes = 'GREEN'
#
# t = WinHPCTask(None)
# t.task_name = 'Controller'
# t.command_line = r"\\blue\domainusers$\bgranger\Python\Python25\Scripts\ipcontroller.exe --log-to-file -p default --log-level 10"
# t.work_directory = r"\\blue\domainusers$\bgranger\.ipython\cluster_default"
# t.std_out_file_path = 'controller-out.txt'
# t.std_err_file_path = 'controller-err.txt'
# t.environment_variables['PYTHONPATH'] = r"\\blue\domainusers$\bgranger\Python\Python25\Lib\site-packages"
# j.add_task(t)
|
fzheng/codejam
|
lib/python2.7/site-packages/ipyparallel/apps/winhpcjob.py
|
Python
|
mit
| 11,737
|
[
"Brian"
] |
fb1e0af15565e01fdf56019cb34da42c26e9b44906959dfdfd6f643b63e8b78a
|
model = """# Generated by PySCeS 0.7.2 (2010-08-10 13:12)
# Keywords
Description: Birth-death model (001), variant 02
Modelname: BirthDeath02
Output_In_Conc: True
Species_In_Conc: False
# GlobalUnitDefinitions
UnitVolume: litre, 1.0, 0, 1
UnitLength: metre, 1.0, 0, 1
UnitSubstance: item, 1.0, 0, 1
UnitArea: metre, 1.0, 0, 2
UnitTime: second, 1.0, 0, 1
# Compartments
Compartment: Cell, 1.0, 3
# Reactions
Death@Cell:
X > $pool
Death_Mu*X
Birth@Cell:
$pool > X
Birth_Lambda*X
# Fixed species
# Variable species
X@Cell = 100.0
# Parameters
Death_Mu = 0.11
Birth_Lambda = 0.1
"""
xml_model = """<?xml version="1.0" encoding="UTF-8"?>
<sbml xmlns="http://www.sbml.org/sbml/level2/version4" level="2" version="4">
<model id="BirthDeath01" name="Birth-death model (001), variant 01">
<listOfUnitDefinitions>
<unitDefinition id="substance">
<listOfUnits>
<unit kind="item"/>
</listOfUnits>
</unitDefinition>
</listOfUnitDefinitions>
<listOfCompartments>
<compartment id="Cell"/>
</listOfCompartments>
<listOfSpecies>
<species id="X" compartment="Cell" initialAmount="100" hasOnlySubstanceUnits="true"/>
</listOfSpecies>
<listOfParameters>
<parameter id="Lambda" value="0.1"/>
<parameter id="Mu" value="0.11"/>
</listOfParameters>
<listOfReactions>
<reaction id="Birth" reversible="false">
<listOfReactants>
<speciesReference species="X"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="X" stoichiometry="2"/>
</listOfProducts>
<kineticLaw>
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<times/>
<ci> Lambda </ci>
<ci> X </ci>
</apply>
</math>
</kineticLaw>
</reaction>
<reaction id="Death" reversible="false">
<listOfReactants>
<speciesReference species="X"/>
</listOfReactants>
<kineticLaw>
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<times/>
<ci> Mu </ci>
<ci> X </ci>
</apply>
</math>
</kineticLaw>
</reaction>
</listOfReactions>
</model>
</sbml>"""
|
SystemsBioinformatics/stochpy
|
stochpy/pscmodels/dsmts_001_01.py
|
Python
|
gpl-3.0
| 2,339
|
[
"PySCeS"
] |
ddc0c23391a71cbac4d4f287a3b73833fb99f074326f8e202f5553aa5e65afc0
|
#!/usr/bin/env python
# encoding: UTF-8
import asyncio
import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import logging
import smtplib
import sqlite3
import sys
import textwrap
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
from cloudhands.common.schema import Component
from cloudhands.common.schema import Membership
from cloudhands.common.schema import TimeInterval
from cloudhands.common.schema import Touch
class Emailer:
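    # Borg pattern: every instance rebinds its __dict__ to the shared
    # class-level dict in __init__, so all Emailer objects share one queue,
    # one config and a single notify() task.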
_shared_state = {}
TEXT = textwrap.dedent("""
Your action is required.
Please visit this link to confirm your membership:
{url}
""").strip()
HTML = textwrap.dedent("""
<html>
<head></head>
<body>
<h1>Your action is required</h1>
<p>Please visit this link to confirm your membership.</p>
<p><a href="{url}">{url}</a></p>
</body>
</html>
""").strip()
def __init__(self, q, args, config):
self.__dict__ = self._shared_state
if not hasattr(self, "task"):
self.q = q
self.args = args
self.config = config
self.task = asyncio.Task(self.notify())
@asyncio.coroutine
def notify(self):
log = logging.getLogger("cloudhands.identity.emailer")
session = Registry().connect(sqlite3, self.args.db).session
initialise(session)
actor = session.query(Component).filter(
Component.handle=="identity.controller").one()
while True:
dst, host, mship_uuid = yield from self.q.get()
path = "membership/{}".format(mship_uuid)
url = '/'.join((host, path))
src = self.config["smtp.src"]["from"]
mship = session.query(Membership).filter(
Membership.uuid == mship_uuid).first()
latest = mship.changes[-1]
now = datetime.datetime.utcnow()
end = now + datetime.timedelta(hours=24)
act = Touch(artifact=mship, actor=actor, state=latest.state, at=now)
limit = TimeInterval(end=end, touch=act)
msg = MIMEMultipart("alternative")
msg["Subject"] = self.config["smtp.src"]["subject"]
msg["From"] = src
msg["To"] = dst
text = Emailer.TEXT.format(url=url)
html = Emailer.HTML.format(url=url)
for i in (MIMEText(text, "plain"), MIMEText(html, "html")):
msg.attach(i)
s = smtplib.SMTP(self.config["smtp.mta"]["host"])
s.sendmail(src, dst, msg.as_string())
s.quit()
try:
session.add(limit)
session.commit()
except Exception as e:
log.error(e)
session.rollback()
continue
else:
log.info(
"{0.touch.artifact.uuid} {0.touch.state.name} "
"until {0.end:%Y-%m-%dT%H:%M:%S}".format(limit))
|
cedadev/cloudhands-web
|
cloudhands/identity/emailer.py
|
Python
|
bsd-3-clause
| 3,027
|
[
"VisIt"
] |
dfd8d8b59f28171ce276dc39013e1f631f28e77b68558070cc0e09610502c9c0
|
# coding: utf-8
from django.conf.urls import url, include
from rest_framework import routers
from .views import FeedViewSet, EntryViewSet
from .views import AggrRssFeed, AggrAtomFeed
from .views import EntryRedirectView
router = routers.DefaultRouter()
router.register(r'feeds', FeedViewSet)
router.register(r'entries', EntryViewSet, base_name='entry')
urlpatterns = [
url(r'', include(router.urls)),
    url(r'^rss/$', AggrRssFeed(), name='rss'),
    url(r'^atom/$', AggrAtomFeed(), name='atom'),
    url(r'^visit/(?P<id>[0-9]+)/$', EntryRedirectView.as_view(), name='visit'),
]
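# Resulting endpoints: the router contributes feeds/ and entries/, and the
# explicit patterns above add rss/, atom/ and visit/<id>/.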
|
uraxy/imozzle
|
api/urls.py
|
Python
|
mit
| 608
|
[
"VisIt"
] |
b1bc3f71dc00aad720da1e33e2a773c4f57bc96d91b4ab8a4ff668e83ce6a514
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Device'
db.create_table(u'djanalytics_device', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user_agent', self.gf('django.db.models.fields.TextField')(blank=True)),
('os', self.gf('django.db.models.fields.CharField')(max_length=100)),
('os_version', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('browser', self.gf('django.db.models.fields.CharField')(max_length=100)),
('browser_version', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('device', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('device_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.DeviceType'], null=True, blank=True)),
('screen_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('screen_height', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
))
db.send_create_signal('djanalytics', ['Device'])
# Adding model 'WebProperty'
db.create_table(u'djanalytics_webproperty', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=100)),
))
db.send_create_signal('djanalytics', ['WebProperty'])
# Adding model 'PageVisit'
db.create_table(u'djanalytics_pagevisit', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Page'])),
('visit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Visit'])),
('request_event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.RequestEvent'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('duration', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=7, decimal_places=0)),
))
db.send_create_signal('djanalytics', ['PageVisit'])
# Adding model 'Visitor'
db.create_table(u'djanalytics_visitor', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36)),
))
db.send_create_signal('djanalytics', ['Visitor'])
# Adding M2M table for field clients on 'Visitor'
m2m_table_name = db.shorten_name(u'djanalytics_visitor_clients')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('visitor', models.ForeignKey(orm['djanalytics.visitor'], null=False)),
('client', models.ForeignKey(orm['djanalytics.client'], null=False))
))
db.create_unique(m2m_table_name, ['visitor_id', 'client_id'])
# Adding model 'PagePattern'
db.create_table(u'djanalytics_pagepattern', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
('pattern', self.gf('django.db.models.fields.CharField')(max_length=200)),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Client'])),
('page_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.PageType'])),
))
db.send_create_signal('djanalytics', ['PagePattern'])
# Adding unique constraint on 'PagePattern', fields ['pattern', 'client']
db.create_unique(u'djanalytics_pagepattern', ['pattern', 'client_id'])
# Adding model 'Page'
db.create_table(u'djanalytics_page', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('path', self.gf('django.db.models.fields.URLField')(max_length=200)),
('client', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Client'])),
('page_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.PageType'], null=True)),
))
db.send_create_signal('djanalytics', ['Page'])
# Adding unique constraint on 'Page', fields ['path', 'client']
db.create_unique(u'djanalytics_page', ['path', 'client_id'])
# Adding model 'Visit'
db.create_table(u'djanalytics_visit', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36)),
('visitor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Visitor'])),
('device', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.Device'])),
('visit_date', self.gf('django.db.models.fields.DateField')()),
('duration', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=7, decimal_places=0)),
('first_page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='first_visit', to=orm['djanalytics.Page'])),
('last_page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_visit', null=True, to=orm['djanalytics.Page'])),
('conversion_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('web_property', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.WebProperty'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('djanalytics', ['Visit'])
# Adding model 'ReferrerType'
db.create_table(u'djanalytics_referrertype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=80)),
('pattern', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)),
))
db.send_create_signal('djanalytics', ['ReferrerType'])
# Adding model 'DeviceType'
db.create_table(u'djanalytics_devicetype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=80)),
))
db.send_create_signal('djanalytics', ['DeviceType'])
# Adding model 'PageType'
db.create_table(u'djanalytics_pagetype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
))
db.send_create_signal('djanalytics', ['PageType'])
# Adding field 'Domain.name'
db.add_column(u'djanalytics_domain', 'name',
self.gf('django.db.models.fields.CharField')(default='', max_length=100),
keep_default=False)
# Adding field 'Domain.web_property'
db.add_column(u'djanalytics_domain', 'web_property',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djanalytics.WebProperty'], null=True),
keep_default=False)
# Changing field 'Domain.pattern'
db.alter_column(u'djanalytics_domain', 'pattern', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
def backwards(self, orm):
# Removing unique constraint on 'Page', fields ['path', 'client']
db.delete_unique(u'djanalytics_page', ['path', 'client_id'])
# Removing unique constraint on 'PagePattern', fields ['pattern', 'client']
db.delete_unique(u'djanalytics_pagepattern', ['pattern', 'client_id'])
# Deleting model 'Device'
db.delete_table(u'djanalytics_device')
# Deleting model 'WebProperty'
db.delete_table(u'djanalytics_webproperty')
# Deleting model 'PageVisit'
db.delete_table(u'djanalytics_pagevisit')
# Deleting model 'Visitor'
db.delete_table(u'djanalytics_visitor')
# Removing M2M table for field clients on 'Visitor'
db.delete_table(db.shorten_name(u'djanalytics_visitor_clients'))
# Deleting model 'PagePattern'
db.delete_table(u'djanalytics_pagepattern')
# Deleting model 'Page'
db.delete_table(u'djanalytics_page')
# Deleting model 'Visit'
db.delete_table(u'djanalytics_visit')
# Deleting model 'ReferrerType'
db.delete_table(u'djanalytics_referrertype')
# Deleting model 'DeviceType'
db.delete_table(u'djanalytics_devicetype')
# Deleting model 'PageType'
db.delete_table(u'djanalytics_pagetype')
# Deleting field 'Domain.name'
db.delete_column(u'djanalytics_domain', 'name')
# Deleting field 'Domain.web_property'
db.delete_column(u'djanalytics_domain', 'web_property_id')
# User chose to not deal with backwards NULL issues for 'Domain.pattern'
raise RuntimeError("Cannot reverse this migration. 'Domain.pattern' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Domain.pattern'
db.alter_column(u'djanalytics_domain', 'pattern', self.gf('django.db.models.fields.CharField')(max_length=100))
models = {
'djanalytics.client': {
'Meta': {'object_name': 'Client'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'aed4ab96-ab2e-49be-8248-792b60627bd7'", 'max_length': '36'})
},
'djanalytics.device': {
'Meta': {'object_name': 'Device'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'device': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'device_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.DeviceType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'os_version': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'screen_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'screen_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'djanalytics.devicetype': {
'Meta': {'object_name': 'DeviceType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
'djanalytics.domain': {
'Meta': {'object_name': 'Domain'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'pattern': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'web_property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.WebProperty']", 'null': 'True'})
},
'djanalytics.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'netmask': ('django.db.models.fields.CharField', [], {'max_length': '19'})
},
u'djanalytics.location': {
'Meta': {'object_name': 'Location'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'djanalytics.page': {
'Meta': {'unique_together': "(('path', 'client'),)", 'object_name': 'Page'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.PageType']", 'null': 'True'}),
'path': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'djanalytics.pagepattern': {
'Meta': {'unique_together': "(('pattern', 'client'),)", 'object_name': 'PagePattern'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.PageType']"}),
'pattern': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'djanalytics.pagetype': {
'Meta': {'object_name': 'PageType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'djanalytics.pagevisit': {
'Meta': {'object_name': 'PageVisit'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Page']"}),
'request_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.RequestEvent']"}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Visit']"})
},
'djanalytics.pathfilter': {
'Meta': {'object_name': 'PathFilter'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'path_pattern': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'djanalytics.referrertype': {
'Meta': {'object_name': 'ReferrerType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'pattern': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'djanalytics.requestevent': {
'Meta': {'object_name': 'RequestEvent'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Client']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'db_index': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['djanalytics.Location']", 'null': 'True', 'blank': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.URLField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'query_string': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'referrer': ('django.db.models.fields.URLField', [], {'max_length': '2083', 'null': 'True', 'blank': 'True'}),
'response_code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'screen_height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'screen_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tracking_key': ('django.db.models.fields.CharField', [], {'default': "'8fbe15a1-2d5a-4e82-8dd3-c27dffd99b4e'", 'max_length': '36'}),
'tracking_user_id': ('django.db.models.fields.CharField', [], {'default': "'3b74c3b4-934a-497d-b90b-2872c9cf118c'", 'max_length': '36'}),
'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'djanalytics.visit': {
'Meta': {'object_name': 'Visit'},
'conversion_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Device']"}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '0'}),
'first_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'first_visit'", 'to': "orm['djanalytics.Page']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_visit'", 'null': 'True', 'to': "orm['djanalytics.Page']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['djanalytics.Page']", 'through': "orm['djanalytics.PageVisit']", 'symmetrical': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'visit_date': ('django.db.models.fields.DateField', [], {}),
'visitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.Visitor']"}),
'web_property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djanalytics.WebProperty']"})
},
'djanalytics.visitor': {
'Meta': {'object_name': 'Visitor'},
'clients': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['djanalytics.Client']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'djanalytics.webproperty': {
'Meta': {'object_name': 'WebProperty'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'})
}
}
complete_apps = ['djanalytics']
|
analytehealth/django-analytics
|
djanalytics/migrations/0007_auto__add_device__add_webproperty__add_pagevisit__add_visitor__add_pag.py
|
Python
|
bsd-2-clause
| 22,449
|
[
"VisIt"
] |
f13338cbee80ee18c3437c4a5b104ead8de6f08181ca8e92345f31f07ced7a20
|
""" The SGE TimeLeft utility interrogates the SGE batch system for the
current CPU consumed, as well as its limit.
"""
__RCSID__ = "$Id$"
import os
import re
import time
import socket
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.TimeLeft.TimeLeft import runCommand
class SGETimeLeft( object ):
"""
This is the SGE plugin of the TimeLeft Utility
"""
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'SGETimeLeft' )
self.jobID = os.environ.get( 'JOB_ID' )
self.queue = os.environ.get( 'QUEUE' )
pbsPath = os.environ.get( 'SGE_BINARY_PATH' )
if pbsPath:
os.environ['PATH'] += ':' + pbsPath
self.cpuLimit = None
self.wallClockLimit = None
self.log.verbose( 'JOB_ID=%s, QUEUE=%s' % ( self.jobID, self.queue ) )
self.startTime = time.time()
#############################################################################
def getResourceUsage( self ):
"""Returns a dictionary containing CPUConsumed, CPULimit, WallClockConsumed
and WallClockLimit for current slot. All values returned in seconds.
"""
cmd = 'qstat -f -j %s' % ( self.jobID )
result = runCommand( cmd )
if not result['OK']:
return result
cpu = None
cpuLimit = None
wallClock = None
wallClockLimit = None
lines = str( result['Value'] ).split( '\n' )
for line in lines:
if re.search( 'usage.*cpu.*', line ):
        match = re.search( r'cpu=([\d,:]*),', line )
if match:
cpuList = match.groups()[0].split( ':' )
try:
newcpu = 0.
if len( cpuList ) == 3:
newcpu = ( float( cpuList[0] ) * 60 + float( cpuList[1] ) ) * 60 + float( cpuList[2] )
elif len( cpuList ) == 4:
newcpu = ( ( float( cpuList[0] ) * 24 + float( cpuList[1] ) ) * 60 + float( cpuList[2] ) ) * 60 + float( cpuList[3] )
if not cpu or newcpu > cpu:
cpu = newcpu
except ValueError:
self.log.warn( 'Problem parsing "%s" for CPU consumed' % line )
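          # e.g. "cpu=1:02:03" (H:M:S) parses to 3723.0 s, while
          # "cpu=1:01:02:03" (D:H:M:S) gives ((1*24+1)*60+2)*60+3 = 90123.0 s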
elif re.search( 'hard resource_list.*cpu.*', line ):
        match = re.search( r'_cpu=(\d*)', line )
if match:
cpuLimit = float( match.groups()[0] )
        match = re.search( r'_rt=(\d*)', line )
if match:
wallClockLimit = float( match.groups()[0] )
# Some SGE batch systems apply CPU scaling factor to the CPU consumption figures
if cpu:
factor = _getCPUScalingFactor()
if factor:
cpu = cpu / factor
consumed = {'CPU':cpu, 'CPULimit':cpuLimit, 'WallClock':wallClock, 'WallClockLimit':wallClockLimit}
if None not in consumed.values():
      # wallClock is never extracted above, so this branch is effectively unreachable
self.log.debug( "TimeLeft counters complete:", str( consumed ) )
return S_OK( consumed )
else:
missed = [key for key, val in consumed.items() if val is None]
self.log.info( 'Could not determine parameter', ','.join( missed ) )
self.log.debug( 'This is the stdout from the batch system call\n%s' % ( result['Value'] ) )
if cpuLimit or wallClockLimit:
# We have got a partial result from SGE
if not cpuLimit:
# Take some margin
consumed['CPULimit'] = wallClockLimit * 0.8
if not wallClockLimit:
consumed['WallClockLimit'] = cpuLimit / 0.8
if not cpu:
consumed['CPU'] = time.time() - self.startTime
if not wallClock:
consumed['WallClock'] = time.time() - self.startTime
self.log.debug( "TimeLeft counters restored:", str( consumed ) )
return S_OK( consumed )
else:
msg = 'Could not determine some parameters'
self.log.info( msg, ':\nThis is the stdout from the batch system call\n%s' % ( result['Value'] ) )
retVal = S_ERROR( msg )
retVal['Value'] = consumed
return retVal
def _getCPUScalingFactor():
host = socket.getfqdn()
cmd = 'qconf -se %s' % host
result = runCommand( cmd )
if not result['OK']:
return None
_example = """Example of output for qconf -se ccwsge0640
hostname ccwsge0640.in2p3.fr
load_scaling NONE
complex_values m_mem_free=131022.000000M,m_mem_free_n0=65486.613281M, \
m_mem_free_n1=65536.000000M,os=sl6
load_values arch=lx-amd64,cpu=89.400000,fsize_used_rate=0.089, \
load_avg=36.300000,load_long=36.020000, \
load_medium=36.300000,load_short=35.960000, \
m_cache_l1=32.000000K,m_cache_l2=256.000000K, \
m_cache_l3=25600.000000K,m_core=20, \
m_mem_free=72544.000000M,m_mem_free_n0=18696.761719M, \
m_mem_free_n1=22139.621094M,m_mem_total=131022.000000M, \
m_mem_total_n0=65486.613281M, \
m_mem_total_n1=65536.000000M,m_mem_used=58478.000000M, \
m_mem_used_n0=46789.851562M,m_mem_used_n1=43396.378906M, \
m_numa_nodes=2,m_socket=2,m_thread=40, \
m_topology=SCTTCTTCTTCTTCTTCTTCTTCTTCTTCTTSCTTCTTCTTCTTCTTCTTCTTCTTCTTCTT, \
m_topology_inuse=SCTTCTTCTTCTTCTTCTTCTTCTTCTTCTTSCTTCTTCTTCTTCTTCTTCTTCTTCTTCTT, \
m_topology_numa=[SCTTCTTCTTCTTCTTCTTCTTCTTCTTCTT][SCTTCTTCTTCTTCTTCTTCTTCTTCTTCTT], \
mem_free=70513.675781M,mem_total=129001.429688M, \
mem_used=58487.753906M,memory_used_rate=0.468, \
np_load_avg=0.907500,np_load_long=0.900500, \
np_load_medium=0.907500,np_load_short=0.899000, \
num_proc=40,swap_free=0.000000M,swap_total=266.699219M, \
swap_used=266.699219M,virtual_free=70513.675781M, \
virtual_total=129268.128906M,virtual_used=58754.453125M
processors 40
user_lists NONE
xuser_lists NONE
projects NONE
xprojects NONE
usage_scaling cpu=11.350000,acct_cpu=11.350000
report_variables NONE
"""
lines = str( result['Value'] ).split( '\n' )
for line in lines:
if re.search( 'usage_scaling', line ):
      match = re.search( r'cpu=([\d,\.]*),', line )
if match:
return float( match.groups()[0] )
return None
if __name__ == '__main__':
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
print SGETimeLeft().getResourceUsage()
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
andresailer/DIRAC
|
Core/Utilities/TimeLeft/SGETimeLeft.py
|
Python
|
gpl-3.0
| 6,664
|
[
"DIRAC"
] |
49c475d21cfe42980e1728b6834eaec7576bd2378bc4c837b359e4dc9df4f707
|
# Tests for basic Tkinter widgets.
import tkinter
import Test
Test.initialise()
testData = ()
if tkinter.TkVersion >= 8.0:
button_num = 31
frame_num = 16
menu_num = 20
menubutton_num = 32
else:
button_num = 30
frame_num = 15
menu_num = 19
menubutton_num = 31
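# Each tuple below is (judging from how Test.runTests consumes testData)
# either a (method, args) call, a (method, args, expected_result) check,
# or an ('option', value) pair exercised via widget configure()/cget().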
c = tkinter.Button
tests = (
(c.pack, ()),
(Test.num_options, (), button_num),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback),
(c.flash, ()),
(c.invoke, (), '1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Canvas
tests = (
(c.pack, ()),
(Test.num_options, (), 27),
('background', 'aliceblue'),
(c.create_oval, (100, 100, 200, 200),
{'fill' : 'lightsteelblue1', 'tags' : 'circle'}, 1),
(c.create_rectangle, (200, 100, 300, 200),
{'fill' : 'lightsteelblue2', 'tags' : 'square'}, 2),
(c.create_text, (0, 200),
{'text' : 'Hello, world', 'tags' : 'words', 'anchor' : 'w'}, 3),
(c.addtag_withtag, ('lightsteelblue1', 'circle')),
(c.bbox, ('circle', 'square'), (99, 99, 301, 201)),
(c.tag_bind, ('circle', '<1>', Test.callback)),
(c.tag_bind, 'circle', '<Button-1>'),
(c.tag_unbind, ('circle', '<1>')),
(c.canvasx, 100, 100.0),
(c.canvasy, 100, 100.0),
(c.coords, 'circle', [100.0, 100.0, 200.0, 200.0]),
(c.coords, ('circle', 0, 0, 300, 300), []),
(c.coords, 'circle', [0.0, 0.0, 300.0, 300.0]),
(c.find_withtag, 'lightsteelblue1', (1,)),
(c.focus, 'circle', ''),
(c.gettags, 'circle', ('circle', 'lightsteelblue1')),
(c.icursor, ('words', 7)),
(c.index, ('words', 'insert'), 7),
(c.insert, ('words', 'insert', 'cruel ')),
(c.itemconfigure, 'circle', {'fill': 'seagreen4'}),
(c.itemcget, ('circle', 'fill'), 'seagreen4'),
(c.lower, 'words'),
(c.move, ('square', -50, -50)),
(c.tkraise, ('words', 'circle')),
(c.scale, ('circle', 150, 150, 1.0, 0.5)),
(c.select_from, ('words', 0)),
(c.select_to, ('words', 'end')),
(c.delete, 'square'),
(c.type, 'circle', 'oval'),
(c.dtag, 'lightsteelblue1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Checkbutton
tests = (
(c.pack, ()),
(Test.num_options, (), 36),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback),
(c.flash, ()),
(c.invoke, (), '1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Entry
tests = (
(c.pack, ()),
(Test.num_options, (), 28),
('background', 'lightsteelblue1'),
(c.insert, ('insert', 'Hello, Brian!')),
(c.delete, (7, 12)),
(c.icursor, 7),
(c.insert, ('insert', 'world')),
(c.get, (), 'Hello, world!'),
(c.index, 'insert', 12),
(c.selection_from, 7),
(c.selection_to, '12'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Frame
tests = (
(c.pack, ()),
(Test.num_options, (), frame_num),
('background', 'lightsteelblue1'),
('width', 300),
('height', 50),
('background', 'lightsteelblue1'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Label
tests = (
(c.pack, ()),
(Test.num_options, (), 25),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('image', Test.earthris),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Listbox
tests = (
(c.pack, ()),
(Test.num_options, (), 23),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
(c.insert, (0, 'ABC', 'DEF', 'GHI', 'XXXXXXXXXXXX')),
(c.activate, 1),
(c.select_set, (2, 3)),
(c.curselection, (), ('2', '3')),
(c.delete, 1),
(c.get, 1, 'GHI'),
(c.get, (0, 1), ('ABC', 'GHI')),
(c.index, 'end', 3),
(c.nearest, 1, 0),
(c.see, 1),
(c.size, (), 3),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Menu
tests = (
(Test.num_options, (), menu_num),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
(c.add_command, (),
{'background': 'lightsteelblue2', 'label': 'Hello World'}),
(c.add_checkbutton, (),
{'background': 'lightsteelblue2', 'label': 'Charm'}),
(c.post, (100, 100)),
(c.activate, 1),
(c.entryconfigure, 'Hello World', {'background': 'aliceblue'}),
(c.entrycget, ('Hello World', 'background'), 'aliceblue'),
(c.index, 'end', 2),
('tearoff', 0),
(c.index, 'end', 1),
(c.insert_radiobutton, 'Charm',
{'background': 'lightsteelblue2', 'label': 'Niceness',
'command': Test.callback}),
(c.invoke, 'Niceness', '1'),
(c.delete, 'Charm'),
(c.type, 'Hello World', 'command'),
(c.yposition, 'Hello World', 2),
(c.unpost, ()),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Menubutton
tests = (
(c.pack, ()),
(Test.num_options, (), menubutton_num),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Message
tests = (
(c.pack, ()),
(Test.num_options, (), 21),
('text', 'Hello World'),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('text', 'Hello\nCruel Cruel World'),
('borderwidth', 100),
('justify', 'center'),
('justify', 'right'),
('justify', 'left'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Radiobutton
tests = (
(c.pack, ()),
(Test.num_options, (), 35),
('text', 'Hello World'),
('value', 'Foo Bar'),
('variable', Test.stringvar),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('text', 'Hello\nCruel Cruel World'),
('command', Test.callback),
(c.select, ()),
(Test.stringvar.get, (), 'Foo Bar'),
(c.flash, ()),
(c.invoke, (), '1'),
(c.deselect, ()),
(Test.stringvar.get, (), ''),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Scale
tests = (
(c.pack, ()),
(Test.num_options, (), 33),
('showvalue', 1),
('orient', 'horizontal'),
('from', 100.0),
('to', 200.0),
('variable', Test.floatvar),
('background', 'lightsteelblue1'),
('foreground', 'seagreen4'),
('command', Test.callback1),
(c.set, 150.0),
(c.get, (), 150.0),
(c.get, 123, 'TypeError: too many arguments; expected 1, got 2'),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Scrollbar
tests = (
(c.pack, (), {'fill': 'x'}),
(Test.num_options, (), 20),
('orient', 'horizontal'),
(Test.set_geom, (300, 50)),
(c.set, (0.3, 0.7)),
('background', 'lightsteelblue1'),
('troughcolor', 'aliceblue'),
(c.get, (), (0.3, 0.7)),
(c.activate, 'slider'),
(c.set, (0.5, 0.9)),
(c.delta, (0, 0), 0),
(c.fraction, (0, 0), 0),
)
testData = testData + ((c, ((tests, {}),)),)
c = tkinter.Text
tests = (
(c.pack, ()),
(Test.num_options, (), 35),
('background', 'lightsteelblue1'),
(c.insert, ('end', 'This little piggy is bold.', 'bold', '\n')),
(c.insert, ('end', 'This little piggy is in green.', 'green', '\n')),
(c.insert, ('end', 'This line is a mistake.\n')),
(c.insert, ('end', 'This little piggy is crossed out.', 'overstrike', '\n')),
(c.insert, ('end', 'This little piggy is raised.', 'raised', '\n')),
(c.insert, ('end', 'This little piggy is underlined.', 'underline', '\n')),
(c.tag_configure, 'bold', {'font': Test.font['variable']}),
(c.tag_configure, 'green', {'background': 'seagreen1'}),
(c.tag_configure, 'overstrike', {'overstrike': 1}),
(c.tag_configure, 'raised',
{'background': 'aliceblue', 'borderwidth': 2, 'relief': 'raised'}),
(c.tag_configure, 'underline', {'underline': 1}),
(c.compare, ('2.0', '<', 'end'), 1),
(c.delete, ('3.0', '4.0')),
(c.get, ('1.0', '1.4'), 'This'),
(c.index, 'end', '7.0'),
(c.mark_set, ('my_mark', '4.9')),
(c.mark_gravity, ('my_mark', 'right'), ''),
(c.mark_gravity, 'my_mark', 'right'),
(c.mark_names, (), ('my_mark', 'insert', 'current')),
(c.mark_unset, 'my_mark'),
(c.insert, ('end', '\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')),
(c.insert, ('end', 'This is the last line.')),
(c.scan_mark, (0, 20)),
(c.scan_dragto, (0, 0)),
(c.scan_dragto, (0, 20)),
(c.tag_add, ('green', '1.0', '1.4')),
(c.tag_cget, ('raised', 'background'), 'aliceblue'),
(c.tag_lower, 'green'),
(c.tag_names, (),
('green', 'sel', 'bold', 'overstrike', 'raised', 'underline')),
(c.tag_nextrange, ('raised', '0.0'), ('4.0', '4.28')),
(c.tag_raise, 'green'),
(c.tag_ranges, 'green', ('1.0', '1.4', '2.0', '2.30')),
(c.tag_remove, ('green', '1.0', '1.4')),
(c.tag_ranges, 'green', ('2.0', '2.30')),
(c.tag_delete, 'green'),
(c.search, ('Gre.n', '0.0'), {'regexp': 1, 'nocase': 1}, '2.24'),
(c.search, ('Gre.n', '3.0', 'end'), {'regexp': 1, 'nocase': 1}, ''),
(c.see, 'end'),
(c.see, '0.0'),
)
testData = testData + ((c, ((tests, {}),)),)
#=============================================================================
# Grid command
def _makeGridButtons():
w = Test.currentWidget()
b1 = tkinter.Button(w, text = 'Button 1')
b2 = tkinter.Button(w, text = 'Button 2')
b3 = tkinter.Button(w, text = 'Button 3')
b4 = tkinter.Button(w, text = 'Button 4')
b5 = tkinter.Button(w, text = 'Button 5')
b6 = tkinter.Button(w, text = 'Button 6')
b7 = tkinter.Button(w, text = 'Button 7')
b8 = tkinter.Button(w, text = 'Button 8')
b1.grid(column=0, row=0)
b2.grid(column=1, row=0)
b3.grid(column=2, row=0, ipadx=50, ipady=50, padx=50, pady=50, sticky='nsew')
b4.grid(column=3, row=0)
b5.grid(column=0, row=1)
b6.grid(column=2, row=1, columnspan=2, rowspan=2, sticky='nsew')
b7.grid(column=0, row=2)
b8.grid(column=0, row=3, columnspan=4, padx=50, sticky='ew')
def _checkGridSlaves():
w = Test.currentWidget()
return len(w.grid_slaves())
def _checkGridInfo():
w = Test.currentWidget()
b8 = w.grid_slaves(column=0, row=3)[0]
info = b8.grid_info()
if info['in'] == w:
rtn = {}
for key, value in list(info.items()):
if key != 'in':
rtn[key] = value
return rtn
return 'BAD'
def _checkGridForget():
w = Test.currentWidget()
b8 = w.grid_slaves(column=0, row=3)[0]
b8.grid_forget()
return w.grid_size()
# The -pad grid option was added in Tk 4.2.
# Could not do columnconfigure(0) before Tk 4.2.
if tkinter.TkVersion >= 4.2:
padTest = {'pad': 25}
colTest = {'minsize': 100, 'pad': 25, 'weight': 1}
rowTest = {'minsize': 100, 'pad': 0, 'weight': 1}
else:
padTest = {'minsize': 100}
colTest = 'TclError: wrong # args: should be "grid columnconfigure master index ?-option value...?"'
rowTest = 'TclError: wrong # args: should be "grid rowconfigure master index ?-option value...?"'
c = tkinter.Frame
tests = (
(c.pack, (), {'fill': 'both', 'expand': 1}),
(_makeGridButtons, ()),
# (c.grid_bbox, (1, 2), (85, 268, 85, 34)),
(c.grid_columnconfigure, (0, 'minsize'), 0),
(c.grid_columnconfigure, (0, 'weight'), 0),
(c.grid_columnconfigure, 0, {'minsize': 100, 'weight': 1}),
(c.grid_columnconfigure, 0, padTest),
(c.grid_columnconfigure, 0, {}, colTest),
(c.grid_columnconfigure, (0, 'minsize'), 100),
(c.grid_columnconfigure, (0, 'weight'), 1),
(c.location, (200, 100), (2, 0)),
(c.grid_propagate, (), 1),
(c.grid_propagate, 0),
(c.grid_propagate, (), 0),
(c.grid_rowconfigure, (0, 'minsize'), 0),
(c.grid_rowconfigure, (0, 'weight'), 0),
(c.grid_rowconfigure, 0, {'minsize': 100, 'weight': 1}),
(c.grid_rowconfigure, 0, {}, rowTest),
(c.grid_size, (), (4, 4)),
(_checkGridSlaves, (), 8),
(_checkGridInfo, (), {}, {'column': '0', 'columnspan': '4',
'ipadx': '0', 'ipady': '0', 'padx': '50', 'pady': '0',
'row': '3', 'rowspan': '1', 'sticky': 'ew',
}),
(_checkGridForget, (), (4, 3)),
(_checkGridSlaves, (), 7),
)
testData = testData + ((c, ((tests, {}),)),)
if __name__ == '__main__':
#Test.setverbose(1)
#Test.setdelay(1000)
Test.runTests(testData)
|
wolf29f/iCook
|
iCook/Pmw/Pmw_2_0_0/tests/Tkinter_test.py
|
Python
|
gpl-2.0
| 12,225
|
[
"Brian"
] |
ad125878b63d06e8f69624aabb54eda9555c8538c648526fa9fc21f2c4ce5d8d
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Mostly taken from PasteDeploy and stripped down for Galaxy
import inspect
import os
import re
import sys
from typing import Callable, Dict, List, Optional, Union
from urllib.parse import unquote
import pkg_resources
from galaxy.util.properties import NicerConfigParser
__all__ = ('loadapp', 'loadserver', 'loadfilter', 'appconfig')
# ---- from paste.deploy.compat --------------------------------------
"""Python 2<->3 compatibility module"""
def print_(template, *args, **kwargs):
template = str(template)
if args:
template = template % args
elif kwargs:
template = template % kwargs
sys.stdout.writelines(template)
def reraise(t, e, tb):
exec('raise e from tb', dict(e=e, tb=tb))
# ---- from paste.deploy.util ----------------------------------------
def fix_type_error(exc_info, callable, varargs, kwargs):
"""
Given an exception, this will test if the exception was due to a
signature error, and annotate the error with better information if
so.
Usage::
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
            reraise(*exc_info)
"""
if exc_info is None:
exc_info = sys.exc_info()
if (exc_info[0] != TypeError
or str(exc_info[1]).find('argument') == -1
or getattr(exc_info[1], '_type_error_fixed', False)):
return exc_info
exc_info[1]._type_error_fixed = True
argspec = inspect.formatargspec(*inspect.getfullargspec(callable))
args = ', '.join(map(_short_repr, varargs))
if kwargs and args:
args += ', '
if kwargs:
kwargs = sorted(kwargs.keys())
args += ', '.join('%s=...' % n for n in kwargs)
gotspec = '(%s)' % args
msg = '{}; got {}, wanted {}'.format(exc_info[1], gotspec, argspec)
exc_info[1].args = (msg,)
return exc_info
def _short_repr(v):
v = repr(v)
if len(v) > 12:
v = v[:8] + '...' + v[-4:]
return v
def fix_call(callable, *args, **kw):
"""
Call ``callable(*args, **kw)`` fixing any type errors that come out.
"""
try:
val = callable(*args, **kw)
except TypeError:
exc_info = fix_type_error(None, callable, args, kw)
reraise(*exc_info)
return val
def lookup_object(spec):
"""
    Look up a module or object from a ``some.module:func_name`` specification.
    To look up just a module, omit the colon and everything after it.
"""
parts, target = spec.split(':') if ':' in spec else (spec, None)
module = __import__(parts)
for part in parts.split('.')[1:] + ([target] if target else []):
module = getattr(module, part)
return module
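# For example, lookup_object('os.path:join') returns the join function,
# while lookup_object('os.path') returns the os.path module itself.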
# ---- from paste.deploy.loadwsgi ------------------------------------
############################################################
# Utility functions
############################################################
def import_string(s):
return pkg_resources.EntryPoint.parse("x=" + s).load(False)
def _aslist(obj):
"""
Turn object into a list; lists and tuples are left as-is, None
becomes [], and everything else turns into a one-element list.
"""
if obj is None:
return []
elif isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
def _flatten(lst):
"""
Flatten a nested list.
"""
if not isinstance(lst, (list, tuple)):
return [lst]
result = []
for item in lst:
result.extend(_flatten(item))
return result
############################################################
# Object types
############################################################
class _ObjectType:
name: Optional[str] = None
egg_protocols: Optional[List[Union[str, List[str]]]] = None
config_prefixes: Optional[List[Union[List[str], str]]] = None
def __init__(self):
# Normalize these variables:
self.egg_protocols = [_aslist(p) for p in _aslist(self.egg_protocols)]
self.config_prefixes = [_aslist(p) for p in _aslist(self.config_prefixes)]
def __repr__(self):
return '<{} protocols={!r} prefixes={!r}>'.format(
self.name, self.egg_protocols, self.config_prefixes)
def invoke(self, context):
assert context.protocol in _flatten(self.egg_protocols)
return fix_call(context.object,
context.global_conf, **context.local_conf)
class _App(_ObjectType):
name = 'application'
egg_protocols = ['paste.app_factory', 'paste.composite_factory',
'paste.composit_factory']
config_prefixes = [['app', 'application'], ['composite', 'composit'],
'pipeline', 'filter-app']
def invoke(self, context):
if context.protocol in ('paste.composit_factory',
'paste.composite_factory'):
return fix_call(context.object,
context.loader, context.global_conf,
**context.local_conf)
elif context.protocol == 'paste.app_factory':
return fix_call(context.object, context.global_conf, **context.local_conf)
else:
assert 0, "Protocol %r unknown" % context.protocol
APP = _App()
class _Filter(_ObjectType):
name = 'filter'
egg_protocols = [['paste.filter_factory', 'paste.filter_app_factory']]
config_prefixes = ['filter']
def invoke(self, context):
if context.protocol == 'paste.filter_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.filter_app_factory':
def filter_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return filter_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
FILTER = _Filter()
class _Server(_ObjectType):
name = 'server'
egg_protocols = [['paste.server_factory', 'paste.server_runner']]
config_prefixes = ['server']
def invoke(self, context):
if context.protocol == 'paste.server_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.server_runner':
def server_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return server_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
SERVER = _Server()
# Virtual type: (@@: There's clearly something crufty here;
# this probably could be more elegant)
class _PipeLine(_ObjectType):
name = 'pipeline'
def invoke(self, context):
app = context.app_context.create()
filters = [c.create() for c in context.filter_contexts]
filters.reverse()
for filter_ in filters:
app = filter_(app)
return app
PIPELINE = _PipeLine()
class _FilterApp(_ObjectType):
name = 'filter_app'
def invoke(self, context):
next_app = context.next_context.create()
filter_ = context.filter_context.create()
return filter_(next_app)
FILTER_APP = _FilterApp()
class _FilterWith(_App):
name = 'filtered_with'
def invoke(self, context):
filter_ = context.filter_context.create()
filtered = context.next_context.create()
if context.next_context.object_type is APP:
return filter_(filtered)
else:
# filtering a filter
def composed(app):
return filter_(filtered(app))
return composed
FILTER_WITH = _FilterWith()
############################################################
# Loaders
############################################################
def loadapp(uri, name=None, **kw):
return loadobj(APP, uri, name=name, **kw)
def loadfilter(uri, name=None, **kw):
return loadobj(FILTER, uri, name=name, **kw)
def loadserver(uri, name=None, **kw):
return loadobj(SERVER, uri, name=name, **kw)
def appconfig(uri, name=None, relative_to=None, global_conf=None):
context = loadcontext(APP, uri, name=name,
relative_to=relative_to,
global_conf=global_conf)
return context.config()
_loaders: Dict[str, Callable] = {}
def loadobj(object_type, uri, name=None, relative_to=None,
global_conf=None):
context = loadcontext(
object_type, uri, name=name, relative_to=relative_to,
global_conf=global_conf)
return context.create()
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if '#' in uri:
if name is None:
uri, name = uri.split('#', 1)
else:
# @@: Ignore fragment or error?
uri = uri.split('#', 1)[0]
if name is None:
name = 'main'
if ':' not in uri:
raise LookupError("URI has no scheme: %r" % uri)
scheme, path = uri.split(':', 1)
scheme = scheme.lower()
if scheme not in _loaders:
raise LookupError(
"URI scheme not known: %r (from %s)"
% (scheme, ', '.join(_loaders.keys())))
return _loaders[scheme](
object_type,
uri, path, name=name, relative_to=relative_to,
global_conf=global_conf)
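# Illustrative URIs for the schemes registered below: 'config:' loads an INI
# file (e.g. config:/srv/app.ini#main), 'egg:' resolves an entry point
# (egg:SomePackage#main) and 'call:' imports a factory directly
# (call:some.module:factory). The '#fragment' picks the named section or
# object; it defaults to 'main'.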
def _loadconfig(object_type, uri, path, name, relative_to,
global_conf):
isabs = os.path.isabs(path)
# De-Windowsify the paths:
path = path.replace('\\', '/')
if not isabs:
if not relative_to:
raise ValueError(
"Cannot resolve relative uri %r; no relative_to keyword "
"argument given" % uri)
relative_to = relative_to.replace('\\', '/')
if relative_to.endswith('/'):
path = relative_to + path
else:
path = relative_to + '/' + path
if path.startswith('///'):
path = path[2:]
path = unquote(path)
loader = ConfigLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
_loaders['config'] = _loadconfig
def _loadegg(object_type, uri, spec, name, relative_to,
global_conf):
loader = EggLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['egg'] = _loadegg
def _loadfunc(object_type, uri, spec, name, relative_to,
global_conf):
loader = FuncLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['call'] = _loadfunc
############################################################
# Loaders
############################################################
class _Loader:
def get_app(self, name=None, global_conf=None):
return self.app_context(
name=name, global_conf=global_conf).create()
def get_filter(self, name=None, global_conf=None):
return self.filter_context(
name=name, global_conf=global_conf).create()
def get_server(self, name=None, global_conf=None):
return self.server_context(
name=name, global_conf=global_conf).create()
def app_context(self, name=None, global_conf=None):
return self.get_context(
APP, name=name, global_conf=global_conf)
def filter_context(self, name=None, global_conf=None):
return self.get_context(
FILTER, name=name, global_conf=global_conf)
def server_context(self, name=None, global_conf=None):
return self.get_context(
SERVER, name=name, global_conf=global_conf)
_absolute_re = re.compile(r'^[a-zA-Z]+:')
def absolute_name(self, name):
"""
Returns true if the name includes a scheme
"""
if name is None:
return False
return self._absolute_re.search(name)
class ConfigLoader(_Loader):
def __init__(self, filename):
self.filename = filename = filename.strip()
defaults = {
'here': os.path.dirname(os.path.abspath(filename)),
'__file__': os.path.abspath(filename)
}
self.parser = NicerConfigParser(filename, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
with open(filename) as f:
self.parser.read_file(f)
def update_defaults(self, new_defaults, overwrite=True):
for key, value in new_defaults.items():
if not overwrite and key in self.parser._defaults:
continue
self.parser._defaults[key] = value
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
relative_to=os.path.dirname(self.filename),
global_conf=global_conf)
section = self.find_config_section(
object_type, name=name)
if global_conf is None:
global_conf = {}
else:
global_conf = global_conf.copy()
defaults = self.parser.defaults()
global_conf.update(defaults)
local_conf = {}
global_additions = {}
get_from_globals = {}
for option in self.parser.options(section):
            if option.startswith('set '):
                # use a separate variable so we don't clobber the ``name``
                # argument, which is used again further down
                option_name = option[4:].strip()
                global_additions[option_name] = global_conf[option_name] = (
                    self.parser.get(section, option))
            elif option.startswith('get '):
                option_name = option[4:].strip()
                get_from_globals[option_name] = self.parser.get(section, option)
else:
if option in defaults:
# @@: It's a global option (?), so skip it
continue
local_conf[option] = self.parser.get(section, option)
for local_var, glob_var in get_from_globals.items():
local_conf[local_var] = global_conf[glob_var]
if object_type in (APP, FILTER) and 'filter-with' in local_conf:
filter_with = local_conf.pop('filter-with')
else:
filter_with = None
if 'require' in local_conf:
for spec in local_conf['require'].split():
pkg_resources.require(spec)
del local_conf['require']
if section.startswith('filter-app:'):
context = self._filter_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif section.startswith('pipeline:'):
context = self._pipeline_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif 'use' in local_conf:
context = self._context_from_use(
object_type, local_conf, global_conf, global_additions,
section)
else:
context = self._context_from_explicit(
object_type, local_conf, global_conf, global_additions,
section)
if filter_with is not None:
filter_with_context = LoaderContext(
obj=None,
object_type=FILTER_WITH,
protocol=None,
global_conf=global_conf, local_conf=local_conf,
loader=self)
filter_with_context.filter_context = self.filter_context(
name=filter_with, global_conf=global_conf)
filter_with_context.next_context = context
return filter_with_context
return context
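    # Illustrative sketch of the option handling in get_context above; the
    # section and values are hypothetical, not taken from a real config:
    #
    #     [app:main]
    #     use = egg:MyApp
    #     set debug = false       ; written into global_conf
    #     get db_url = database   ; copied from global_conf into local_conf
    #     filter-with = gzip      ; wraps the app with the [filter:gzip] section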
def _context_from_use(self, object_type, local_conf, global_conf,
global_additions, section):
use = local_conf.pop('use')
context = self.get_context(
object_type, name=use, global_conf=global_conf)
context.global_conf.update(global_additions)
context.local_conf.update(local_conf)
if '__file__' in global_conf:
# use sections shouldn't overwrite the original __file__
context.global_conf['__file__'] = global_conf['__file__']
# @@: Should loader be overwritten?
context.loader = self
if context.protocol is None:
# Determine protocol from section type
section_protocol = section.split(':', 1)[0]
if section_protocol in ('application', 'app'):
context.protocol = 'paste.app_factory'
elif section_protocol in ('composit', 'composite'):
context.protocol = 'paste.composit_factory'
else:
                # This works for 'server' and 'filter' sections; anything
                # else may fail later, but bad protocols already produce
                # their own error message
context.protocol = 'paste.%s_factory' % section_protocol
return context
def _context_from_explicit(self, object_type, local_conf, global_conf,
global_addition, section):
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
if protocol in local_conf:
possible.append((protocol, local_conf[protocol]))
break
if len(possible) > 1:
raise LookupError(
"Multiple protocols given in section %r: %s"
% (section, possible))
if not possible:
raise LookupError(
"No loader given in section %r" % section)
found_protocol, found_expr = possible[0]
del local_conf[found_protocol]
value = import_string(found_expr)
context = LoaderContext(
value, object_type, found_protocol,
global_conf, local_conf, self)
return context
def _filter_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'next' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'next' setting"
% (section, self.filename))
next_name = local_conf.pop('next')
context = LoaderContext(None, FILTER_APP, None, global_conf,
local_conf, self)
context.next_context = self.get_context(
APP, next_name, global_conf)
if 'use' in local_conf:
context.filter_context = self._context_from_use(
FILTER, local_conf, global_conf, global_additions,
section)
else:
context.filter_context = self._context_from_explicit(
FILTER, local_conf, global_conf, global_additions,
section)
return context
def _pipeline_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'pipeline' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'pipeline' setting"
% (section, self.filename))
pipeline = local_conf.pop('pipeline').split()
if local_conf:
raise LookupError(
"The [%s] pipeline section in %s has extra "
"(disallowed) settings: %s"
                % (section, self.filename, ', '.join(local_conf.keys())))
context = LoaderContext(None, PIPELINE, None, global_conf,
local_conf, self)
context.app_context = self.get_context(
APP, pipeline[-1], global_conf)
context.filter_contexts = [
self.get_context(FILTER, pname, global_conf)
for pname in pipeline[:-1]]
return context
def find_config_section(self, object_type, name=None):
"""
        Return the section name with the given name prefix (following the
        same pattern as ``protocol_desc`` in ``config``).  It must have the
        given name, or for ``'main'`` an empty name is allowed.  The
        prefix must be followed by a ``:``.
Case is *not* ignored.
"""
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0]
def _find_sections(self, sections, name_prefix, name):
found = []
if name is None:
if name_prefix in sections:
found.append(name_prefix)
name = 'main'
for section in sections:
if section.startswith(name_prefix + ':'):
if section[len(name_prefix) + 1:].strip() == name:
found.append(section)
return found
class EggLoader(_Loader):
def __init__(self, spec):
self.spec = spec
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
global_conf=global_conf)
entry_point, protocol, ep_name = self.find_egg_entry_point(
object_type, name=name)
return LoaderContext(
entry_point,
object_type,
protocol,
global_conf or {}, {},
self,
distribution=pkg_resources.get_distribution(self.spec),
entry_point_name=ep_name)
def find_egg_entry_point(self, object_type, name=None):
"""
        Return the ``(entry_point, protocol, name)`` tuple for the entry
        point with the given ``name``.
"""
if name is None:
name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
pkg_resources.require(self.spec)
entry = pkg_resources.get_entry_info(
self.spec,
protocol,
name)
if entry is not None:
possible.append((entry.load(), protocol, entry.name))
break
if not possible:
# Better exception
dist = pkg_resources.get_distribution(self.spec)
raise LookupError(
"Entry point %r not found in egg %r (dir: %s; protocols: %s; "
"entry_points: %s)"
% (name, self.spec,
dist.location,
', '.join(_flatten(object_type.egg_protocols)),
', '.join(_flatten([
list((pkg_resources.get_entry_info(self.spec, prot, name) or {}).keys())
for prot in protocol_options] or '(no entry points)'))))
if len(possible) > 1:
raise LookupError(
"Ambiguous entry points for %r in egg %r (protocols: %s)"
% (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
class FuncLoader(_Loader):
""" Loader that supports specifying functions inside modules, without
using eggs at all. Configuration should be in the format:
use = call:my.module.path:function_name
Dot notation is supported in both the module and function name, e.g.:
use = call:my.module.path:object.method
"""
def __init__(self, spec):
self.spec = spec
if ':' not in spec:
raise LookupError("Configuration not in format module:function")
def get_context(self, object_type, name=None, global_conf=None):
obj = lookup_object(self.spec)
return LoaderContext(
obj,
object_type,
None, # determine protocol from section type
global_conf or {},
{},
self,
)
class LoaderContext:
def __init__(self, obj, object_type, protocol,
global_conf, local_conf, loader,
distribution=None, entry_point_name=None):
self.object = obj
self.object_type = object_type
self.protocol = protocol
# assert protocol in _flatten(object_type.egg_protocols), (
# "Bad protocol %r; should be one of %s"
# % (protocol, ', '.join(map(repr, _flatten(object_type.egg_protocols)))))
self.global_conf = global_conf
self.local_conf = local_conf
self.loader = loader
self.distribution = distribution
self.entry_point_name = entry_point_name
def create(self):
return self.object_type.invoke(self)
def config(self):
conf = AttrDict(self.global_conf)
conf.update(self.local_conf)
conf.local_conf = self.local_conf
conf.global_conf = self.global_conf
conf.context = self
return conf
class AttrDict(dict):
"""
A dictionary that can be assigned to.
"""
|
galaxyproject/pulsar
|
pulsar/util/pastescript/loadwsgi.py
|
Python
|
apache-2.0
| 26,321
|
[
"Galaxy"
] |
abfd0ec2a8ef93308a0d50975db7f09336a237601902bfa4d14b679f7022636d
|
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import csv
import geometry
import glob
import os
import math
import sqlite3
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin,\
FileOpenDialogModuleMixin
import module_utils
import vtk
import vtkgdcm
import wx
MAJOR_MARKER_SIZE = 10
MINOR_MARKER_SIZE = 7
STATE_INIT = 0
STATE_IMAGE_LOADED = 1
STATE_APEX = 2 # clicked apex
STATE_LM = 3 # clicked lower middle
STATE_NORMAL_MARKERS = 4 # after first marker has been placed
class Measurement:
filename = ''
apex = (0,0) # in pixels
lm = (0,0)
pogo_dist = 0 # distance between apex and lm in pixels
area = 0 # current area, in floating point pixels squared
class LarynxMeasurement(IntrospectModuleMixin, FileOpenDialogModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._state = STATE_INIT
self._config.filename = None
self._current_measurement = None
# pogo line first
# outline of larynx second
self._actors = []
# list of pointwidgets, first is apex, second is lm, others
# are others. :)
self._markers = []
self._pogo_line_source = None
self._area_polydata = None
self._view_frame = None
self._viewer = None
self._reader = vtk.vtkJPEGReader()
self._create_view_frame()
self._bind_events()
self.view()
# all modules should toggle this once they have shown their
# stuff.
self.view_initialised = True
self.config_to_logic()
self.logic_to_config()
self.config_to_view()
def _bind_events(self):
self._view_frame.start_button.Bind(
wx.EVT_BUTTON, self._handler_start_button)
self._view_frame.next_button.Bind(
wx.EVT_BUTTON, self._handler_next_button)
self._view_frame.reset_button.Bind(
wx.EVT_BUTTON, self._handler_reset_button)
self._view_frame.save_csv.Bind(
wx.EVT_BUTTON, self._handler_save_csv_button)
self._view_frame.rwi.AddObserver(
'LeftButtonPressEvent',
self._handler_rwi_lbp)
def _create_view_frame(self):
import resources.python.larynx_measurement_frame
reload(resources.python.larynx_measurement_frame)
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
resources.python.larynx_measurement_frame.LarynxMeasurementFrame)
module_utils.create_standard_object_introspection(
self, self._view_frame, self._view_frame.view_frame_panel,
{'Module (self)' : self})
# now setup the VTK stuff
if self._viewer is None and not self._view_frame is None:
# vtkImageViewer() does not zoom but retains colour
# vtkImageViewer2() does zoom but discards colour at
# first window-level action.
# vtkgdcm.vtkImageColorViewer() does both right!
self._viewer = vtkgdcm.vtkImageColorViewer()
self._viewer.SetupInteractor(self._view_frame.rwi)
self._viewer.GetRenderer().SetBackground(0.3,0.3,0.3)
self._set_image_viewer_dummy_input()
pp = vtk.vtkPointPicker()
pp.SetTolerance(0.0)
self._view_frame.rwi.SetPicker(pp)
def close(self):
for i in range(len(self.get_input_descriptions())):
self.set_input(i, None)
# with this complicated de-init, we make sure that VTK is
# properly taken care of
self._viewer.GetRenderer().RemoveAllViewProps()
self._viewer.SetupInteractor(None)
self._viewer.SetRenderer(None)
# this finalize makes sure we don't get any strange X
# errors when we kill the module.
self._viewer.GetRenderWindow().Finalize()
self._viewer.SetRenderWindow(None)
del self._viewer
# done with VTK de-init
self._view_frame.Destroy()
del self._view_frame
ModuleBase.close(self)
def get_input_descriptions(self):
return ()
def get_output_descriptions(self):
return ()
def set_input(self, idx, input_stream):
raise RuntimeError
def get_output(self, idx):
raise RuntimeError
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
# there is no explicit apply step in this viewer module, so we
# keep the config up to date throughout (this is common for
# pure viewer modules)
pass
def config_to_view(self):
# this will happen right after module reload / network load
if self._config.filename is not None:
self._start(self._config.filename)
def view(self):
self._view_frame.Show()
self._view_frame.Raise()
# we need to do this to make sure that the Show() and Raise() above
# are actually performed. Not doing this is what resulted in the
# "empty renderwindow" bug after module reloading, and also in the
# fact that shortly after module creation dummy data rendered outside
# the module frame.
wx.SafeYield()
self.render()
# so if we bring up the view after having executed the network once,
# re-executing will not do a set_input()! (the scheduler doesn't
# know that the module is now dirty) Two solutions:
# * make module dirty when view is activated
# * activate view at instantiation. <--- we're doing this now.
def execute_module(self):
pass
def _add_normal_marker(self, world_pos):
        if len(self._markers) < 2:
raise RuntimeError(
'There should be 2 or more markers by now!')
pw = self._add_marker(world_pos, (0,1,0), 0.005)
self._markers.append(pw)
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_nm_ie)
def _add_area_polygon(self):
pd = vtk.vtkPolyData()
self._area_polydata = pd
m = vtk.vtkPolyDataMapper()
m.SetInput(pd)
a = vtk.vtkActor()
a.SetMapper(m)
self._viewer.GetRenderer().AddActor(a)
self._actors.append(a)
def _add_pogo_line(self):
ls = vtk.vtkLineSource()
self._pogo_line_source = ls
m = vtk.vtkPolyDataMapper()
m.SetInput(ls.GetOutput())
a = vtk.vtkActor()
a.SetMapper(m)
prop = a.GetProperty()
prop.SetLineStipplePattern(0x1010)
prop.SetLineStippleRepeatFactor(1)
self._viewer.GetRenderer().AddActor(a)
self._actors.append(a)
self._update_pogo_distance()
self.render()
def _add_sphere(self, world_pos, radius, colour):
ss = vtk.vtkSphereSource()
ss.SetRadius(radius)
m = vtk.vtkPolyDataMapper()
m.SetInput(ss.GetOutput())
a = vtk.vtkActor()
a.SetMapper(m)
a.SetPosition(world_pos)
a.GetProperty().SetColor(colour)
self._viewer.GetRenderer().AddActor(a)
self.render()
def _add_marker(self, world_pos, colour, size=0.01):
"""
@param size: fraction of visible prop bounds diagonal.
"""
#self._add_sphere(world_pos, MAJOR_MARKER_SIZE, (1,1,0))
pw = vtk.vtkPointWidget()
# we're giving it a small bounding box
pw.TranslationModeOn()
b = self._viewer.GetRenderer().ComputeVisiblePropBounds()
# calculate diagonal
dx,dy = b[1] - b[0], b[3] - b[2]
diag = math.hypot(dx,dy)
d = size * diag
w = world_pos
pwb = w[0] - d, w[0] + d, \
w[1] - d, w[1] + d, \
b[4], b[5]
pw.PlaceWidget(pwb)
pw.SetPosition(world_pos)
pw.SetInteractor(self._view_frame.rwi)
pw.AllOff()
pw.GetProperty().SetColor(colour)
pw.On()
return pw
def _add_apex_marker(self, world_pos):
# this method should only be called when the list is empty!
if self._markers:
raise RuntimeError('Marker list is not empty!')
self._markers.append(self._add_marker(world_pos, (1,1,0)))
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_alm_ie)
def _add_lm_marker(self, world_pos):
if len(self._markers) != 1:
raise RuntimeError(
'Marker list should have only one entry!')
self._markers.append(self._add_marker(world_pos, (0,1,1)))
self._markers[-1].AddObserver(
'InteractionEvent',
self._handler_alm_ie)
def _create_db(self, filename):
con = sqlite3.connect(filename)
con.execute(
"""create table images
(id integer primary key, filename varchar unique)""")
con.execute(
"""create table coords
(
""")
def _handler_alm_ie(self, pw=None, vtk_e=None):
self._update_pogo_distance()
self._update_area()
def _handler_nm_ie(self, pw=None, vtk_e=None):
self._update_area()
def _handler_rwi_lbp(self, vtk_o, vtk_e):
# we only handle this if the user is pressing shift
if not vtk_o.GetShiftKey():
return
pp = vtk_o.GetPicker() # this will be our pointpicker
x,y = vtk_o.GetEventPosition()
#iapp = vtk.vtkImageActorPointPlacer()
#ren = self._viewer.GetRenderer()
#iapp.SetImageActor(our_actor)
#iapp.ComputeWorldPosition(ren, display_pos, 3xdouble,
# 9xdouble)
if not pp.Pick(x,y,0,self._viewer.GetRenderer()):
print "off image!"
else:
print pp.GetMapperPosition()
# now also get WorldPos
ren = self._viewer.GetRenderer()
ren.SetDisplayPoint(x,y,0)
ren.DisplayToWorld()
w = ren.GetWorldPoint()[0:3]
print w
# we have a picked position and a world point, now decide
# what to do based on our current state
if self._state == STATE_IMAGE_LOADED:
# put down the apex ball
self._add_apex_marker(w)
self._state = STATE_APEX
elif self._state == STATE_APEX:
# put down the LM ball
self._add_lm_marker(w)
self._add_pogo_line()
self._state = STATE_LM
elif self._state == STATE_LM:
# now we're putting down all other markers
self._add_normal_marker(w)
# now create the polydata
self._add_area_polygon()
self._update_area()
self._state = STATE_NORMAL_MARKERS
elif self._state == STATE_NORMAL_MARKERS:
self._add_normal_marker(w)
self._update_area()
def _handler_reset_button(self, evt):
if self._current_measurement.filename:
self._start(self._current_measurement.filename,
reset=True)
def _handler_save_csv_button(self, evt):
fn = self._current_measurement.filename
if not os.path.exists(fn):
return
self._save_dacs_to_csv(fn)
def _handler_start_button(self, evt):
# let user pick image
# - close down any running analysis
# - analyze all jpg images in that dir
# - read / initialise SQL db
# first get filename from user
filename = self.filename_browse(self._view_frame,
'Select FIRST subject image to start processing',
'Subject image (*.jpg)|*.jpg;*.JPG',
style=wx.OPEN)
if filename:
self._start(filename)
def _handler_next_button(self, evt):
# write everything to to measurement files
# first the points
fn = self._current_measurement.filename
if len(self._markers) > 0:
points_name = '%s.pts' % (fn,)
f = open(points_name, 'w')
pts = [m.GetPosition()[0:3] for m in self._markers]
f.write(str(pts))
f.close()
if len(self._markers) >= 3:
# we only write the DAC if there are at least 3 markers,
# else the measurement is not valid...
# then the distance, area and cormack lehane
dac_name = '%s.dac' % (fn,)
f = open(dac_name, 'w')
clg1 = int(self._view_frame.clg1_cbox.GetValue())
d = self._current_measurement.pogo_dist
a = self._current_measurement.area
dac = [d,a,clg1]
f.write(str(dac))
f.close()
# IS there a next file?
# get ext and dir of current file
current_fn = self._current_measurement.filename
# ext is '.JPG'
ext = os.path.splitext(current_fn)[1]
dir = os.path.dirname(current_fn)
all_files = glob.glob(os.path.join(dir, '*%s' % (ext,)))
# we assume the user has this covered (filenames padded)
all_files.sort()
# find index of current file, take next image
idx = all_files.index(current_fn) + 1
if idx < len(all_files):
new_filename = all_files[idx]
else:
new_filename = all_files[0]
self._start(new_filename)
def _load_measurement(self, new_filename):
# see if there's a points file that we can use
points_name = '%s.pts' % (new_filename,)
try:
f = open(points_name)
except IOError:
pass
else:
# just evaluate what's in there, should be an array of
# three-element tuples (we're going to write away the
# world-pos coordinates)
points = eval(f.read(), {"__builtins__": {}})
f.close()
try:
self._add_apex_marker(points[0])
self._state = STATE_APEX
self._add_lm_marker(points[1])
self._add_pogo_line()
self._state = STATE_LM
self._add_normal_marker(points[2])
self._add_area_polygon()
self._update_area()
self._state = STATE_NORMAL_MARKERS
for pt in points[3:]:
self._add_normal_marker(pt)
self._update_area()
except IndexError:
pass
# now make sure everything else is updated
self._update_pogo_distance()
self._update_area()
# cormack lehane grade
dac_name = '%s.dac' % (new_filename,)
try:
f = open(dac_name)
except IOError:
pass
else:
dist, area, clg1 = eval(f.read(), {"__builtins__":{}})
f.close()
#self._current_measurement.clg1 = clg
self._view_frame.clg1_cbox.SetValue(clg1)
def render(self):
# if you call self._viewer.Render() here, you get the
# VTK-window out of main window effect at startup. So don't.
self._view_frame.rwi.Render()
def _reset_image_pz(self):
"""Reset the pan/zoom of the current image.
"""
ren = self._viewer.GetRenderer()
ren.ResetCamera()
def _save_dacs_to_csv(self, current_fn):
# make list of all filenames in current directory
# load all dacs
img_ext = os.path.splitext(current_fn)[1]
dir = os.path.dirname(current_fn)
all_images = glob.glob(os.path.join(dir, '*%s' % (img_ext,)))
all_dacs = glob.glob(os.path.join(dir, '*%s.dac' % (img_ext,)))
if len(all_dacs) == 0:
self._module_manager.log_error(
"No measurements to save yet.")
return
if len(all_dacs) % 3 != 0:
self._module_manager.log_error(
"Number of measurements not a multiple of 3!\n"
"Can't write CSV file.")
return
if len(all_dacs) != len(all_images):
self._module_manager.log_warning(
"You have not yet measured all images yet.\n"
"Will write CSV anyway, please double-check.")
# sort the dacs
all_dacs.sort()
csv_fn = os.path.join(dir, 'measurements.csv')
csv_f = open(csv_fn, 'w')
wrtr = csv.writer(csv_f, delimiter=',',
quotechar='"')
# write header row
wrtr.writerow([
'name', 'clg1 a', 'clg1 b', 'clg1 c',
'norm dist a', 'norm dist b', 'norm dist c',
'dist a', 'dist b', 'dist c',
'norm area a', 'norm area b', 'norm area c',
'area a', 'area b', 'area c'
])
# now go through all the dac files and write them out in
# multiples of three
for i in range(len(all_dacs) / 3):
three_names = []
clg = []
norm_dist = []
dist = []
norm_area = []
area = []
for j in range(3):
# get dac filename and read its contents
dfn = all_dacs[i*3 + j]
d,a,c = eval(open(dfn).read(),
{"__builtins__":{}})
# create short (extensionless) filename for creating
# the measurement title
sfn = os.path.splitext(os.path.basename(dfn))[0]
# we have to strip off the jpg as well
sfn = os.path.splitext(sfn)[0]
# store it for creating the string later
three_names.append(sfn)
if j == 0:
# if this is the first of a three-element group,
# store the distance and area to normalise the
# other two with.
nd = d
na = a
norm_dist.append(1.0)
norm_area.append(1.0)
else:
# if not, normalise and store
norm_dist.append(d / nd)
norm_area.append(a / na)
# store the pixel measurements
clg.append(c)
dist.append(d)
area.append(a)
# write out a measurement line to the CSV file
name3 = '%s-%s-%s' % tuple(three_names)
wrtr.writerow([name3] + clg +
norm_dist + dist +
norm_area + area)
csv_f.close()
def _stop(self):
# close down any running analysis
# first remove all polydatas we might have added to the scene
for a in self._actors:
self._viewer.GetRenderer().RemoveViewProp(a)
for m in self._markers:
m.Off()
m.SetInteractor(None)
del self._markers[:]
# setup dummy image input.
self._set_image_viewer_dummy_input()
# set state to initialised
self._state = STATE_INIT
def _start(self, new_filename, reset=False):
# first see if we can open the new file
new_reader = self._open_image_file(new_filename)
# if so, stop previous session
self._stop()
# replace reader and show the image
self._reader = new_reader
self._viewer.SetInput(self._reader.GetOutput())
# show the new filename in the correct image box
# first shorten it slightly: split it at the path separator,
# take the last two components (last dir comp, filename), then
# prepend a '...' and join them all together again. example
# output: .../tmp/file.jpg
short_p = os.path.sep.join(
['...']+new_filename.split(os.path.sep)[-2:])
self._view_frame.current_image_txt.SetValue(short_p)
self._config.filename = new_filename
cm = Measurement()
cm.filename = self._config.filename
self._current_measurement = cm
self._actors = []
self._reset_image_pz()
self.render()
self._state = STATE_IMAGE_LOADED
# this means that the user doesn't want the stored data, for
# example when resetting the image measurement
if not reset:
self._load_measurement(new_filename)
self.render()
# now determine our current progress by tallying up DAC files
ext = os.path.splitext(new_filename)[1]
dir = os.path.dirname(new_filename)
all_images = glob.glob(os.path.join(dir, '*%s' % (ext,)))
all_dacs = glob.glob(os.path.join(dir, '*%s.dac' % (ext,)))
progress_msg = "%d / %d images complete" % \
(len(all_dacs), len(all_images))
self._view_frame.progress_txt.SetValue(progress_msg)
def _set_image_viewer_dummy_input(self):
ds = vtk.vtkImageGridSource()
self._viewer.SetInput(ds.GetOutput())
def _open_image_file(self, filename):
# create a new instance of the current reader
# to read the passed file.
nr = self._reader.NewInstance()
nr.SetFileName(filename)
# FIXME: trap this error
nr.Update()
return nr
def _update_pogo_distance(self):
"""Based on the first two markers, update the pogo line and
recalculate the distance.
"""
if len(self._markers) >= 2:
p1,p2 = [self._markers[i].GetPosition() for i in range(2)]
self._pogo_line_source.SetPoint1(p1)
self._pogo_line_source.SetPoint2(p2)
pogo_dist = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
# store pogo_dist in Measurement
self._current_measurement.pogo_dist = pogo_dist
self._view_frame.pogo_dist_txt.SetValue('%.2f' %
(pogo_dist,))
def _update_area(self):
"""Based on three or more markers in total, draw a nice
polygon and update the total area.
"""
if len(self._markers) >= 3:
# start from apex, then all markers to the right of the
# pogo line, then the lm point, then all markers to the
# left.
p1,p2 = [self._markers[i].GetPosition()[0:2] for i in range(2)]
z = self._markers[0].GetPosition()[2]
n,mag,lv = geometry.normalise_line(p1,p2)
# get its orthogonal vector
no = - n[1],n[0]
pts = [self._markers[i].GetPosition()[0:2]
for i in range(2, len(self._markers))]
right_pts = []
left_pts = []
for p in pts:
v = geometry.points_to_vector(p1,p)
# project v onto n
v_on_n = geometry.dot(v,n) * n
# then use that to determine the vector orthogonal on
# n from p
v_ortho_n = v - v_on_n
# rl is positive for right hemisphere, negative for
# otherwise
rl = geometry.dot(no, v_ortho_n)
if rl >= 0:
right_pts.append(p)
elif rl < 0:
left_pts.append(p)
vpts = vtk.vtkPoints()
vpts.InsertPoint(0,p1[0],p1[1],z)
for i,j in enumerate(right_pts):
vpts.InsertPoint(i+1,j[0],j[1],z)
if len(right_pts) == 0:
i = -1
vpts.InsertPoint(i+2,p2[0],p2[1],z)
for k,j in enumerate(left_pts):
vpts.InsertPoint(i+3+k,j[0],j[1],z)
num_points = 2 + len(left_pts) + len(right_pts)
assert(vpts.GetNumberOfPoints() == num_points)
self._area_polydata.SetPoints(vpts)
cells = vtk.vtkCellArray()
# we repeat the first point
cells.InsertNextCell(num_points + 1)
for i in range(num_points):
cells.InsertCellPoint(i)
cells.InsertCellPoint(0)
self._area_polydata.SetLines(cells)
# now calculate the polygon area according to:
# http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/
all_pts = [p1] + right_pts + [p2] + left_pts + [p1]
tot = 0
for i in range(len(all_pts)-1):
pi = all_pts[i]
pip = all_pts[i+1]
tot += pi[0]*pip[1] - pip[0]*pi[1]
area = - tot / 2.0
# store area in current measurement
self._current_measurement.area = area
self._view_frame.area_txt.SetValue('%.2f' % (area,))
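# Standalone sketch of the polygon-area ("shoelace") formula used in
# _update_area above.  This helper is an illustrative addition with a
# hypothetical name; the module itself does not call it.
def _shoelace_area(pts):
    """Return the signed area of a closed 2D polygon given as (x, y) pairs."""
    tot = 0.0
    for (x0, y0), (x1, y1) in zip(pts, pts[1:] + pts[:1]):
        tot += x0 * y1 - x1 * y0
    return tot / 2.0
# e.g. _shoelace_area([(0, 0), (1, 0), (1, 1), (0, 1)]) returns 1.0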
|
nagyistoce/devide
|
modules/viewers/LarynxMeasurement.py
|
Python
|
bsd-3-clause
| 24,986
|
[
"VTK"
] |
a54c38b5c8e496d7b1c0fda3e94598171c6a542a858e52fa4e7fec151dff87a7
|
# Copyright 2007-2012 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
import nose.tools
from hyperspy._signals.spectrum import Spectrum
from hyperspy.hspy import create_model
from hyperspy.components import Gaussian
class TestModel:
def setUp(self):
s = Spectrum(np.empty(1))
m = create_model(s)
self.model = m
def test_access_component_by_name(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
nose.tools.assert_is(m["test"], g2)
def test_access_component_by_index(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
nose.tools.assert_is(m[1], g2)
def test_component_name_when_append(self):
m = self.model
gs = [Gaussian(), Gaussian(), Gaussian()]
m.extend(gs)
nose.tools.assert_is(m['Gaussian'], gs[0])
nose.tools.assert_is(m['Gaussian_0'], gs[1])
nose.tools.assert_is(m['Gaussian_1'], gs[2])
@nose.tools.raises(ValueError)
def test_several_component_with_same_name(self):
m = self.model
gs = [Gaussian(), Gaussian(), Gaussian()]
m.extend(gs)
m[0]._name = "Gaussian"
m[1]._name = "Gaussian"
m[2]._name = "Gaussian"
m['Gaussian']
@nose.tools.raises(ValueError)
def test_no_component_with_that_name(self):
m = self.model
m['Voigt']
@nose.tools.raises(ValueError)
def test_component_already_in_model(self):
m = self.model
g1 = Gaussian()
m.extend((g1, g1))
def test_remove_component(self):
m = self.model
g1 = Gaussian()
m.append(g1)
m.remove(g1)
nose.tools.assert_equal(len(m), 0)
def test_remove_component_by_index(self):
m = self.model
g1 = Gaussian()
m.append(g1)
m.remove(0)
nose.tools.assert_equal(len(m), 0)
def test_remove_component_by_name(self):
m = self.model
g1 = Gaussian()
m.append(g1)
m.remove(g1.name)
nose.tools.assert_equal(len(m), 0)
def test_get_component_by_name(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
nose.tools.assert_is(m._get_component("test"), g2)
def test_get_component_by_index(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
nose.tools.assert_is(m._get_component(1), g2)
def test_get_component_by_component(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
nose.tools.assert_is(m._get_component(g2), g2)
@nose.tools.raises(ValueError)
def test_get_component_wrong(self):
m = self.model
g1 = Gaussian()
g2 = Gaussian()
g2.name = "test"
m.extend((g1, g2))
m._get_component(1.2)
|
pburdet/hyperspy
|
test_model.py
|
Python
|
gpl-3.0
| 3,745
|
[
"Gaussian"
] |
2de7a1a6b7a605ca2aa31fe89853281fff05960feeffa009008c26f9f1dfb853
|
__doc__ = """Code by Benjamin S. Murphy
bscott.murphy@gmail.com
Dependencies:
numpy
scipy (scipy.optimize.minimize())
Functions:
adjust_for_anisotropy(x, y, xcenter, ycenter, scaling, angle):
Returns X and Y arrays of adjusted data coordinates. Angle is CCW.
adjust_for_anisotropy_3d(x, y, z, xcenter, ycenter, zcenter, scaling_y,
scaling_z, angle_x, angle_y, angle_z):
Returns X, Y, Z arrays of adjusted data coordinates. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
initialize_variogram_model(x, y, z, variogram_model, variogram_model_parameters,
    variogram_function, nlags, weight):
    Returns lags, semivariance, and variogram model parameters as a list.
initialize_variogram_model_3d(x, y, z, values, variogram_model,
    variogram_model_parameters, variogram_function, nlags, weight):
    Returns lags, semivariance, and variogram model parameters as a list.
variogram_function_error(params, x, y, variogram_function, weight):
    Called by calculate_variogram_model.
calculate_variogram_model(lags, semivariance, variogram_model, variogram_function,
    weight):
Returns variogram model parameters that minimize the RMSE between the specified
variogram function and the actual calculated variogram points.
krige(x, y, z, coords, variogram_function, variogram_model_parameters):
Function that solves the ordinary kriging system for a single specified point.
Returns the Z value and sigma squared for the specified coordinates.
krige_3d(x, y, z, vals, coords, variogram_function, variogram_model_parameters):
Function that solves the ordinary kriging system for a single specified point.
Returns the interpolated value and sigma squared for the specified coordinates.
find_statistics(x, y, z, variogram_function, variogram_model_parameters):
Returns the delta, sigma, and epsilon values for the variogram fit.
calcQ1(epsilon):
Returns the Q1 statistic for the variogram fit (see Kitanidis).
calcQ2(epsilon):
Returns the Q2 statistic for the variogram fit (see Kitanidis).
calc_cR(Q2, sigma):
Returns the cR statistic for the variogram fit (see Kitanidis).
References:
P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
Copyright (c) 2015 Benjamin S. Murphy
"""
import numpy as np
from scipy.optimize import minimize
def adjust_for_anisotropy(x, y, xcenter, ycenter, scaling, angle):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling."""
x -= xcenter
y -= ycenter
xshape = x.shape
yshape = y.shape
x = x.flatten()
y = y.flatten()
coords = np.vstack((x, y))
stretch = np.array([[1, 0], [0, scaling]])
rotate = np.array([[np.cos(-angle * np.pi/180.0), -np.sin(-angle * np.pi/180.0)],
[np.sin(-angle * np.pi/180.0), np.cos(-angle * np.pi/180.0)]])
rotated_coords = np.dot(stretch, np.dot(rotate, coords))
x = rotated_coords[0, :].reshape(xshape)
y = rotated_coords[1, :].reshape(yshape)
x += xcenter
y += ycenter
return x, y
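# Illustrative sketch (not part of the original module): a pure y-scaling
# about the origin, i.e. scaling = 2 and angle = 0, so the rotation matrix
# is the identity and y is simply doubled.
def _example_adjust_for_anisotropy():
    x = np.array([1.0, 2.0])
    y = np.array([1.0, 2.0])
    x_adj, y_adj = adjust_for_anisotropy(x, y, 0.0, 0.0, 2.0, 0.0)
    return x_adj, y_adj  # ([1., 2.], [2., 4.])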
def adjust_for_anisotropy_3d(x, y, z, xcenter, ycenter, zcenter, scaling_y,
scaling_z, angle_x, angle_y, angle_z):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling."""
x -= xcenter
y -= ycenter
z -= zcenter
xshape = x.shape
yshape = y.shape
zshape = z.shape
x = x.flatten()
y = y.flatten()
z = z.flatten()
coords = np.vstack((x, y, z))
stretch = np.array([[1., 0., 0.], [0., scaling_y, 0.], [0., 0., scaling_z]])
rotate_x = np.array([[1., 0., 0.],
[0., np.cos(-angle_x * np.pi/180.), -np.sin(-angle_x * np.pi/180.)],
[0., np.sin(-angle_x * np.pi/180.), np.cos(-angle_x * np.pi/180.)]])
rotate_y = np.array([[np.cos(-angle_y * np.pi/180.), 0., np.sin(-angle_y * np.pi/180.)],
[0., 1., 0.],
[-np.sin(-angle_y * np.pi/180.), 0., np.cos(-angle_y * np.pi/180.)]])
rotate_z = np.array([[np.cos(-angle_z * np.pi/180.), -np.sin(-angle_z * np.pi/180.), 0.],
[np.sin(-angle_z * np.pi/180.), np.cos(-angle_z * np.pi/180.), 0.],
[0., 0., 1.]])
rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x))
rotated_coords = np.dot(stretch, np.dot(rot_tot, coords))
x = rotated_coords[0, :].reshape(xshape)
y = rotated_coords[1, :].reshape(yshape)
z = rotated_coords[2, :].reshape(zshape)
x += xcenter
y += ycenter
z += zcenter
return x, y, z
def initialize_variogram_model(x, y, z, variogram_model, variogram_model_parameters,
variogram_function, nlags, weight):
"""Initializes the variogram model for kriging according
to user specifications or to defaults"""
x1, x2 = np.meshgrid(x, x)
y1, y2 = np.meshgrid(y, y)
z1, z2 = np.meshgrid(z, z)
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
d = np.sqrt(dx**2 + dy**2)
g = 0.5 * dz**2
indices = np.indices(d.shape)
d = d[(indices[0, :, :] > indices[1, :, :])]
g = g[(indices[0, :, :] > indices[1, :, :])]
# Equal-sized bins are now implemented. The upper limit on the bins
# is appended to the list (instead of calculated as part of the
# list comprehension) to avoid any numerical oddities
# (specifically, say, ending up as 0.99999999999999 instead of 1.0).
# Appending dmax + 0.001 ensures that the largest distance value
# is included in the semivariogram calculation.
dmax = np.amax(d)
dmin = np.amin(d)
dd = (dmax - dmin)/nlags
bins = [dmin + n*dd for n in range(nlags)]
dmax += 0.001
bins.append(dmax)
# This old binning method was experimental and doesn't seem
# to work too well. Bins were computed such that there are more
# at shorter lags. This effectively weights smaller distances more
# highly in determining the variogram. As Kitanidis points out,
# the variogram fit to the data at smaller lag distances is more
# important. However, the value at the largest lag probably ends up
# being biased too high for the larger values and thereby throws off
# automatic variogram calculation and confuses comparison of the
# semivariogram with the variogram model.
#
# dmax = np.amax(d)
# dmin = np.amin(d)
# dd = dmax - dmin
# bins = [dd*(0.5**n) + dmin for n in range(nlags, 1, -1)]
# bins.insert(0, dmin)
# bins.append(dmax)
lags = np.zeros(nlags)
semivariance = np.zeros(nlags)
for n in range(nlags):
# This 'if... else...' statement ensures that there are data
# in the bin so that numpy can actually find the mean. If we
# don't test this first, then Python kicks out an annoying warning
# message when there is an empty bin and we try to calculate the mean.
if d[(d >= bins[n]) & (d < bins[n + 1])].size > 0:
lags[n] = np.mean(d[(d >= bins[n]) & (d < bins[n + 1])])
semivariance[n] = np.mean(g[(d >= bins[n]) & (d < bins[n + 1])])
else:
lags[n] = np.nan
semivariance[n] = np.nan
lags = lags[~np.isnan(semivariance)]
semivariance = semivariance[~np.isnan(semivariance)]
if variogram_model_parameters is not None:
if variogram_model == 'linear' and len(variogram_model_parameters) != 2:
raise ValueError("Exactly two parameters required "
"for linear variogram model")
elif (variogram_model == 'power' or variogram_model == 'spherical' or variogram_model == 'exponential'
or variogram_model == 'gaussian') and len(variogram_model_parameters) != 3:
raise ValueError("Exactly three parameters required "
"for %s variogram model" % variogram_model)
else:
if variogram_model == 'custom':
raise ValueError("Variogram parameters must be specified when implementing custom variogram model.")
else:
variogram_model_parameters = calculate_variogram_model(lags, semivariance, variogram_model,
variogram_function, weight)
return lags, semivariance, variogram_model_parameters
def initialize_variogram_model_3d(x, y, z, values, variogram_model, variogram_model_parameters,
variogram_function, nlags, weight):
"""Initializes the variogram model for kriging according
to user specifications or to defaults"""
x1, x2 = np.meshgrid(x, x)
y1, y2 = np.meshgrid(y, y)
z1, z2 = np.meshgrid(z, z)
val1, val2 = np.meshgrid(values, values)
d = np.sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
g = 0.5 * (val1 - val2)**2
indices = np.indices(d.shape)
d = d[(indices[0, :, :] > indices[1, :, :])]
g = g[(indices[0, :, :] > indices[1, :, :])]
# The upper limit on the bins is appended to the list (instead of calculated as part of the
# list comprehension) to avoid any numerical oddities (specifically, say, ending up as
# 0.99999999999999 instead of 1.0). Appending dmax + 0.001 ensures that the largest distance value
# is included in the semivariogram calculation.
dmax = np.amax(d)
dmin = np.amin(d)
dd = (dmax - dmin)/nlags
bins = [dmin + n*dd for n in range(nlags)]
dmax += 0.001
bins.append(dmax)
lags = np.zeros(nlags)
semivariance = np.zeros(nlags)
for n in range(nlags):
# This 'if... else...' statement ensures that there are data in the bin so that numpy can actually
# find the mean. If we don't test this first, then Python kicks out an annoying warning message
# when there is an empty bin and we try to calculate the mean.
if d[(d >= bins[n]) & (d < bins[n + 1])].size > 0:
lags[n] = np.mean(d[(d >= bins[n]) & (d < bins[n + 1])])
semivariance[n] = np.mean(g[(d >= bins[n]) & (d < bins[n + 1])])
else:
lags[n] = np.nan
semivariance[n] = np.nan
lags = lags[~np.isnan(semivariance)]
semivariance = semivariance[~np.isnan(semivariance)]
if variogram_model_parameters is not None:
if variogram_model == 'linear' and len(variogram_model_parameters) != 2:
raise ValueError("Exactly two parameters required "
"for linear variogram model")
elif (variogram_model == 'power' or variogram_model == 'spherical' or variogram_model == 'exponential'
or variogram_model == 'gaussian') and len(variogram_model_parameters) != 3:
raise ValueError("Exactly three parameters required "
"for %s variogram model" % variogram_model)
else:
if variogram_model == 'custom':
raise ValueError("Variogram parameters must be specified when implementing custom variogram model.")
else:
variogram_model_parameters = calculate_variogram_model(lags, semivariance, variogram_model,
variogram_function, weight)
return lags, semivariance, variogram_model_parameters
def variogram_function_error(params, x, y, variogram_function, weight):
"""Function used to in fitting of variogram model.
Returns RMSE between calculated fit and actual data."""
diff = variogram_function(params, x) - y
if weight:
weights = np.arange(x.size, 0.0, -1.0)
weights /= np.sum(weights)
rmse = np.sqrt(np.average(diff**2, weights=weights))
else:
rmse = np.sqrt(np.mean(diff**2))
return rmse
def calculate_variogram_model(lags, semivariance, variogram_model, variogram_function, weight):
"""Function that fits a variogram model when parameters are not specified."""
if variogram_model == 'linear':
x0 = [(np.amax(semivariance) - np.amin(semivariance))/(np.amax(lags) - np.amin(lags)),
np.amin(semivariance)]
bnds = ((0.0, 1000000000.0), (0.0, np.amax(semivariance)))
elif variogram_model == 'power':
x0 = [(np.amax(semivariance) - np.amin(semivariance))/(np.amax(lags) - np.amin(lags)),
1.1, np.amin(semivariance)]
bnds = ((0.0, 1000000000.0), (0.01, 1.99), (0.0, np.amax(semivariance)))
else:
x0 = [np.amax(semivariance), 0.5*np.amax(lags), np.amin(semivariance)]
bnds = ((0.0, 10*np.amax(semivariance)), (0.0, np.amax(lags)), (0.0, np.amax(semivariance)))
res = minimize(variogram_function_error, x0, args=(lags, semivariance, variogram_function, weight),
method='SLSQP', bounds=bnds)
return res.x
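# Illustrative sketch (not part of the original module; the helper name and
# synthetic data are hypothetical): fitting a linear variogram model to
# noise-free data should recover a slope near 0.5 and a nugget near 0.1.
def _example_fit_linear_variogram():
    def linear_variogram(params, d):
        # gamma(d) = slope * d + nugget
        return params[0] * d + params[1]
    lags = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    semivariance = 0.5 * lags + 0.1
    return calculate_variogram_model(lags, semivariance, 'linear',
                                     linear_variogram, weight=False)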
def krige(x, y, z, coords, variogram_function, variogram_model_parameters):
"""Sets up and solves the kriging matrix for the given coordinate pair.
This function is now only used for the statistics calculations."""
zero_index = None
zero_value = False
x1, x2 = np.meshgrid(x, x)
y1, y2 = np.meshgrid(y, y)
d = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
bd = np.sqrt((x - coords[0])**2 + (y - coords[1])**2)
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
n = x.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
x_ = np.linalg.solve(a, b)
zinterp = np.sum(x_[:n, 0] * z)
sigmasq = np.sum(x_[:, 0] * -b[:, 0])
return zinterp, sigmasq
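# Illustrative sketch (not part of the original module): solving the
# ordinary kriging system at one point, assuming a pre-fitted linear
# variogram gamma(d) = 0.5 * d with no nugget.
def _example_krige_single_point():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 0.0, 0.0])
    z = np.array([1.0, 2.0, 3.0])
    linear = lambda params, d: params[0] * d + params[1]
    zinterp, sigmasq = krige(x, y, z, (0.5, 0.0), linear, [0.5, 0.0])
    return zinterp, sigmasq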
def krige_3d(x, y, z, vals, coords, variogram_function, variogram_model_parameters):
"""Sets up and solves the kriging matrix for the given coordinate pair.
This function is now only used for the statistics calculations."""
zero_index = None
zero_value = False
x1, x2 = np.meshgrid(x, x)
y1, y2 = np.meshgrid(y, y)
z1, z2 = np.meshgrid(z, z)
d = np.sqrt((x1 - x2)**2 + (y1 - y2)**2 + (z1 - z2)**2)
bd = np.sqrt((x - coords[0])**2 + (y - coords[1])**2 + (z - coords[2])**2)
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
n = x.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
x_ = np.linalg.solve(a, b)
zinterp = np.sum(x_[:n, 0] * vals)
sigmasq = np.sum(x_[:, 0] * -b[:, 0])
return zinterp, sigmasq
def find_statistics(x, y, z, variogram_function, variogram_model_parameters):
"""Calculates variogram fit statistics."""
delta = np.zeros(z.shape)
sigma = np.zeros(z.shape)
for n in range(z.shape[0]):
if n == 0:
delta[n] = 0.0
sigma[n] = 0.0
else:
z_, ss_ = krige(x[:n], y[:n], z[:n], (x[n], y[n]), variogram_function, variogram_model_parameters)
d = z[n] - z_
delta[n] = d
sigma[n] = np.sqrt(ss_)
delta = delta[1:]
sigma = sigma[1:]
epsilon = delta/sigma
return delta, sigma, epsilon
def find_statistics_3d(x, y, z, vals, variogram_function, variogram_model_parameters):
"""Calculates variogram fit statistics for 3D problems."""
delta = np.zeros(vals.shape)
sigma = np.zeros(vals.shape)
for n in range(z.shape[0]):
if n == 0:
delta[n] = 0.0
sigma[n] = 0.0
else:
z_, ss_ = krige_3d(x[:n], y[:n], z[:n], vals[:n], (x[n], y[n], z[n]),
variogram_function, variogram_model_parameters)
d = z[n] - z_
delta[n] = d
sigma[n] = np.sqrt(ss_)
delta = delta[1:]
sigma = sigma[1:]
epsilon = delta/sigma
return delta, sigma, epsilon
def calcQ1(epsilon):
return abs(np.sum(epsilon)/(epsilon.shape[0] - 1))
def calcQ2(epsilon):
return np.sum(epsilon**2)/(epsilon.shape[0] - 1)
def calc_cR(Q2, sigma):
return Q2 * np.exp(np.sum(np.log(sigma**2))/sigma.shape[0])
|
yejingxin/PyKrige
|
pykrige/core.py
|
Python
|
bsd-3-clause
| 17,003
|
[
"Gaussian"
] |
5eb17c583a94a679084a52a46c72ef8259d26db41fcc25596e892e039cdef6a4
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SSLProfile Avi RESTful Object
description:
- This module is used to configure SSLProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
accepted_ciphers:
description:
- Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html).
- Default value when not specified in API or module is interpreted by Avi Controller as AES:3DES:RC4.
accepted_versions:
description:
- Set of versions accepted by the server.
cipher_enums:
description:
- Enum options - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256, tls_ecdhe_ecdsa_with_aes_256_gcm_sha384, tls_ecdhe_rsa_with_aes_128_gcm_sha256,
- tls_ecdhe_rsa_with_aes_256_gcm_sha384, tls_ecdhe_ecdsa_with_aes_128_cbc_sha256, tls_ecdhe_ecdsa_with_aes_256_cbc_sha384,
- tls_ecdhe_rsa_with_aes_128_cbc_sha256, tls_ecdhe_rsa_with_aes_256_cbc_sha384, tls_rsa_with_aes_128_gcm_sha256, tls_rsa_with_aes_256_gcm_sha384,
- tls_rsa_with_aes_128_cbc_sha256, tls_rsa_with_aes_256_cbc_sha256, tls_ecdhe_ecdsa_with_aes_128_cbc_sha, tls_ecdhe_ecdsa_with_aes_256_cbc_sha,
- tls_ecdhe_rsa_with_aes_128_cbc_sha, tls_ecdhe_rsa_with_aes_256_cbc_sha, tls_rsa_with_aes_128_cbc_sha, tls_rsa_with_aes_256_cbc_sha,
- tls_rsa_with_3des_ede_cbc_sha, tls_rsa_with_rc4_128_sha.
description:
description:
- User defined description for the object.
dhparam:
description:
- Dh parameters used in ssl.
- At this time, it is not configurable and is set to 2048 bits.
enable_ssl_session_reuse:
description:
- Enable ssl session re-use.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
name:
description:
- Name of the object.
required: true
prefer_client_cipher_ordering:
description:
- Prefer the ssl cipher ordering presented by the client during the ssl handshake over the one specified in the ssl profile.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
send_close_notify:
description:
- Send 'close notify' alert message for a clean shutdown of the ssl connection.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
ssl_rating:
description:
- Sslrating settings for sslprofile.
ssl_session_timeout:
description:
- The amount of time before an ssl session expires.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
- Units(SEC).
tags:
description:
- List of tag.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create SSL profile with list of allowed ciphers
avi_sslprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
accepted_ciphers: >
ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA:
ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:
AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:
AES256-SHA:DES-CBC3-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:
ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA
accepted_versions:
- type: SSL_VERSION_TLS1
- type: SSL_VERSION_TLS1_1
- type: SSL_VERSION_TLS1_2
cipher_enums:
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA256
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_3DES_EDE_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
name: PFS-BOTH-RSA-EC
send_close_notify: true
ssl_rating:
compatibility_rating: SSL_SCORE_EXCELLENT
performance_rating: SSL_SCORE_EXCELLENT
security_score: '100.0'
tenant_ref: Demo
"""
RETURN = '''
obj:
description: SSLProfile (api/sslprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
accepted_ciphers=dict(type='str',),
accepted_versions=dict(type='list',),
cipher_enums=dict(type='list',),
description=dict(type='str',),
dhparam=dict(type='str',),
enable_ssl_session_reuse=dict(type='bool',),
name=dict(type='str', required=True),
prefer_client_cipher_ordering=dict(type='bool',),
send_close_notify=dict(type='bool',),
ssl_rating=dict(type='dict',),
ssl_session_timeout=dict(type='int',),
tags=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslprofile',
set([]))
if __name__ == '__main__':
main()
|
hkariti/ansible
|
lib/ansible/modules/network/avi/avi_sslprofile.py
|
Python
|
gpl-3.0
| 8,116
|
[
"VisIt"
] |
0e8001e166fe49cb4cec4e63bb493e4d2e2a56ec2c174f8178d32938d3e10ceb
|
# gapjunction.py ---
#
# Filename: gapjunction.py
# Description:
# Author:Subhasis Ray
# Maintainer:
# Created: Tue Jul 2 14:28:35 2013 (+0530)
# Version:
# Last-Updated: Tue Jul 23 21:28:45 2013 (+0530)
# By: subha
# Update #: 57
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import moose
from moose import utils
import pylab
simtime = 100e-3
simdt = 1e-6
def make_compartment(path):
comp = moose.Compartment(path)
comp.Em = -70e-3
comp.initVm = -70e-3
comp.Cm = 1e-12
comp.Rm = 1e9
return comp
def gapjunction_demo():
"""
    Demonstration of modelling a gap junction using MOOSE.
"""
model = moose.Neutral('model')
data = moose.Neutral('data')
comps = []
comp1 = make_compartment('%s/comp1' % (model.path))
comp2 = make_compartment('%s/comp2' % (model.path))
pulse = moose.PulseGen('%s/pulse' % (model.path))
pulse.level[0] = 1e-9
pulse.delay[0] = 50e-3
pulse.width[0] = 20e-3
pulse.delay[1] = 1e9
moose.connect(pulse, 'output', comp1, 'injectMsg')
gj = moose.GapJunction('%s/gj' % (model.path))
gj.Gk = 1e-6
moose.connect(gj, 'channel1', comp1, 'channel')
moose.connect(gj, 'channel2', comp2, 'channel')
vm1_tab = moose.Table('%s/Vm1' % (data.path))
moose.connect(vm1_tab, 'requestOut', comp1, 'getVm')
vm2_tab = moose.Table('%s/Vm2' % (data.path))
moose.connect(vm2_tab, 'requestOut', comp2, 'getVm')
pulse_tab = moose.Table('%s/inject' % (data.path))
moose.connect(pulse_tab, 'requestOut', pulse, 'getOutputValue')
utils.setDefaultDt(elecdt=simdt, plotdt2=simdt)
utils.assignDefaultTicks()
utils.stepRun(simtime, 10000*simdt)
# print len(vm1_tab.vector), len(vm2_tab.vector), len(pulse_tab.vector)
# moose.showmsg(comp1)
# moose.showmsg(comp2)
# moose.showmsg(pulse)
t = pylab.linspace(0, simtime, len(vm1_tab.vector))
pylab.plot(t, vm1_tab.vector*1000, label='Vm1 (mV)')
pylab.plot(t, vm2_tab.vector*1000, label='Vm2 (mV)')
pylab.plot(t, pulse_tab.vector*1e9, label='inject (nA)')
pylab.legend()
pylab.show()
def main():
    """
    This example demonstrates how a gap junction can be modelled using MOOSE.
    """
    gapjunction_demo()
if __name__ == '__main__':
    main()
#
# gapjunction.py ends here
|
BhallaLab/moose-examples
|
snippets/gapjunction.py
|
Python
|
gpl-2.0
| 3,075
|
[
"MOOSE"
] |
39c9772179ee2dc23702ae8a7450633660227bae62cbb97cbf9db3e915ac1fba
|
#!/usr/bin/env python3
import numpy as np
from . import crystal
import scipy
import scipy.constants as constants
import scipy.ndimage
import collections
import json
import argparse
import IPython
def local_maxima(data, size = 5, threshold = 0.05):
data_max = scipy.ndimage.filters.maximum_filter(data, size = size)
maxima = (data == data_max)
data_min = scipy.ndimage.filters.minimum_filter(data, size = size)
minima = (data == data_min)
diff = (data_max - data_min)
maxima[diff < np.ptp(diff)*threshold] = 0
return maxima
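# Hedged usage sketch (not part of the module): how local_maxima() is meant to
# be called. The synthetic image and peak positions are illustrative only.
def _local_maxima_demo():
    img = np.zeros((50, 50))
    img[10, 10] = img[30, 40] = 1.0
    img = scipy.ndimage.gaussian_filter(img, sigma=2.0)
    peaks = local_maxima(img, size=5, threshold=0.05)
    return np.transpose(np.nonzero(peaks))  # (row, col) indices of the peaks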
class Beam:
def __init__(self, E = 15000, incident_angle = 0):
self.E = E
self.incident_angle = incident_angle
self.p = np.sqrt(2*E*constants.m_e/constants.eV + E**2/constants.c**2) # eV/c
self.k_0 = self.p * constants.eV / constants.hbar / 1e10 # 1/A
self.wavelength = 2 * np.pi / self.k_0 # A
self.k = np.array([self.k_0, 0, 0])
if incident_angle:
self.k = np.dot(crystal.rotation_matrix(incident_angle, [0, 1, 0]), self.k)
def export(self):
d = collections.OrderedDict([
('E',self.E),
('incident_angle',self.incident_angle),
])
return d
def export_json(self, fname):
with open(fname, 'w') as f:
json.dump(self.export(), f, indent = 2)
@classmethod
def load(cls, beam_dic):
return cls(**beam_dic)
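# Hedged sketch (not part of the module): constructing a Beam and reading the
# derived wave quantities. 15 keV is the class default; per the unit comments
# above, k_0 is in 1/A and the wavelength in A.
def _beam_demo():
    beam = Beam(E=15000)
    return beam.k_0, beam.wavelength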
class Screen:
def __init__(self, xlim = (-0.05, 0.05), ylim = (0, 0.05), distance = 0.30,
pixel_width = 0.0005):
self.xlim = xlim
self.ylim = ylim
self.pixel_width = pixel_width
self.distance = distance
xstep = int(np.diff(xlim)[0]/pixel_width) + 1
ystep = int(np.diff(ylim)[0]/pixel_width) + 1
x = np.linspace(xlim[0], xlim[1], xstep)
y = np.linspace(ylim[0], ylim[1], ystep)
self.X, self.Y = np.meshgrid(x, y)
self.Z = np.zeros_like(self.X)
def clear_screen(self):
self.Z = np.zeros_like(self.X)
@property
def X_m(self):
return self.X - self.pixel_width/2
@property
def Y_m(self):
return self.Y - self.pixel_width/2
def inv_ray_trace(self, x, y, beam):
x_r = np.asarray(x).flatten()/self.distance
y_r = np.asarray(y).flatten()/self.distance
z_r = np.full_like(x_r, 1.0, dtype = float)
v_r = np.vstack((z_r, x_r, y_r))
# scale by beam wavevector magnitude
v = beam.k_0 * v_r / np.linalg.norm(v_r, axis = 0)
        try:
            if x.shape:
                return (v.T - beam.k).reshape(x.shape + (-1,))
        except (AttributeError, ValueError):
            pass
        # Scalar or shapeless input falls through to the flat form.
        return v.T - beam.k
def full_inv_ray_trace(self, beam, scale = 1):
X, Y = scipy.ndimage.zoom(self.X, scale), scipy.ndimage.zoom(self.Y, scale)
return self.inv_ray_trace(X, Y, beam)
def export(self):
d = collections.OrderedDict([
('xlim',self.xlim),
('ylim',self.ylim),
('distance',self.distance),
('pixel_width',self.pixel_width),
])
return d
def export_json(self, fname):
with open(fname, 'w') as f:
json.dump(self.export(), f, indent = 2)
@classmethod
def load(cls, screen_dic):
return cls(**screen_dic)
class RHEEDSim:
def __init__(self, *lattices, screen = None, beam = None, broadening = 0.2,
colour = False, **kwargs):
self.screen = screen if screen else Screen()
self.beam = beam if beam else Beam()
self.broadening = broadening
self.lattices = list(lattices)
self.last_intensity = None
self.colour = colour
def diffracted_intensity(self, k_scatter, lattices = None,
broadening = None):
if lattices is None:
lattices = self.lattices
if broadening is None:
broadening = self.broadening
y, x, n = k_scatter.shape
k_scatter = k_scatter.reshape((x*y, n))
Z = np.zeros((y, x))
for lattice in lattices:
base_coords = np.floor(np.dot(k_scatter[...,:lattice.dimension], lattice.rlvi)).astype(int)
lattice_coords = lattice.neighbours2 + base_coords
rect_coords = np.dot(lattice_coords, lattice.rlv)
dists = np.linalg.norm(rect_coords - k_scatter[...,:lattice.dimension], axis = -1)
intensities = lattice.intensity(lattice_coords)
hwhms = lattice.reciprocal_lattice_sphere_radius_bulk(intensities) * broadening
diffracted = np.sum(intensities * hwhms / (dists**2 + hwhms**2), axis = 0)
Z += diffracted.reshape((y, x))**2
return Z
def simulate(self, lattices = None, broadening = None, colour = None):
if lattices is None:
lattices = self.lattices
k_scatter = self.screen.full_inv_ray_trace(self.beam)
if colour is None:
colour = self.colour
if colour:
# warn if more than 3 lattices in RHEEDSim
if len(lattices) > 3:
raise ValueError('Cannot colour contribution from more than 3 lattices')
y, x, n = k_scatter.shape
intensity = np.zeros((y, x, 3))
for i, lattice in enumerate(lattices):
intensity[... , i] = self.diffracted_intensity(k_scatter, lattices = [lattice],
broadening = broadening)
return intensity / intensity.max()
intensity = self.diffracted_intensity(k_scatter, broadening = broadening)
self.screen.Z += intensity
self.last_intensity = intensity
return intensity
def diffraction_index(self, lattices = None, d_u = 0.5, threshold = 0.05,
broadening = None):
if lattices is None:
lattices = self.lattices
lattice_indices = []
for lattice in lattices:
if self.last_intensity is None:
self.simulate(broadening = broadening, colour = False)
maxima = local_maxima(self.last_intensity, threshold = threshold)
x = self.screen.X[maxima]
y = self.screen.Y[maxima]
k_scatter = self.screen.inv_ray_trace(x, y, self.beam)
dists, indices, coords = lattice.closest_reciprocal_lattice_point(k_scatter[...,:lattice.dimension])
# add check for repeated indices
idx = (dists < d_u)
x, y, indices = x[idx], y[idx], indices[idx]
lattice_indices.append((np.column_stack((y, x)), indices))
return lattice_indices
def brillouin_ewald_intersection(self, lattice = None):
if lattice is None:
# default to first lattice in rheedsim
lattice = self.lattices[0]
elif type(lattice) == int:
lattice = self.lattices[lattice]
k_scatter = self.screen.full_inv_ray_trace(self.beam)
y, x, n = k_scatter.shape
k_scatter = k_scatter.reshape((x*y, n))
Z = np.zeros((x*y))
dists, lattice_coords, rect_coords = lattice.closest_reciprocal_lattice_point(k_scatter)
unique_lattice_coords = set(tuple(i) for i in lattice_coords.tolist())
unique_lattice_coords = sorted(unique_lattice_coords, key = lambda x : np.linalg.norm(x))
for i, ulc in enumerate(unique_lattice_coords):
idx = (lattice_coords == np.asarray(ulc)).all(axis = 1)
Z[idx] += i
return Z.reshape((y, x))
def export(self):
d = collections.OrderedDict([
('screen',self.screen.export()),
('beam',self.beam.export()),
('lattices', [lattice.export() for lattice in self.lattices]),
('broadening', self.broadening),
('colour', self.colour),
])
return d
def export_json(self, fname):
with open(fname, 'w') as f:
json.dump(self.export(), f, indent = 2)
@classmethod
def load(cls, rheedsim_dic):
screen = Screen.load(rheedsim_dic['screen'])
beam = Beam.load(rheedsim_dic['beam'])
lattices = [crystal.Lattice.load(l) for l in rheedsim_dic['lattices']]
return cls(*lattices, **{**rheedsim_dic, 'screen':screen, 'beam':beam})
@classmethod
def load_json(cls, fname):
with open(fname, 'r') as f:
rheedsim_dic = json.load(f)
return cls.load(rheedsim_dic)
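# Hedged usage sketch (not part of the module): composing a simulation by hand.
# `my_lattice` stands in for a crystal.Lattice instance, whose construction is
# not shown here.
#     sim = RHEEDSim(my_lattice, screen=Screen(), beam=Beam(E=15000))
#     pattern = sim.simulate()           # 2D intensity array on the screen grid
#     spots = sim.diffraction_index()    # positions and indices of the maxima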
def main(args):
if args.load:
sim = RHEEDSim.load_json(args.load)
IPython.embed()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', type = str)
args = parser.parse_args()
main(args)
|
chanjr/rheedsim
|
src/rheedsim/rheedsim.py
|
Python
|
mit
| 8,732
|
[
"CRYSTAL"
] |
0d9bfb50a1271ff78f618a1804f6a350e1a782b545035deed70c5c4cc997c712
|
"""
A number of functions that enhance IDLE on Mac OSX.
"""
import sys
import tkinter
from os import path
import warnings
def runningAsOSXApp():
warnings.warn("runningAsOSXApp() is deprecated, use isAquaTk()",
DeprecationWarning, stacklevel=2)
return isAquaTk()
def isCarbonAquaTk(root):
warnings.warn("isCarbonAquaTk(root) is deprecated, use isCarbonTk()",
DeprecationWarning, stacklevel=2)
return isCarbonTk()
_tk_type = None
def _initializeTkVariantTests(root):
"""
Initializes OS X Tk variant values for
isAquaTk(), isCarbonTk(), isCocoaTk(), and isXQuartz().
"""
global _tk_type
if sys.platform == 'darwin':
ws = root.tk.call('tk', 'windowingsystem')
if 'x11' in ws:
_tk_type = "xquartz"
elif 'aqua' not in ws:
_tk_type = "other"
elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
_tk_type = "cocoa"
else:
_tk_type = "carbon"
else:
_tk_type = "other"
def isAquaTk():
"""
Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
"""
assert _tk_type is not None
return _tk_type == "cocoa" or _tk_type == "carbon"
def isCarbonTk():
"""
Returns True if IDLE is using a Carbon Aqua Tk (instead of the
newer Cocoa Aqua Tk).
"""
assert _tk_type is not None
return _tk_type == "carbon"
def isCocoaTk():
"""
Returns True if IDLE is using a Cocoa Aqua Tk.
"""
assert _tk_type is not None
return _tk_type == "cocoa"
def isXQuartz():
"""
Returns True if IDLE is using an OS X X11 Tk.
"""
assert _tk_type is not None
return _tk_type == "xquartz"
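# Hedged usage sketch (not part of the module): after setupApp() has run,
# idlelib code typically gates platform-specific behaviour on the predicates
# above, e.g.
#     if isCocoaTk():
#         ...apply Cocoa-specific menu workarounds...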
def tkVersionWarning(root):
"""
Returns a string warning message if the Tk version in use appears to
be one known to cause problems with IDLE.
1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
can still crash unexpectedly.
"""
if isCocoaTk():
patchlevel = root.tk.call('info', 'patchlevel')
if patchlevel not in ('8.5.7', '8.5.9'):
return False
return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
r" be unstable.\n"
r"Visit http://www.python.org/download/mac/tcltk/"
r" for current information.".format(patchlevel))
else:
return False
def addOpenEventSupport(root, flist):
"""
This ensures that the application will respond to open AppleEvents, which
    makes it feasible to use IDLE as the default application for Python files.
"""
def doOpenFile(*args):
for fn in args:
flist.open(fn)
# The command below is a hook in aquatk that is called whenever the app
# receives a file open event. The callback can have multiple arguments,
# one for every file that should be opened.
root.createcommand("::tk::mac::OpenDocument", doOpenFile)
def hideTkConsole(root):
try:
root.tk.call('console', 'hide')
except tkinter.TclError:
# Some versions of the Tk framework don't have a console object
pass
def overrideRootMenu(root, flist):
"""
Replace the Tk root menu by something that is more appropriate for
IDLE with an Aqua Tk.
"""
# The menu that is attached to the Tk root (".") is also used by AquaTk for
# all windows that don't specify a menu of their own. The default menubar
# contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
# menu.
#
# This function replaces the default menubar by a mostly empty one, it
# should only contain the correct application menu and the window menu.
#
# Due to a (mis-)feature of TkAqua the user will also see an empty Help
# menu.
from tkinter import Menu
from idlelib import Bindings
from idlelib import WindowList
closeItem = Bindings.menudefs[0][1][-2]
# Remove the last 3 items of the file menu: a separator, close window and
# quit. Close window will be reinserted just above the save item, where
# it should be according to the HIG. Quit is in the application menu.
del Bindings.menudefs[0][1][-3:]
Bindings.menudefs[0][1].insert(6, closeItem)
# Remove the 'About' entry from the help menu, it is in the application
# menu
del Bindings.menudefs[-1][1][0:2]
# Remove the 'Configure Idle' entry from the options menu, it is in the
# application menu as 'Preferences'
del Bindings.menudefs[-2][1][0]
menubar = Menu(root)
root.configure(menu=menubar)
menudict = {}
menudict['windows'] = menu = Menu(menubar, name='windows')
menubar.add_cascade(label='Window', menu=menu, underline=0)
def postwindowsmenu(menu=menu):
end = menu.index('end')
if end is None:
end = -1
if end > 0:
menu.delete(0, end)
WindowList.add_windows_to_menu(menu)
WindowList.register_callback(postwindowsmenu)
def about_dialog(event=None):
from idlelib import aboutDialog
aboutDialog.AboutDialog(root, 'About IDLE')
def config_dialog(event=None):
from idlelib import configDialog
# Ensure that the root object has an instance_dict attribute,
# mirrors code in EditorWindow (although that sets the attribute
# on an EditorWindow instance that is then passed as the first
# argument to ConfigDialog)
        root.instance_dict = flist.inversedict
configDialog.ConfigDialog(root, 'Settings')
def help_dialog(event=None):
from idlelib import textView
fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
textView.view_file(root, 'Help', fn)
root.bind('<<about-idle>>', about_dialog)
root.bind('<<open-config-dialog>>', config_dialog)
root.createcommand('::tk::mac::ShowPreferences', config_dialog)
if flist:
root.bind('<<close-all-windows>>', flist.close_all_callback)
# The binding above doesn't reliably work on all versions of Tk
# on MacOSX. Adding command definition below does seem to do the
# right thing for now.
root.createcommand('exit', flist.close_all_callback)
if isCarbonTk():
# for Carbon AquaTk, replace the default Tk apple menu
menudict['application'] = menu = Menu(menubar, name='apple')
menubar.add_cascade(label='IDLE', menu=menu)
Bindings.menudefs.insert(0,
('application', [
('About IDLE', '<<about-idle>>'),
None,
]))
tkversion = root.tk.eval('info patchlevel')
if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
# for earlier AquaTk versions, supply a Preferences menu item
Bindings.menudefs[0][1].append(
('_Preferences....', '<<open-config-dialog>>'),
)
if isCocoaTk():
# replace default About dialog with About IDLE one
root.createcommand('tkAboutDialog', about_dialog)
# replace default "Help" item in Help menu
root.createcommand('::tk::mac::ShowHelp', help_dialog)
# remove redundant "IDLE Help" from menu
del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
"""
Perform initial OS X customizations if needed.
Called from PyShell.main() after initial calls to Tk()
There are currently three major versions of Tk in use on OS X:
1. Aqua Cocoa Tk (native default since OS X 10.6)
2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
3. X11 (supported by some third-party distributors, deprecated)
There are various differences among the three that affect IDLE
behavior, primarily with menus, mouse key events, and accelerators.
Some one-time customizations are performed here.
Others are dynamically tested throughout idlelib by calls to the
isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
are initialized here as well.
"""
_initializeTkVariantTests(root)
if isAquaTk():
hideTkConsole(root)
overrideRootMenu(root, flist)
addOpenEventSupport(root, flist)
|
ms-iot/python
|
cpython/Lib/idlelib/macosxSupport.py
|
Python
|
bsd-3-clause
| 8,419
|
[
"VisIt"
] |
6421e36a368b091c36d28adc726fd2c890047b33bc617aaf6644d87279eeb374
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different interpolation modes,
# with the mirror-pad feature turned on and with a rotation
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
transform = vtk.vtkTransform()
# rotate about the center of the image
transform.Translate(+100.8,+100.8,+69.0)
transform.RotateWXYZ(100,0.1,0.1,1)
transform.Translate(-100.8,-100.8,-69.0)
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.MirrorOn()
reslice1.SetResliceTransform(transform)
reslice1.SetInterpolationModeToCubic()
reslice1.SetOutputSpacing(2.0,2.0,1.5)
reslice1.SetOutputOrigin(-32,-32,40)
reslice1.SetOutputExtent(0,127,0,127,0,0)
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(reader.GetOutputPort())
reslice2.MirrorOn()
reslice2.SetResliceTransform(transform)
reslice2.SetInterpolationModeToLinear()
reslice2.SetOutputSpacing(2.0,2.0,1.5)
reslice2.SetOutputOrigin(-32,-32,40)
reslice2.SetOutputExtent(0,127,0,127,0,0)
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.MirrorOn()
reslice3.SetResliceTransform(transform)
reslice3.SetInterpolationModeToNearestNeighbor()
reslice3.SetOutputSpacing(2.0,2.0,1.5)
reslice3.SetOutputOrigin(-32,-32,40)
reslice3.SetOutputExtent(0,127,0,127,0,0)
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.MirrorOn()
reslice4.SetResliceTransform(transform)
reslice4.SetInterpolationModeToLinear()
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputOrigin(-102.4,-102.4,40)
reslice4.SetOutputExtent(0,127,0,127,0,0)
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(256,256)
imgWin.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/ResliceMirrorOblique.py
|
Python
|
gpl-3.0
| 3,396
|
[
"VTK"
] |
56e3a50f944fa27d83dbc41c7d34ead441c55a71601ba47cb223c6c05b58fb24
|
import pytest
import re
from six import StringIO
from woodpecker.sequences.basesequence import BaseSequence
@pytest.fixture
def stopwatch_sequence():
class stopwatchSequence(BaseSequence):
def steps(self):
self.start_stopwatch('stopwatch_test')
self.end_stopwatch('stopwatch_test')
return stopwatchSequence
@pytest.fixture
def bad_stopwatch_sequence():
class BadstopwatchSequence(BaseSequence):
def steps(self):
self.end_stopwatch('stopwatch_test')
return BadstopwatchSequence
@pytest.fixture
def fixed_think_time_sequence():
class FixedThinkTimeSequence(BaseSequence):
def steps(self):
self.think_time(1, kind='fixed')
return FixedThinkTimeSequence
@pytest.fixture
def random_gaussian_think_time_sequence():
class RandomGaussianThinkTimeSequence(BaseSequence):
def steps(self):
self.think_time(1, kind='gaussian')
return RandomGaussianThinkTimeSequence
def test_stopwatches(stopwatch_sequence):
output_stream = StringIO()
sequence = stopwatch_sequence(
debug=True,
inline_log_sinks=(output_stream,)
)
sequence.run_steps()
output_stream.seek(0)
output_string = output_stream.getvalue()
assert 'Stopwatch "stopwatch_test" started' in output_string
assert 'Stopwatch "stopwatch_test" ended' in output_string
def test_bad_stopwatches(bad_stopwatch_sequence):
output_stream = StringIO()
sequence = bad_stopwatch_sequence(
debug=True,
inline_log_sinks=(output_stream,)
)
with pytest.raises(KeyError):
sequence.run_steps()
def test_fixed_think_time(fixed_think_time_sequence):
output_stream = StringIO()
sequence = fixed_think_time_sequence(
debug=True,
inline_log_sinks=(output_stream,)
)
sequence.run_steps()
output_stream.seek(0)
output_string = output_stream.getvalue()
assert 'Think time: 1 s (fixed)' in output_string
def test_gaussian_random_think_time(random_gaussian_think_time_sequence):
output_stream = StringIO()
sequence = random_gaussian_think_time_sequence(
debug=True,
inline_log_sinks=(output_stream,)
)
sequence.run_steps()
output_stream.seek(0)
output_string = output_stream.getvalue()
assert re.search(r"Think time: \d+\.\d{,3} s \(gaussian\)", output_string) \
is not None
think_time = float(re.findall(
r"Think time: (\d+\.\d{,3}) s \(gaussian\)", output_string
)[0])
assert think_time >= 0
|
steromano87/Woodpecker
|
tests/sequences/test_basesequence.py
|
Python
|
agpl-3.0
| 2,557
|
[
"Gaussian"
] |
bc055b22e8cf374315647fb8feed1239b38bdf0010d37eb14b102a71b16947d0
|
#!/usr/bin/env python
"""A simple example of how you can use Mayavi without using Envisage
or the Mayavi Envisage application and do off screen rendering.
On Linux/Mac, with VTK < 5.2, you should see a small black window popup
and disappear, see the section :ref:`offscreen_rendering` to avoid this.
On Win32 you will not see any windows popping up at all. In the end you
should have an offscreen.png image in the same directory with the
rendered visualization.
It can be run as::
$ python offscreen.py
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
from os.path import join, abspath, dirname
# The offscreen Engine.
from mayavi.api import OffScreenEngine
# Usual MayaVi imports
from mayavi.scripts.util import get_data_dir
from mayavi.sources.api import VTKXMLFileReader
from mayavi.modules.api import Outline, ScalarCutPlane, Streamline
def main():
# Create the MayaVi offscreen engine and start it.
e = OffScreenEngine()
# Starting the engine registers the engine with the registry and
# notifies others that the engine is ready.
e.start()
# Create a new scene.
win = e.new_scene()
# Now setup a normal MayaVi pipeline.
src = VTKXMLFileReader()
src.initialize(join(get_data_dir(dirname(abspath(__file__))),
'fire_ug.vtu'))
e.add_source(src)
e.add_module(Outline())
e.add_module(ScalarCutPlane())
e.add_module(Streamline())
win.scene.isometric_view()
# Change the size argument to anything you want.
win.scene.save('offscreen.png', size=(800, 800))
if __name__ == '__main__':
main()
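# Hedged aside (not part of the example; depends on your Mayavi version): the
# higher-level mlab interface exposes an offscreen switch that avoids building
# an engine by hand:
#     from mayavi import mlab
#     mlab.options.offscreen = True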
|
dmsurti/mayavi
|
examples/mayavi/advanced_visualization/offscreen.py
|
Python
|
bsd-3-clause
| 1,670
|
[
"Mayavi",
"VTK"
] |
4e74a13385ab87dab89d234a00b4c5bdde3f7581a1f819cb359cb091ed972d26
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Example analyses and calculations based on data parsed by cclib."""
from cclib.method.bader import Bader
from cclib.method.bickelhaupt import Bickelhaupt
from cclib.method.cda import CDA
from cclib.method.cspa import CSPA
from cclib.method.ddec import DDEC6
from cclib.method.density import Density
from cclib.method.electrons import Electrons
from cclib.method.fragments import FragmentAnalysis
from cclib.method.hirshfeld import Hirshfeld
from cclib.method.lpa import LPA
from cclib.method.mbo import MBO
from cclib.method.moments import Moments
from cclib.method.mpa import MPA
from cclib.method.nuclear import Nuclear
from cclib.method.opa import OPA
from cclib.method.orbitals import Orbitals
from cclib.method.stockholder import Stockholder
from cclib.method.volume import Volume
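# Hedged usage sketch (not part of the package): a typical analysis run. The
# logfile name is illustrative.
#     import cclib
#     data = cclib.io.ccread("water_b3lyp.log")  # parse a QM logfile
#     mpa = MPA(data)                            # Mulliken population analysis
#     mpa.calculate()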
|
berquist/cclib
|
cclib/method/__init__.py
|
Python
|
bsd-3-clause
| 988
|
[
"cclib"
] |
d224dd1d7a6cd485f39cbf9f23baa08b5eb183b1cdc1c6ad44111137406b8026
|
#
# ENVISIoN
#
# Copyright (c) 2020-2021 Alexander Vevstad, Gabriel Anderberg, Didrik Axén,
# Adam Engman, Kristoffer Gubberud Maras, Joakim Stenborg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
# Alterations to this file by Gabriel Anderberg, Didrik Axén,
# Adam Engman, Kristoffer Gubberud Maras, Joakim Stenborg
#
# To the extent possible under law, the person who associated CC0 with
# the alterations to this file has waived all copyright and related
# or neighboring rights to the alterations made to this file.
#
# You should have received a copy of the CC0 legalcode along with
# this work. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
from pathlib import Path
import h5py
import numpy as np
import re
import sys
import scipy
from pathlib import Path
def enlarge(matrix):
'''
Inviwo requires volume data to be at least 48x48x48 in size.
Interpolate volume data voxels until that size is reached.
'''
# Inviwo requires arrays to be above a certain size.
# Volumes in hdf5 below 48x48x48 will not be detected
    # Larger interpolated volume dimensions make slices look better.
    # 128 seems to be a good compromise between size and looks.
scale = 128/min(len(x) for x in matrix)
if scale > 1:
matrix = scipy.ndimage.zoom(matrix, scale, None, 3, 'wrap')
# while any(len(x) < 96 for x in matrix):
# matrix = scipy.ndimage.zoom(matrix, 2)
return matrix
def expand(matrix):
    '''
    Expands the given matrix by tiling it 2x2x2 into eight octants
    Parameters
    ------
    matrix: numpy.array
        3D matrix that should represent a reciprocal lattice
    Return
    ------
    Expanded matrix
    '''
lenx = matrix.shape[0]
leny = matrix.shape[1]
lenz = matrix.shape[2]
new = np.zeros((2*lenx, 2*leny, 2*lenz), dtype=np.float32)
new[0:lenx, 0:leny, 0:lenz] = matrix
new[lenx:2*lenx, 0:leny, 0:lenz] = matrix
new[0:lenx, leny:2*leny, 0:lenz] = matrix
new[lenx:2*lenx, leny:2*leny, 0:lenz] = matrix
new[0:lenx, 0:leny, lenz:2*lenz] = matrix
new[lenx:2*lenx, 0:leny, lenz:2*lenz] = matrix
new[0:lenx, leny:2*leny, lenz:2*lenz] = matrix
new[lenx:2*lenx, leny:2*leny, lenz:2*lenz] = matrix
return new
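# Hedged sketch (illustration only): expand() tiles the volume 2x2x2, so an
# (n, m, k) array comes back as (2n, 2m, 2k) holding eight identical copies.
#     a = np.arange(8, dtype=np.float32).reshape(2, 2, 2)
#     expand(a).shape   # -> (4, 4, 4)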
def brillouin_zone(matrix, basis):
    '''
    Transforms a reciprocal lattice to its Brillouin zone
    Parameters
    ------
    matrix: numpy.array
        3D matrix that should represent a reciprocal lattice
    basis:
        Reciprocal basis vectors
    Return
    ------
    Matrix representing the Brillouin zone
    '''
base_x = basis[0]
base_y = basis[1]
base_z = basis[2]
base_xy = base_x - base_y
base_xy = np.ceil(base_xy)
base_xz = base_x - base_z
base_xz = np.ceil(base_xz)
base_zx = base_z - base_x
base_zx = np.ceil(base_zx)
base_x = np.ceil(base_x)
base_y = np.ceil(base_y)
base_z = np.ceil(base_z)
lenx = matrix.shape[0]
leny = matrix.shape[1]
lenz = matrix.shape[2]
matrix = expand(matrix)
for x in range(matrix.shape[0]):
for y in range(matrix.shape[1]):
for z in range(matrix.shape[2]):
if np.dot(base_x, np.array([x - lenx*base_x[0]*3/2, y - leny*base_x[1]*3/2, z - lenz*base_x[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_x, np.array([x - lenx*base_x[0]*1/2, y - leny*base_x[1]*1/2, z - lenz*base_x[2]*1/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(base_y, np.array([x - lenx*base_y[0]*3/2, y - leny*base_y[1]*3/2, z - lenz*base_y[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_y, np.array([x - lenx*base_y[0]*1/2, y - leny*base_y[1]*1/2, z - lenz*base_y[2]*1/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(base_z, np.array([x - lenx*base_z[0]*3/2, y - leny*base_z[1]*3/2, z - lenz*base_z[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_z, np.array([x - lenx*base_z[0]*1/2, y - leny*base_z[1]*1/2, z - lenz*base_z[2]*1/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(base_xy, np.array([x - lenx*base_xy[0]*3/2, y - leny*base_xy[1]*3/2, z - lenz*base_xy[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_xy, np.array([x - lenx*base_xy[0]*1/2, y - leny*base_xy[1]*1/2, z - lenz*base_xy[2]*1/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(base_xz, np.array([x - lenx*base_xz[0]*3/2, y - leny*base_xz[1]*3/2, z - lenz*base_xz[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_xz, np.array([x - lenx*base_xz[0]*1/2, y - leny*base_xz[1]*1/2, z - lenz*base_xz[2]*1/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(base_zx, np.array([x - lenx*base_zx[0]*3/2, y - leny*base_zx[1]*3/2, z - lenz*base_zx[2]*3/2])) >= 0:
matrix[x, y, z] = 1
if np.dot(-base_zx, np.array([x - lenx*base_zx[0]*1/2, y - leny*base_zx[1]*1/2, z - lenz*base_zx[2]*1/2])) >= 0:
matrix[x, y, z] = 1
return matrix
def convert_fermi_volumes(band, basis, fermi_energy):
emin = band.min()
emax = band.max()
def normalize(value):
return (value - emin) / (emax - emin)
normalize_vector = np.vectorize(normalize)
fermi_matrix = normalize_vector(band.copy())
brillouin_zone_matrix = brillouin_zone(fermi_matrix.copy(), basis)
expanded_matrix = expand(fermi_matrix.copy())
normalized_fermi_energy = normalize(fermi_energy)
fermi_matrix = enlarge(fermi_matrix)
brillouin_zone_matrix = enlarge(brillouin_zone_matrix)
expanded_matrix = enlarge(expanded_matrix)
# fermi_matrix = expand(fermi_matrix)
# brillouin_zone_matrix = expand(brillouin_zone_matrix)
# fermi_matrix = expand(fermi_matrix)
# fermi_matrix = expand(fermi_matrix)
return [fermi_matrix, brillouin_zone_matrix, expanded_matrix, normalized_fermi_energy]
def check_directory_fermi(vasp_path):
if Path(vasp_path).joinpath('OUTCAR').exists() and Path(vasp_path).joinpath('EIGENVAL').exists():
with Path(vasp_path).joinpath('OUTCAR').open('r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
            if 'KPOINTS' in line and 'fermi-surface' in line:
return True
return False
def fermi_parser(hdf_file_path, vasp_dir_path):
"""
    Reads OUTCAR and EIGENVAL to create the data structure for visualization of Fermi surfaces
Parameters
----------
hdf_file_path: str
Path where hdf file will be written to
vasp_dir_path: str
        Path of directory containing OUTCAR and EIGENVAL files
Returns
-------
None
"""
# Check for files
# ---------------
outcar_file_path = Path(vasp_dir_path).joinpath('OUTCAR')
eigenval_file_path = Path(vasp_dir_path).joinpath('EIGENVAL')
if not outcar_file_path.exists() or not eigenval_file_path.exists():
raise FileNotFoundError('Cannot find one of the two vasp files in directory %s' % vasp_dir_path)
# Parse OUTCAR file for fermi energy and reciprocal lattice vectors
# https://www.vasp.at/wiki/index.php/OUTCAR
# --------------------------------------------------------------
with outcar_file_path.open('r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if 'E-fermi' in line:
fermi_energy = float(re.findall(r'-?[\d.]+', line)[0])
if 'reciprocal lattice vectors' in line:
base_x = re.findall(r'-?[\d.]+', lines[i + 1])[3:]
base_x = [float(x) for x in base_x]
base_y = re.findall(r'-?[\d.]+', lines[i + 2])[3:]
base_y = [float(x) for x in base_y]
base_z = re.findall(r'-?[\d.]+', lines[i + 3])[3:]
base_z = [float(x) for x in base_z]
basis = np.array([base_x, base_y, base_z])
# Parse EIGENVAL file for all calculated K-Points and band energies
# https://www.vasp.at/wiki/index.php/EIGENVAL
# ----------------------------------------------------------------
with eigenval_file_path.open('r') as f:
lines = f.readlines()
# collect meta data
[_, _, _, nspin] = [int(v) for v in re.findall(r'[\d]+', lines[0])]
nelectrons, nkpoints, nbands = [int(v) for v in re.findall(r'[\d]+', lines[5])]
kpoints = np.zeros(shape=(nkpoints, 4))
evalues = np.zeros(shape=(nkpoints, nbands, nspin), dtype=np.float32)
kpoint_index = 0
for i, line in enumerate(lines[7:]):
regex = re.findall(r'[-\d.E+]+', line)
# kpoint
if len(regex) == 4:
kpoints[kpoint_index, :] = [float(v) for v in regex]
kpoint_index += 1
# eigenvalue
elif len(regex) > 0:
band_index = int(regex[0])
values = [float(v) for v in regex[1:1+nspin:]]
evalues[kpoint_index - 1, band_index - 1, :] = values
# derive dimensions from unique kpoints
nkpoints_x = len(set(kpoints[:, 0]))
nkpoints_y = len(set(kpoints[:, 1]))
nkpoints_z = len(set(kpoints[:, 2]))
# Write data to HDF5
# ------------------
hdf_file = h5py.File(hdf_file_path, 'a')
hdf_file.create_dataset('fermi_energy', data=np.array(fermi_energy))
hdf_file.create_dataset('reciprocal_basis', data=basis)
hdf_group = hdf_file.create_group('fermi_bands')
for band_index in range(nbands):
band = np.reshape(evalues[:, band_index, 0], (nkpoints_x, nkpoints_y, nkpoints_z))
hdf_subgroup = hdf_group.create_group(str(band_index))
hdf_subgroup.create_dataset('composition', data=band, dtype='float32')
volumes = convert_fermi_volumes(band, basis, fermi_energy)
hdf_subgroup.create_dataset('fermi_volume', data=volumes[0], dtype=np.float32)
hdf_subgroup.create_dataset('brillouin_zone', data=volumes[1], dtype=np.float32)
hdf_subgroup.create_dataset('expanded_volume', data=volumes[2], dtype=np.float32)
hdf_subgroup.create_dataset('normalized_fermi_energy', data=np.array(volumes[3]), dtype=np.float32)
hdf_file.close()
return True
if __name__ == '__main__':
fermi_parser(sys.argv[1], sys.argv[2])
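# Hedged usage note (paths are illustrative): the module doubles as a script,
#     python fermi_parser.py output.hdf5 /path/to/vasp_run
# where the VASP directory must contain OUTCAR and EIGENVAL from a
# fermi-surface KPOINTS run (see check_directory_fermi above).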
|
rartino/ENVISIoN
|
envisionpy/hdf5parser/vasp/fermi_parser.py
|
Python
|
bsd-2-clause
| 12,074
|
[
"VASP"
] |
a4c364f62f1e7c88b6510b34bb0349ab14ba379d958f91e4dd9d6a7417041a92
|
"""
DIRAC.StorageManagementSystem.Client package
"""
|
DIRACGrid/DIRAC
|
src/DIRAC/StorageManagementSystem/Client/__init__.py
|
Python
|
gpl-3.0
| 56
|
[
"DIRAC"
] |
6ee3a70640bc0a259c1a77055338e07436226dd327393f5dda2c1d92efecda73
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
r"""servermanager is a module for using paraview server manager in Python.
One can always use the server manager API directly. However, this module
provides an interface easier to use from Python by wrapping several VTK
classes around Python classes.
Note that, upon load, this module will create several sub-modules: sources,
filters and rendering. These modules can be used to instantiate specific
proxy types. For a list, try "dir(servermanager.sources)"
A simple example:
from paraview.servermanager import *
# Creates a new built-in session and makes it the active session.
Connect()
# Creates a new render view on the active session.
renModule = CreateRenderView()
# Create a new sphere proxy on the active session and register it
# in the sources group.
sphere = sources.SphereSource(registrationGroup="sources", ThetaResolution=16, PhiResolution=32)
# Create a representation for the sphere proxy and adds it to the render
# module.
display = CreateRepresentation(sphere, renModule)
renModule.StillRender()
"""
import re, os, new, sys
from paravis import *
# VTN: Avoid paraview.* instructions in this file.
# It leads to problems during execution.
def _wrap_property(proxy, smproperty):
""" Internal function.
Given a server manager property and its domains, returns the
appropriate python object.
"""
property = None
if smproperty.IsA("vtkSMStringVectorProperty"):
al = smproperty.GetDomain("array_list")
if al and al.IsA("vtkSMArraySelectionDomain") and \
smproperty.GetRepeatable():
property = ArrayListProperty(proxy, smproperty)
elif al and al.IsA("vtkSMArrayListDomain") and smproperty.GetNumberOfElements() == 5:
property = ArraySelectionProperty(proxy, smproperty)
else:
iter = smproperty.NewDomainIterator()
isFileName = False
while not iter.IsAtEnd():
# Refer to BUG #9710 to see why optional domains need to be
# ignored.
if iter.GetDomain().IsA("vtkSMFileListDomain") and \
iter.GetDomain().GetIsOptional() == 0 :
isFileName = True
break
iter.Next()
iter.UnRegister(None)
if isFileName:
property = FileNameProperty(proxy, smproperty)
elif _make_name_valid(smproperty.GetXMLLabel()) == 'ColorArrayName':
property = ColorArrayProperty(proxy, smproperty)
else:
property = VectorProperty(proxy, smproperty)
elif smproperty.IsA("vtkSMVectorProperty"):
if smproperty.IsA("vtkSMIntVectorProperty") and \
smproperty.GetDomain("enum"):
property = EnumerationProperty(proxy, smproperty)
else:
property = VectorProperty(proxy, smproperty)
elif smproperty.IsA("vtkSMInputProperty"):
property = InputProperty(proxy, smproperty)
elif smproperty.IsA("vtkSMProxyProperty"):
property = ProxyProperty(proxy, smproperty)
else:
property = Property(proxy, smproperty)
return property
class Proxy(object):
"""Proxy for a server side object. A proxy manages the lifetime of
one or more server manager objects. It also provides an interface
to set and get the properties of the server side objects. These
properties are presented as Python properties. For example,
you can set a property Foo using the following:
proxy.Foo = (1,2)
or
proxy.Foo.SetData((1,2))
or
proxy.Foo[0:2] = (1,2)
For more information, see the documentation of the property which
you can obtain with
help(proxy.Foo).
This class also provides an iterator which can be used to iterate
over all properties.
eg:
proxy = Proxy(proxy=smproxy)
for property in proxy:
print property
For advanced users:
This is a python class that wraps a vtkSMProxy.. Makes it easier to
set/get properties.
Instead of:
proxy.GetProperty("Foo").SetElement(0, 1)
proxy.GetProperty("Foo").SetElement(0, 2)
you can do:
proxy.Foo = (1,2)
or
proxy.Foo.SetData((1,2))
or
proxy.Foo[0:2] = (1,2)
Instead of:
proxy.GetProperty("Foo").GetElement(0)
you can do:
proxy.Foo.GetData()[0]
or
proxy.Foo[0]
For proxy properties, you can use append:
proxy.GetProperty("Bar").AddProxy(foo)
you can do:
proxy.Bar.append(foo)
Properties support most of the list API. See VectorProperty and
ProxyProperty documentation for details.
Please note that some of the methods accessible through the Proxy
class are not listed by help() because the Proxy objects forward
unresolved attributes to the underlying object. To get the full list,
see also dir(proxy.SMProxy). See also the doxygen based documentation
of the vtkSMProxy C++ class.
"""
def __init__(self, **args):
""" Default constructor. It can be used to initialize properties
by passing keyword arguments where the key is the name of the
property. In addition registrationGroup and registrationName (optional)
can be specified (as keyword arguments) to automatically register
the proxy with the proxy manager. """
self.add_attribute('Observed', None)
self.add_attribute('ObserverTag', -1)
self.add_attribute('_Proxy__Properties', {})
self.add_attribute('_Proxy__LastAttrName', None)
self.add_attribute('SMProxy', None)
self.add_attribute('Port', 0)
if 'port' in args:
self.Port = args['port']
del args['port']
update = True
if 'no_update' in args:
if args['no_update']:
update = False
del args['no_update']
if 'proxy' in args:
self.InitializeFromProxy(args['proxy'])
del args['proxy']
else:
self.Initialize(None, update)
if 'registrationGroup' in args:
registrationGroup = args['registrationGroup']
del args['registrationGroup']
registrationName = self.SMProxy.GetGlobalIDAsString()
if 'registrationName' in args:
registrationName = args['registrationName']
del args['registrationName']
pxm = ProxyManager()
pxm.RegisterProxy(registrationGroup, registrationName, self.SMProxy)
if update:
self.UpdateVTKObjects()
for key in args.keys():
setattr(self, key, args[key])
# Visit all properties so that they are created
for prop in self:
pass
def __setattr__(self, name, value):
try:
setter = getattr(self.__class__, name)
setter = setter.__set__
except AttributeError:
if not hasattr(self, name):
raise AttributeError("Attribute %s does not exist. " % name +
" This class does not allow addition of new attributes to avoid " +
"mistakes due to typos. Use add_attribute() if you really want " +
"to add this attribute.")
self.__dict__[name] = value
else:
setter(self, value)
def add_attribute(self, name, value):
self.__dict__[name] = value
def __del__(self):
"""Destructor. Cleans up all observers as well as remove
the proxy from the _pyproxies dictionary"""
# Make sure that we remove observers we added
if self.Observed:
observed = self.Observed
tag = self.ObserverTag
self.Observed = None
self.ObserverTag = -1
observed.RemoveObserver(tag)
if _pyproxies and self.SMProxy and (self.SMProxy, self.Port) in _pyproxies:
del _pyproxies[(self.SMProxy, self.Port)]
def InitializeFromProxy(self, aProxy, update=True):
"""Constructor. Assigns proxy to self.SMProxy, updates the server
object as well as register the proxy in _pyproxies dictionary."""
import weakref
self.SMProxy = aProxy
if update:
self.SMProxy.UpdateVTKObjects()
_pyproxies[(self.SMProxy, self.Port)] = weakref.ref(self)
def Initialize(self):
"Overridden by the subclass created automatically"
pass
def __eq__(self, other):
"Returns true if the underlying SMProxies are the same."
if isinstance(other, Proxy):
try:
if self.Port != other.Port:
return False
except:
pass
## VSV using IsSame instead ==
return self.SMProxy.IsSame(other.SMProxy)
return self.SMProxy.IsSame(other)
def __ne__(self, other):
"Returns false if the underlying SMProxies are the same."
return not self.__eq__(other)
def __iter__(self):
"Creates an iterator for the properties."
return PropertyIterator(self)
def SetPropertyWithName(self, pname, arg):
"""Generic method for setting the value of a property."""
prop = self.GetProperty(pname)
if prop is None:
raise RuntimeError, "Property %s does not exist. Please check the property name for typos." % pname
prop.SetData(arg)
def GetPropertyValue(self, name):
"""Returns a scalar for properties with 1 elements, the property
itself for vectors."""
p = self.GetProperty(name)
if isinstance(p, VectorProperty):
if p.GetNumberOfElements() == 1 and not p.GetRepeatable():
if p.SMProperty.IsA("vtkSMStringVectorProperty") or not p.GetArgumentIsArray():
return p[0]
elif isinstance(p, InputProperty):
if not p.GetMultipleInput():
if len(p) > 0:
return p[0]
else:
return None
elif isinstance(p, ProxyProperty):
if not p.GetRepeatable():
if len(p) > 0:
return p[0]
else:
return None
return p
def GetProperty(self, name):
"""Given a property name, returns the property object."""
if name in self.__Properties and self.__Properties[name]():
return self.__Properties[name]()
smproperty = self.SMProxy.GetProperty(name)
# Maybe they are looking by the label. Try to match that.
if not smproperty:
iter = PropertyIterator(self)
for prop in iter:
if name == _make_name_valid(iter.PropertyLabel):
smproperty = prop.SMProperty
break
if smproperty:
property = _wrap_property(self, smproperty)
if property is not None:
import weakref
self.__Properties[name] = weakref.ref(property)
return property
return None
def ListProperties(self):
"""Returns a list of all property names on this proxy."""
property_list = []
iter = self.__iter__()
for property in iter:
name = _make_name_valid(iter.PropertyLabel)
if name:
property_list.append(name)
return property_list
def __ConvertArgumentsAndCall(self, *args):
""" Internal function.
Used to call a function on SMProxy. Converts input and
output values as appropriate.
"""
newArgs = []
for arg in args:
if issubclass(type(arg), Proxy) or isinstance(arg, Proxy):
newArgs.append(arg.SMProxy)
else:
newArgs.append(arg)
func = getattr(self.SMProxy, self.__LastAttrName)
retVal = func(*newArgs)
if type(retVal) is type(self.SMProxy) and retVal.IsA("vtkSMProxy"):
return _getPyProxy(retVal)
elif type(retVal) is type(self.SMProxy) and retVal.IsA("vtkSMProperty"):
return _wrap_property(self, retVal)
else:
return retVal
def __GetActiveCamera(self):
""" This method handles GetActiveCamera specially. It adds
an observer to the camera such that everytime it is modified
the render view updated"""
import weakref
c = self.SMProxy.GetActiveCamera()
# VSV: Observers are not supported
## if not c.HasObserver("ModifiedEvent"):
## self.ObserverTag =c.AddObserver("ModifiedEvent", _makeUpdateCameraMethod(weakref.ref(self)))
## self.Observed = c
return c
def __getattr__(self, name):
"""With the exception of a few overloaded methods,
returns the SMProxy method"""
if not self.SMProxy:
raise AttributeError("class %s has no attribute %s" % ("None", name))
return None
# Handle GetActiveCamera specially.
if name == "GetActiveCamera" and \
hasattr(self.SMProxy, "GetActiveCamera"):
return self.__GetActiveCamera
if name == "SaveDefinition" and hasattr(self.SMProxy, "SaveDefinition"):
return self.__SaveDefinition
# If not a property, see if SMProxy has the method
try:
proxyAttr = getattr(self.SMProxy, name)
self.__LastAttrName = name
return self.__ConvertArgumentsAndCall
except:
pass
return getattr(self.SMProxy, name)
class SourceProxy(Proxy):
"""Proxy for a source object. This class adds a few methods to Proxy
that are specific to sources. It also provides access to the output
ports. Output ports can be accessed by name or index:
> op = source[0]
or
> op = source['some name'].
"""
def UpdatePipeline(self, time=None):
"""This method updates the server-side VTK pipeline and the associated
data information. Make sure to update a source to validate the output
meta-data."""
if time != None:
self.SMProxy.UpdatePipeline(time)
else:
self.SMProxy.UpdatePipeline()
# This is here to cause a receive
# on the client side so that progress works properly.
if ActiveConnection and ActiveConnection.IsRemote():
self.SMProxy.GetDataInformation()
def FileNameChanged(self):
"Called when the filename of a source proxy is changed."
self.UpdatePipelineInformation()
def UpdatePipelineInformation(self):
"""This method updates the meta-data of the server-side VTK pipeline and
the associated information properties"""
self.SMProxy.UpdatePipelineInformation()
def GetDataInformation(self, idx=None):
"""This method returns a DataInformation wrapper around a
vtkPVDataInformation"""
if idx == None:
idx = self.Port
if self.SMProxy:
return DataInformation( \
self.SMProxy.GetDataInformation(idx), \
self.SMProxy, idx)
def __getitem__(self, idx):
"""Given a slice, int or string, returns the corresponding
output port"""
if isinstance(idx, slice):
indices = idx.indices(self.SMProxy.GetNumberOfOutputPorts())
retVal = []
for i in range(*indices):
retVal.append(OutputPort(self, i))
return retVal
elif isinstance(idx, int):
if idx >= self.SMProxy.GetNumberOfOutputPorts() or idx < 0:
raise IndexError
return OutputPort(self, idx)
else:
return OutputPort(self, self.SMProxy.GetOutputPortIndex(idx))
def GetPointDataInformation(self):
"""Returns the associated point data information."""
self.UpdatePipeline()
return FieldDataInformation(self.SMProxy, self.Port, "PointData")
def GetCellDataInformation(self):
"""Returns the associated cell data information."""
self.UpdatePipeline()
return FieldDataInformation(self.SMProxy, self.Port, "CellData")
def GetFieldDataInformation(self):
"""Returns the associated cell data information."""
self.UpdatePipeline()
return FieldDataInformation(self.SMProxy, self.Port, "FieldData")
PointData = property(GetPointDataInformation, None, None, "Returns point data information")
CellData = property(GetCellDataInformation, None, None, "Returns cell data information")
FieldData = property(GetFieldDataInformation, None, None, "Returns field data information")
class ExodusIIReaderProxy(SourceProxy):
"""Special class to define convenience functions for array
selection."""
def FileNameChanged(self):
"Called when the filename changes. Selects all variables."
SourceProxy.FileNameChanged(self)
self.SelectAllVariables()
def SelectAllVariables(self):
"Select all available variables for reading."
for prop in ('PointVariables', 'EdgeVariables', 'FaceVariables',
'ElementVariables', 'GlobalVariables'):
f = getattr(self, prop)
f.SelectAll()
def DeselectAllVariables(self):
"Deselects all variables."
for prop in ('PointVariables', 'EdgeVariables', 'FaceVariables',
'ElementVariables', 'GlobalVariables'):
f = getattr(self, prop)
f.DeselectAll()
class ViewLayoutProxy(Proxy):
"""Special class to define convenience methods for View Layout"""
def SplitViewHorizontal(self, view, fraction=0.5):
"""Split the cell containing the specified view horizontally.
If no fraction is specified, the frame is split into equal parts.
        On success returns a positive number identifying the new cell
        location, which can be used to assign a view to or to split further.
        Returns -1 on failure."""
location = self.GetViewLocation(view)
if location == -1:
raise RuntimeError, "View is not present in this layout."
if fraction < 0.0 or fraction > 1.0:
raise RuntimeError, "'fraction' must be in the range [0.0, 1.0]"
return self.SMProxy.SplitHorizontal(location, fraction)
def SplitViewVertical(self, view=None, fraction=0.5):
"""Split the cell containing the specified view horizontally.
If no view is specified, active view is used.
If no fraction is specified, the frame is split into equal parts.
On success returns a positve number that identifying the new cell
location that can be used to assign view to, or split further.
Return -1 on failure."""
location = self.GetViewLocation(view)
if location == -1:
raise RuntimeError, "View is not present in this layout."
if fraction < 0.0 or fraction > 1.0:
raise RuntimeError, "'fraction' must be in the range [0.0, 1.0]"
return self.SMProxy.SplitVertical(location, fraction)
def AssignView(self, location, view):
"""Assign a view at a particular location. Note that the view's position may
be changed by subsequent Split() calls. Returns true on success."""
if isinstance(view, Proxy):
view = view.SMProxy
return self.SMProxy.AssignView(location, view)
def GetViewLocation(self, view):
if isinstance(view, Proxy):
view = view.SMProxy
return self.SMProxy.GetViewLocation(view)
class Property(object):
"""Generic property object that provides access to one of the properties of
a server object. This class does not allow setting/getting any values but
provides an interface to update a property using __call__. This can be used
for command properties that correspond to function calls without arguments.
For example,
> proxy.Foo()
would push a Foo property which may cause the proxy to call a Foo method
on the actual VTK object.
For advanced users:
Python wrapper around a vtkSMProperty with a simple interface.
In addition to all method provided by vtkSMProperty (obtained by
forwarding unknown attributes requests to the underlying SMProxy),
Property and sub-class provide a list API.
Please note that some of the methods accessible through the Property
class are not listed by help() because the Property objects forward
unresolved attributes to the underlying object. To get the full list,
see also dir(proxy.SMProperty). See also the doxygen based documentation
of the vtkSMProperty C++ class.
"""
def __init__(self, proxy, smproperty):
"""Default constructor. Stores a reference to the proxy."""
import weakref
self.SMProperty = smproperty
self.Proxy = proxy
def __repr__(self):
"""Returns a string representation containing property name
and value"""
if not type(self) is Property:
if self.GetData() is not None:
repr = self.GetData().__repr__()
else:
repr = "None"
else:
repr = "Property name= "
name = self.Proxy.GetPropertyName(self.SMProperty)
if name:
repr += name
else:
repr += "Unknown"
return repr
def __call__(self):
"""Forces a property update using InvokeCommand."""
if type(self) is Property:
self.Proxy.SMProxy.InvokeCommand(self._FindPropertyName())
else:
raise RuntimeError, "Cannot invoke this property"
def _FindPropertyName(self):
"Returns the name of this property."
return self.Proxy.GetPropertyName(self.SMProperty)
def _UpdateProperty(self):
"Pushes the value of this property to the server."
# For now, we are updating all properties. This is due to an
# issue with the representations. Their VTK objects are not
        # created until Input is set; therefore, updating a property
        # has no effect. Updating all properties every time one is
        # updated has the effect of pushing values set before Input
# when Input is updated.
# self.Proxy.SMProxy.UpdateProperty(self._FindPropertyName())
self.Proxy.SMProxy.UpdateVTKObjects()
def __getattr__(self, name):
"Unknown attribute requests get forwarded to SMProperty."
return getattr(self.SMProperty, name)
Name = property(_FindPropertyName, None, None,
"Returns the name for the property")
class GenericIterator(object):
"""Iterator for container type objects"""
def __init__(self, obj):
self.Object = obj
self.index = 0
def __iter__(self):
return self
def next(self):
if self.index >= len(self.Object):
raise StopIteration
idx = self.index
self.index += 1
return self.Object[idx]
class VectorProperty(Property):
"""A VectorProperty provides access to one or more values. You can use
a slice to get one or more property values:
> val = property[2]
or
> vals = property[0:5:2]
You can use a slice to set one or more property values:
> property[2] = val
or
> property[1:3] = (1,2)
"""
def ConvertValue(self, value):
return value
def __len__(self):
"""Returns the number of elements."""
return self.SMProperty.GetNumberOfElements()
def __iter__(self):
"""Implementation of the sequence API"""
return GenericIterator(self)
def __setitem__(self, idx, value):
"""Given a list or tuple of values, sets a slice of values [min, max)"""
if isinstance(idx, slice):
indices = idx.indices(len(self))
for i, j in zip(range(*indices), value):
self.SMProperty.SetElement(i, self.ConvertValue(j))
self._UpdateProperty()
elif idx >= len(self) or idx < 0:
raise IndexError
else:
self.SMProperty.SetElement(idx, self.ConvertValue(value))
self._UpdateProperty()
def GetElement(self, index):
return self.SMProperty.GetElement(index)
def __getitem__(self, idx):
"""Returns the range [min, max) of elements. Raises an IndexError
exception if an argument is out of bounds."""
ls = len(self)
if isinstance(idx, slice):
indices = idx.indices(ls)
retVal = []
for i in range(*indices):
retVal.append(self.GetElement(i))
return retVal
elif idx >= ls:
raise IndexError
elif idx < 0:
idx = ls + idx
if idx < 0:
raise IndexError
return self.GetElement(idx)
def GetData(self):
"Returns all elements as either a list or a single value."
property = self.SMProperty
if property.GetRepeatable() or \
property.GetNumberOfElements() > 1:
return self[0:len(self)]
elif property.GetNumberOfElements() == 1:
return self.GetElement(0)
def SetData(self, values):
"""Allows setting of all values at once. Requires a single value or
a iterable object."""
if not hasattr(values, "__iter__"):
values = (values,)
if not self.GetRepeatable() and len(values) != self.GetNumberOfElements():
raise RuntimeError("This property requires %d values." % self.GetNumberOfElements())
if self.GetRepeatable():
# Clean up first
self.SMProperty.SetNumberOfElements(0)
idx = 0
for val in values:
self.SMProperty.SetElement(idx, self.ConvertValue(val))
idx += 1
self._UpdateProperty()
def Clear(self):
"Removes all elements."
self.SMProperty().SetNumberOfElements(0)
self._UpdateProperty()
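# Hedged sketch (illustration only; `sphere` is any source proxy created as in
# the module docstring): the VectorProperty list API in action.
#     sphere.Radius = 2.0            # single-element property
#     sphere.Center[0:2] = (1, 2)    # slice assignment pushes to the server
#     r = sphere.Radius              # 1-element properties read back as scalars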
class ColorArrayProperty(VectorProperty):
"""This subclass of VectorProperty handles setting of the array to
color by. It handles attribute type as well as well array name."""
def GetAvailable(self):
"""Returns the list of available arrays as (attribute type, array name
tuples."""
arrays = []
for a in self.Proxy.Input.PointData:
arrays.append(('POINT_DATA', a.GetName()))
for a in self.Proxy.Input.CellData:
arrays.append(('CELL_DATA', a.GetName()))
return arrays
def SetData(self, value):
"""Overwritten to enable setting attribute type (the ColorAttributeType
property and the array name. The argument should be the array name
(in which case the first appropriate attribute type is picked) or
a tuple of attribute type and array name."""
if isinstance(value, tuple) and len(value) == 2:
att = value[0]
arr = value[1]
elif isinstance(value, str):
att = None
arr = value
else:
raise ValueError("Expected a tuple of 2 values or a string.")
if not arr:
self.SMProperty.SetElement(0, '')
self._UpdateProperty()
return
found = False
for a in self.Available:
if a[1] == arr and (not att or att == a[0]):
att = a[0]
found = True
break
if not found:
pvoptions = vtkProcessModule.GetProcessModule().GetOptions()
            # if this process is from a parallel batch run in symmetric MPI
            # mode then we may not have any points or cells on some processes,
            # in which case we'll probably be missing the point and cell data
            # too. The check below makes sure that we avoid this situation.
if pvoptions.GetProcessType() != 0x40 or pvoptions.GetSymmetricMPIMode() == False \
or len(self.Available) != 0:
raise ValueError("Could not locate array %s in the input." % arr)
catt = self.Proxy.GetProperty("ColorAttributeType")
if att != None:
catt.SetData(att)
self.SMProperty.SetElement(0, arr)
self._UpdateProperty()
Available = property(GetAvailable, None, None, \
"This read-only property returns the list of arrays that can be colored by.")
class EnumerationProperty(VectorProperty):
"""Subclass of VectorProperty that is applicable for enumeration type
properties."""
def GetElement(self, index):
"""Returns the text for the given element if available. Returns
the numerical values otherwise."""
val = self.SMProperty.GetElement(index)
domain = self.SMProperty.GetDomain("enum")
for i in range(domain.GetNumberOfEntries()):
if domain.GetEntryValue(i) == val:
return domain.GetEntryText(i)
return val
def ConvertValue(self, value):
"""Converts value to type suitable for vtSMProperty::SetElement()"""
if type(value) == str:
domain = self.SMProperty.GetDomain("enum")
if domain.HasEntryText(value):
return domain.GetEntryValueForText(value)
else:
raise ValueError("%s is not a valid value." % value)
return VectorProperty.ConvertValue(self, value)
def GetAvailable(self):
"Returns the list of available values for the property."
retVal = []
domain = self.SMProperty.GetDomain("enum")
for i in range(domain.GetNumberOfEntries()):
retVal.append(domain.GetEntryText(i))
return retVal
Available = property(GetAvailable, None, None, \
"This read-only property contains the list of values that can be applied to this property.")
class FileNameProperty(VectorProperty):
"""Property to set/get one or more file names.
This property updates the pipeline information everytime its value changes.
This is used to keep the array lists up to date."""
def _UpdateProperty(self):
"Pushes the value of this property to the server."
VectorProperty._UpdateProperty(self)
self.Proxy.FileNameChanged()
class ArraySelectionProperty(VectorProperty):
"Property to select an array to be processed by a filter."
def GetAssociation(self):
val = self.GetElement(3)
if val == "":
return None
for key, value in ASSOCIATIONS.iteritems():
if value == int(val):
return key
return None
def GetArrayName(self):
return self.GetElement(4)
def __len__(self):
"""Returns the number of elements."""
return 2
def __setitem__(self, idx, value):
raise RuntimeError, "This property cannot be accessed using __setitem__"
def __getitem__(self, idx):
"""Returns attribute type for index 0, array name for index 1"""
if isinstance(idx, slice):
indices = idx.indices(len(self))
retVal = []
for i in range(*indices):
if i >= 2 or i < 0:
raise IndexError
if i == 0:
retVal.append(self.GetAssociation())
else:
retVal.append(self.GetArrayName())
return retVal
elif idx >= 2 or idx < 0:
raise IndexError
        if idx == 0:
            return self.GetAssociation()
        else:
            return self.GetArrayName()
def SetData(self, values):
"""Allows setting of all values at once. Requires a single value,
a tuple or list."""
if not isinstance(values, tuple) and \
not isinstance(values, list):
values = (values,)
if len(values) == 1:
self.SMProperty.SetElement(4, values[0])
elif len(values) == 2:
if isinstance(values[0], str):
val = str(ASSOCIATIONS[values[0]])
else:
# In case user didn't specify valid association,
# just pick POINTS.
val = str(ASSOCIATIONS['POINTS'])
self.SMProperty.SetElement(3, str(val))
self.SMProperty.SetElement(4, values[1])
else:
raise RuntimeError, "Expected 1 or 2 values."
self._UpdateProperty()
def UpdateDefault(self):
"Helper method to set default values."
if self.SMProperty.GetNumberOfElements() != 5:
return
if self.GetElement(4) != '' or \
self.GetElement(3) != '':
return
for i in range(0,3):
if self.GetElement(i) == '':
self.SMProperty.SetElement(i, '0')
al = self.SMProperty.GetDomain("array_list")
al.Update(self.SMProperty)
al.SetDefaultValues(self.SMProperty)
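# Illustrative sketch (not part of the original module, never called here):
# ArraySelectionProperty exposes only (association, array name) out of the
# five underlying elements. The 'SelectInputScalars' property and the
# 'Temperature' array below are illustrative.
def _example_array_selection(contour):
    sel = contour.GetProperty("SelectInputScalars")
    sel.SetData(('POINTS', 'Temperature'))  # association plus array name
    print sel[0], sel[1]                    # -> association, array name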
class ArrayListProperty(VectorProperty):
"""This property provides a simpler interface for selecting arrays.
Simply assign a list of arrays that should be loaded by the reader.
Use the Available property to get a list of available arrays."""
def __init__(self, proxy, smproperty):
VectorProperty.__init__(self, proxy, smproperty)
self.__arrays = []
def GetAvailable(self):
"Returns the list of available arrays"
dm = self.GetDomain("array_list")
retVal = []
for i in range(dm.GetNumberOfStrings()):
retVal.append(dm.GetString(i))
return retVal
Available = property(GetAvailable, None, None, \
"This read-only property contains the list of items that can be read by a reader.")
def SelectAll(self):
"Selects all arrays."
self.SetData(self.Available)
def DeselectAll(self):
"Deselects all arrays."
self.SetData([])
def __iter__(self):
"""Implementation of the sequence API"""
return GenericIterator(self)
def __len__(self):
"""Returns the number of elements."""
return len(self.GetData())
def __setitem__(self, idx, value):
"""Given a list or tuple of values, sets a slice of values [min, max)"""
self.GetData()
if isinstance(idx, slice):
indices = idx.indices(len(self))
for i, j in zip(range(*indices), value):
self.__arrays[i] = j
self.SetData(self.__arrays)
elif idx >= len(self) or idx < 0:
raise IndexError
else:
self.__arrays[idx] = self.ConvertValue(value)
self.SetData(self.__arrays)
def __getitem__(self, idx):
"""Returns the range [min, max) of elements. Raises an IndexError
exception if an argument is out of bounds."""
self.GetData()
if isinstance(idx, slice):
indices = idx.indices(len(self))
retVal = []
for i in range(*indices):
retVal.append(self.__arrays[i])
return retVal
elif idx >= len(self) or idx < 0:
raise IndexError
return self.__arrays[idx]
def SetData(self, values):
"""Allows setting of all values at once. Requires a single value,
a tuple or list."""
# Clean up first
iup = self.SMProperty.GetImmediateUpdate()
self.SMProperty.SetImmediateUpdate(False)
# Clean up first
self.SMProperty.SetNumberOfElements(0)
if not isinstance(values, tuple) and \
not isinstance(values, list):
values = (values,)
fullvalues = []
# WARNING:
        # The order of the two loops below is deliberately set this way
        # so that values passed in will take precedence.
        # This is needed for backward compatibility of the
        # property ElementBlocks for vtkExodusIIReader.
        # If you attempt to change this, please verify that
        # python state files for opening old .ex2 files (<=3.14) still work.
for array in self.Available:
            if array not in values:
fullvalues.append(array)
fullvalues.append('0')
for i in range(len(values)):
val = self.ConvertValue(values[i])
fullvalues.append(val)
fullvalues.append('1')
i = 0
for value in fullvalues:
self.SMProperty.SetElement(i, value)
i += 1
self._UpdateProperty()
self.SMProperty.SetImmediateUpdate(iup)
def GetData(self):
"Returns all elements as a list."
property = self.SMProperty
nElems = property.GetNumberOfElements()
if nElems%2 != 0:
raise ValueError, "The SMProperty with XML label '%s' has a size that is not a multiple of 2." % property.GetXMLLabel()
self.__arrays = []
for i in range(0, nElems, 2):
if self.GetElement(i+1) != '0':
self.__arrays.append(self.GetElement(i))
return list(self.__arrays)
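# Illustrative sketch (not part of the original module, never called here):
# ArrayListProperty behaves like a list of selected names backed by
# (name, '0'/'1') element pairs. The 'PointArrayStatus' property name below
# is illustrative of a typical reader.
def _example_array_list(reader):
    status = reader.GetProperty("PointArrayStatus")
    print status.Available                # every array the reader can provide
    status.DeselectAll()
    status.SetData(status.Available[:2])  # load only the first two arrays
    for name in status:                   # iterates over selected names only
        print name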
class ProxyProperty(Property):
"""A ProxyProperty provides access to one or more proxies. You can use
a slice to get one or more property values:
> proxy = property[2]
or
> proxies = property[0:5:2]
You can use a slice to set one or more property values:
> property[2] = proxy
or
> property[1:3] = (proxy1, proxy2)
You can also append and delete:
> property.append(proxy)
and
> del property[1:2]
You can also remove all elements with Clear().
Note that some properties expect only 1 proxy and will complain if
you set the number of values to be something else.
"""
def __init__(self, proxy, smproperty):
"""Default constructor. Stores a reference to the proxy. Also looks
at domains to find valid values."""
Property.__init__(self, proxy, smproperty)
# Check to see if there is a proxy list domain and, if so,
# initialize ourself. (Should this go in ProxyProperty?)
listdomain = self.GetDomain('proxy_list')
if listdomain:
if listdomain.GetClassName() != 'vtkSMProxyListDomain':
raise ValueError, "Found a 'proxy_list' domain on an InputProperty that is not a ProxyListDomain."
pm = ProxyManager()
group = "pq_helper_proxies." + proxy.GetGlobalIDAsString()
if listdomain.GetNumberOfProxies() == 0:
for i in xrange(listdomain.GetNumberOfProxyTypes()):
igroup = listdomain.GetProxyGroup(i)
name = listdomain.GetProxyName(i)
iproxy = CreateProxy(igroup, name)
listdomain.AddProxy(iproxy)
pm.RegisterProxy(group, proxy.GetPropertyName(smproperty), iproxy)
listdomain.SetDefaultValues(self.SMProperty)
def GetAvailable(self):
"""If this proxy has a list domain, then this function returns the
strings you can use to select from the domain. If there is no such
list domain, the returned list is empty."""
listdomain = self.GetDomain('proxy_list')
retval = []
if listdomain:
for i in xrange(listdomain.GetNumberOfProxies()):
proxy = listdomain.GetProxy(i)
retval.append(proxy.GetXMLLabel())
return retval
Available = property(GetAvailable, None, None,
"""This read only property is a list of strings you can
use to select from the list domain. If there is no
such list domain, the array is empty.""")
def __iter__(self):
"""Implementation of the sequence API"""
return GenericIterator(self)
def __len__(self):
"""Returns the number of elements."""
return self.SMProperty.GetNumberOfProxies()
def remove(self, proxy):
"""Removes the first occurence of the proxy from the property."""
self.SMProperty.RemoveProxy(proxy.SMProxy)
self._UpdateProperty()
def __setitem__(self, idx, value):
"""Given a list or tuple of values, sets a slice of values [min, max)"""
if isinstance(idx, slice):
indices = idx.indices(len(self))
for i, j in zip(range(*indices), value):
self.SMProperty.SetProxy(i, j.SMProxy)
self._UpdateProperty()
elif idx >= len(self) or idx < 0:
raise IndexError
else:
self.SMProperty.SetProxy(idx, value.SMProxy)
self._UpdateProperty()
def __delitem__(self,idx):
"""Removes the element idx"""
if isinstance(idx, slice):
indices = idx.indices(len(self))
# Collect the elements to delete to a new list first.
# Otherwise indices are screwed up during the actual
# remove loop.
toremove = []
for i in range(*indices):
toremove.append(self[i])
for i in toremove:
self.SMProperty.RemoveProxy(i.SMProxy)
self._UpdateProperty()
elif idx >= len(self) or idx < 0:
raise IndexError
else:
self.SMProperty.RemoveProxy(self[idx].SMProxy)
self._UpdateProperty()
def __getitem__(self, idx):
"""Returns the range [min, max) of elements. Raises an IndexError
exception if an argument is out of bounds."""
if isinstance(idx, slice):
indices = idx.indices(len(self))
retVal = []
for i in range(*indices):
retVal.append(_getPyProxy(self.SMProperty.GetProxy(i)))
return retVal
elif idx >= len(self) or idx < 0:
raise IndexError
return _getPyProxy(self.SMProperty.GetProxy(idx))
def __getattr__(self, name):
"Unknown attribute requests get forwarded to SMProperty."
return getattr(self.SMProperty, name)
def index(self, proxy):
idx = 0
for px in self:
## VSV: ==
if proxy.IsSame(px):
return idx
idx += 1
raise ValueError("proxy is not in the list.")
def append(self, proxy):
"Appends the given proxy to the property values."
self.SMProperty.AddProxy(proxy.SMProxy)
self._UpdateProperty()
def GetData(self):
"Returns all elements as either a list or a single value."
property = self.SMProperty
if property.GetRepeatable() or property.GetNumberOfProxies() > 1:
return self[0:len(self)]
else:
if property.GetNumberOfProxies() > 0:
return _getPyProxy(property.GetProxy(0))
return None
def SetData(self, values):
"""Allows setting of all values at once. Requires a single value,
a tuple or list."""
if isinstance(values, str):
position = -1
try:
position = self.Available.index(values)
        except ValueError:
raise ValueError, values + " is not a valid object in the domain."
values = self.GetDomain('proxy_list').GetProxy(position)
if not isinstance(values, tuple) and \
not isinstance(values, list):
values = (values,)
self.SMProperty.RemoveAllProxies()
for value in values:
if isinstance(value, Proxy):
value_proxy = value.SMProxy
else:
value_proxy = value
self.SMProperty.AddProxy(value_proxy)
self._UpdateProperty()
def Clear(self):
"Removes all elements."
self.SMProperty.RemoveAllProxies()
self._UpdateProperty()
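# Illustrative sketch (not part of the original module, never called here):
# for a ProxyProperty backed by a 'proxy_list' domain, SetData() accepts a
# domain label string. The 'CutFunction' property and 'Plane' label below
# are illustrative of the standard Cut filter.
def _example_proxy_property(cut):
    prop = cut.GetProperty("CutFunction")
    print prop.Available   # labels from the proxy-list domain
    prop.SetData("Plane")  # selects the helper proxy with that label
    print prop.GetData()   # returns the selected proxy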
class InputProperty(ProxyProperty):
"""An InputProperty allows making pipeline connections. You can set either
a source proxy or an OutputProperty to an input property:
> property[0] = proxy
or
> property[0] = OuputPort(proxy, 1)
> property.append(proxy)
or
> property.append(OutputPort(proxy, 0))
"""
def __setitem__(self, idx, value):
"""Given a list or tuple of values, sets a slice of values [min, max)"""
if isinstance(idx, slice):
indices = idx.indices(len(self))
            for i, j in zip(range(*indices), value):
                self.SMProperty.SetInputConnection(i, j.SMProxy, j.Port)
self._UpdateProperty()
elif idx >= len(self) or idx < 0:
raise IndexError
else:
self.SMProperty.SetInputConnection(idx, value.SMProxy, value.Port)
self._UpdateProperty()
def __getitem__(self, idx):
"""Returns the range [min, max) of elements. Raises an IndexError
exception if an argument is out of bounds."""
if isinstance(idx, slice):
indices = idx.indices(len(self))
retVal = []
for i in range(*indices):
port = None
if self.SMProperty.GetProxy(i):
port = OutputPort(_getPyProxy(self.SMProperty.GetProxy(i)),\
self.SMProperty.GetOutputPortForConnection(i))
retVal.append(port)
return retVal
elif idx >= len(self) or idx < 0:
raise IndexError
return OutputPort(_getPyProxy(self.SMProperty.GetProxy(idx)),\
self.SMProperty.GetOutputPortForConnection(idx))
def append(self, value):
"""Appends the given proxy to the property values.
Accepts Proxy or OutputPort objects."""
self.SMProperty.AddInputConnection(value.SMProxy, value.Port)
self._UpdateProperty()
def GetData(self):
"""Returns all elements as either a list of OutputPort objects or
a single OutputPort object."""
property = self.SMProperty
if property.GetRepeatable() or property.GetNumberOfProxies() > 1:
return self[0:len(self)]
else:
if property.GetNumberOfProxies() > 0:
return OutputPort(_getPyProxy(property.GetProxy(0)),\
self.SMProperty.GetOutputPortForConnection(0))
return None
def SetData(self, values):
"""Allows setting of all values at once. Requires a single value,
a tuple or list. Accepts Proxy or OutputPort objects."""
if isinstance(values, str):
ProxyProperty.SetData(self, values)
return
if not isinstance(values, tuple) and \
not isinstance(values, list):
values = (values,)
self.SMProperty.RemoveAllProxies()
for value in values:
if value:
self.SMProperty.AddInputConnection(value.SMProxy, value.Port)
self._UpdateProperty()
def _UpdateProperty(self):
"Pushes the value of this property to the server."
ProxyProperty._UpdateProperty(self)
iter = PropertyIterator(self.Proxy)
for prop in iter:
if isinstance(prop, ArraySelectionProperty):
prop.UpdateDefault()
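# Illustrative sketch (not part of the original module, never called here):
# InputProperty accepts a source proxy or an OutputPort, making multi-port
# connections explicit. Assumes an active connection; the Shrink filter is
# illustrative of any single-input filter.
def _example_input_property():
    sphere = sources.SphereSource()
    shrink = filters.Shrink()
    shrink.GetProperty("Input").append(OutputPort(sphere, 0))
    # equivalently: shrink.Input = sphere
    shrink.UpdatePipeline()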
class DataInformation(object):
"""DataInformation is a contained for meta-data associated with an
output data.
DataInformation is a python wrapper around a vtkPVDataInformation.
In addition to proving all methods of a vtkPVDataInformation, it provides
a few convenience methods.
Please note that some of the methods accessible through the DataInformation
class are not listed by help() because the DataInformation objects forward
unresolved attributes to the underlying object. To get the full list,
see also dir(proxy.DataInformation).
See also the doxygen based documentation of the vtkPVDataInformation C++
class.
"""
def __init__(self, dataInformation, proxy, idx):
"""Default constructor. Requires a vtkPVDataInformation, a source proxy
and an output port id."""
self.DataInformation = dataInformation
self.Proxy = proxy
self.Idx = idx
def Update(self):
"""****Deprecated**** There is no reason anymore to use this method
explicitly, it is called automatically when one gets any value from the
data information object.
Update the data information if necessary. Note that this
does not cause execution of the underlying object. In certain
cases, you may have to call UpdatePipeline() on the proxy."""
if self.Proxy:
self.Proxy.GetDataInformation(self.Idx)
def GetDataSetType(self):
"""Returns the dataset type as defined in vtkDataObjectTypes."""
self.Update()
if not self.DataInformation:
raise RuntimeError, "No data information is available"
if self.DataInformation.GetCompositeDataSetType() > -1:
return self.DataInformation.GetCompositeDataSetType()
return self.DataInformation.GetDataSetType()
def GetDataSetTypeAsString(self):
"""Returns the dataset type as a user-friendly string. This is
not the same as the enumaration used by VTK"""
return vtk.vtkDataObjectTypes.GetClassNameFromTypeId(self.GetDataSetType())
def __getattr__(self, name):
"""Forwards unknown attribute requests to the underlying
vtkPVInformation."""
        if not self.DataInformation:
            raise AttributeError("class has no attribute %s" % name)
self.Update()
return getattr(self.DataInformation, name)
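# Illustrative sketch (not part of the original module, never called here):
# DataInformation refreshes itself on access, so no explicit Update() call
# is needed. Assumes an active connection.
def _example_data_information():
    sphere = sources.SphereSource()
    sphere.UpdatePipeline()
    di = sphere.GetDataInformation()
    print di.GetDataSetTypeAsString()  # e.g. 'vtkPolyData'
    print di.GetNumberOfPoints()       # forwarded to vtkPVDataInformation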
class ArrayInformation(object):
"""Meta-information associated with an array. Use the Name
attribute to get the array name.
Please note that some of the methods accessible through the ArrayInformation
class are not listed by help() because the ArrayInformation objects forward
unresolved attributes to the underlying object.
See the doxygen based documentation of the vtkPVArrayInformation C++
class for a full list.
"""
def __init__(self, proxy, field, name):
self.Proxy = proxy
self.FieldData = field
self.Name = name
def __getattr__(self, name):
"""Forward unknown methods to vtkPVArrayInformation"""
array = self.FieldData.GetFieldData().GetArrayInformation(self.Name)
if not array: return None
return getattr(array, name)
def __repr__(self):
"""Returns a user-friendly representation string."""
return "Array: " + self.Name
def GetRange(self, component=0):
"""Given a component, returns its value range as a tuple of 2 values."""
array = self.FieldData.GetFieldData().GetArrayInformation(self.Name)
        rng = array.GetComponentRange(component)
        return (rng[0], rng[1])
class FieldDataInformationIterator(object):
"""Iterator for FieldDataInformation"""
def __init__(self, info, items=False):
self.FieldDataInformation = info
self.index = 0
self.items = items
def __iter__(self):
return self
def next(self):
if self.index >= self.FieldDataInformation.GetNumberOfArrays():
raise StopIteration
self.index += 1
ai = self.FieldDataInformation[self.index-1]
if self.items:
return (ai.GetName(), ai)
else:
return ai
class FieldDataInformation(object):
"""Meta-data for a field of an output object (point data, cell data etc...).
Provides easy access to the arrays using the slice interface:
> narrays = len(field_info)
> for i in range(narrays):
> array_info = field_info[i]
Full slice interface is supported:
> arrays = field_info[0:5:3]
where arrays is a list.
Array access by name is also possible:
> array_info = field_info['Temperature']
The number of arrays can also be accessed using the NumberOfArrays
property.
"""
def __init__(self, proxy, idx, field):
self.Proxy = proxy
self.OutputPort = idx
self.FieldData = field
def GetFieldData(self):
"""Convenience method to get the underlying
vtkPVDataSetAttributesInformation"""
return getattr(self.Proxy.GetDataInformation(self.OutputPort), "Get%sInformation" % self.FieldData)()
def GetNumberOfArrays(self):
"""Returns the number of arrays."""
self.Proxy.UpdatePipeline()
return self.GetFieldData().GetNumberOfArrays()
def GetArray(self, idx):
"""Given an index or a string, returns an array information.
Raises IndexError if the index is out of bounds."""
self.Proxy.UpdatePipeline()
if not self.GetFieldData().GetArrayInformation(idx):
return None
if isinstance(idx, str):
return ArrayInformation(self.Proxy, self, idx)
elif idx >= len(self) or idx < 0:
raise IndexError
return ArrayInformation(self.Proxy, self, self.GetFieldData().GetArrayInformation(idx).GetName())
def __len__(self):
"""Returns the number of arrays."""
return self.GetNumberOfArrays()
def __getitem__(self, idx):
"""Implements the [] operator. Accepts an array name."""
if isinstance(idx, slice):
indices = idx.indices(self.GetNumberOfArrays())
retVal = []
for i in range(*indices):
retVal.append(self.GetArray(i))
return retVal
return self.GetArray(idx)
def keys(self):
"""Implementation of the dictionary API"""
kys = []
narrays = self.GetNumberOfArrays()
for i in range(narrays):
kys.append(self.GetArray(i).GetName())
return kys
def values(self):
"""Implementation of the dictionary API"""
vals = []
narrays = self.GetNumberOfArrays()
for i in range(narrays):
vals.append(self.GetArray(i))
return vals
def iteritems(self):
"""Implementation of the dictionary API"""
return FieldDataInformationIterator(self, True)
def items(self):
"""Implementation of the dictionary API"""
itms = []
narrays = self.GetNumberOfArrays()
for i in range(narrays):
ai = self.GetArray(i)
itms.append((ai.GetName(), ai))
return itms
def has_key(self, key):
"""Implementation of the dictionary API"""
if self.GetArray(key):
return True
return False
def __iter__(self):
"""Implementation of the dictionary API"""
return FieldDataInformationIterator(self)
def __getattr__(self, name):
"""Forwards unknown attributes to the underlying
vtkPVDataSetAttributesInformation"""
array = self.GetArray(name)
if array: return array
        raise AttributeError("class has no attribute %s" % name)
NumberOfArrays = property(GetNumberOfArrays, None, None, "Returns the number of arrays.")
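# Illustrative sketch (not part of the original module, never called here):
# FieldDataInformation supports both the sequence and the dictionary APIs.
# Assumes an active connection; the sphere source's point data (e.g. its
# 'Normals' array) is used for illustration.
def _example_field_data_information():
    sphere = sources.SphereSource()
    sphere.UpdatePipeline()
    pd = sphere.GetPointDataInformation()
    print len(pd), pd.keys()         # number of arrays and their names
    for name, ai in pd.iteritems():  # dictionary-style iteration
        print name, ai.GetRange(0)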
def OutputPort(proxy, outputPort=0):
    if not proxy:
return None
if isinstance(outputPort, str):
outputPort = proxy.GetOutputPortIndex(outputPort)
if outputPort >= proxy.GetNumberOfOutputPorts():
return None
if proxy.Port == outputPort:
return proxy
newinstance = _getPyProxy(proxy.SMProxy, outputPort)
newinstance.Port = outputPort
newinstance._Proxy__Properties = proxy._Proxy__Properties
return newinstance
class ProxyManager(object):
"""When running scripts from the python shell in the ParaView application,
registering proxies with the proxy manager is the only mechanism to
notify the graphical user interface (GUI) that a proxy
exists. Therefore, unless a proxy is registered, it will not show up in
the user interface. Also, the proxy manager is the only way to get
access to proxies created using the GUI. Proxies created using the GUI
are automatically registered under an appropriate group (sources,
filters, representations and views). To get access to these objects,
you can use proxyManager.GetProxy(group, name). The name is the same
as the name shown in the pipeline browser.
This class is a python wrapper for vtkSMProxyManager. Note that the
underlying vtkSMProxyManager is a singleton. All instances of this
class will refer to the same object. In addition to all methods provided by
vtkSMProxyManager (all unknown attribute requests are forwarded
to the vtkSMProxyManager), this class provides several convenience
methods.
Please note that some of the methods accessible through the ProxyManager
    class are not listed by help() because the ProxyManager objects forward
unresolved attributes to the underlying object. To get the full list,
see also dir(proxy.SMProxyManager). See also the doxygen based documentation
of the vtkSMProxyManager C++ class.
"""
def __init__(self, session=None):
"""Constructor. Assigned self.SMProxyManager to
vtkSMProxyManager.GetProxyManager()."""
global ActiveConnection
if not session:
session = ActiveConnection.Session
self.SMProxyManager = session.GetSessionProxyManager()
def RegisterProxy(self, group, name, aProxy):
"""Registers a proxy (either SMProxy or proxy) with the
server manager"""
if isinstance(aProxy, Proxy):
self.SMProxyManager.RegisterProxy(group, name, aProxy.SMProxy)
else:
self.SMProxyManager.RegisterProxy(group, name, aProxy)
def NewProxy(self, group, name):
"""Creates a new proxy of given group and name and returns an SMProxy.
Note that this is a server manager object. You should normally create
proxies using the class objects. For example:
obj = servermanager.sources.SphereSource()"""
if not self.SMProxyManager:
return None
aProxy = self.SMProxyManager.NewProxy(group, name, "NULL")
if not aProxy:
return None
aProxy.UnRegister(None)
return aProxy
def GetProxy(self, group, name):
"""Returns a Proxy registered under a group and name"""
if not self.SMProxyManager:
return None
aProxy = self.SMProxyManager.GetProxy(group, name)
if not aProxy:
return None
return _getPyProxy(aProxy)
def GetPrototypeProxy(self, group, name):
"""Returns a prototype proxy given a group and name. This is an
SMProxy. This is a low-level method. You should not normally
have to call it."""
if not self.SMProxyManager:
return None
aProxy = self.SMProxyManager.GetPrototypeProxy(group, name)
if not aProxy:
return None
return aProxy
def GetProxiesInGroup(self, groupname):
"""Returns a map of proxies in a particular group."""
proxies = {}
iter = self.NewGroupIterator(groupname)
for aProxy in iter:
proxies[(iter.GetKey(), aProxy.GetGlobalIDAsString())] = aProxy
return proxies
def UnRegisterProxy(self, groupname, proxyname, aProxy):
"""Unregisters a proxy."""
if not self.SMProxyManager:
return
if aProxy != None and isinstance(aProxy,Proxy):
aProxy = aProxy.SMProxy
if aProxy:
self.SMProxyManager.UnRegisterProxy(groupname, proxyname, aProxy)
def GetProxies(self, groupname, proxyname):
"""Returns all proxies registered under the given group with the
given name. Note that it is possible to register more than one
proxy with the same name in the same group. Because the proxies
are different, there is no conflict. Use this method instead of
GetProxy() if you know that there are more than one proxy registered
with this name."""
if not self.SMProxyManager:
return []
collection = vtk.vtkCollection()
result = []
self.SMProxyManager.GetProxies(groupname, proxyname, collection)
for i in range(0, collection.GetNumberOfItems()):
aProxy = _getPyProxy(collection.GetItemAsObject(i))
if aProxy:
result.append(aProxy)
return result
def __iter__(self):
"""Returns a new ProxyIterator."""
iter = ProxyIterator()
iter.Begin()
return iter
def NewGroupIterator(self, group_name):
"""Returns a ProxyIterator for a group. The resulting object
can be used to traverse the proxies that are in the given
group."""
iter = self.__iter__()
iter.SetModeToOneGroup()
iter.Begin(group_name)
return iter
def NewDefinitionIterator(self, groupname=None):
"""Returns an iterator that can be used to iterate over
all groups and types of proxies that the proxy manager
can create."""
iter = None
if groupname != None:
iter = ProxyDefinitionIterator(self.GetProxyDefinitionManager().NewSingleGroupIterator(groupname,0))
else:
iter = ProxyDefinitionIterator(self.GetProxyDefinitionManager().NewIterator(0))
return iter
def __ConvertArgumentsAndCall(self, *args):
newArgs = []
for arg in args:
if issubclass(type(arg), Proxy) or isinstance(arg, Proxy):
newArgs.append(arg.SMProxy)
else:
newArgs.append(arg)
func = getattr(self.SMProxyManager, self.__LastAttrName)
retVal = func(*newArgs)
if type(retVal) is type(self.SMProxyManager) and retVal.IsA("vtkSMProxy"):
return _getPyProxy(retVal)
else:
return retVal
def __getattr__(self, name):
"""Returns attribute from the ProxyManager"""
try:
pmAttr = getattr(self.SMProxyManager, name)
self.__LastAttrName = name
return self.__ConvertArgumentsAndCall
except:
pass
return getattr(self.SMProxyManager, name)
def LoadState(self, filename, loader = None):
self.SMProxyManager.LoadXMLState(filename, loader)
def SaveState(self, filename):
self.SMProxyManager.SaveXMLState(filename)
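# Illustrative sketch (not part of the original module, never called here):
# registering a proxy with the ProxyManager is what makes it visible to the
# ParaView GUI. Assumes an active connection; group and name strings are
# illustrative.
def _example_proxy_manager():
    pxm = ProxyManager()
    sphere = sources.SphereSource()
    pxm.RegisterProxy("sources", "MySphere", sphere)
    print pxm.GetProxy("sources", "MySphere").GetXMLLabel()
    for aProxy in pxm.NewGroupIterator("sources"):
        print aProxy.GetXMLLabel()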
class PropertyIterator(object):
"""Wrapper for a vtkSMPropertyIterator class to satisfy
the python iterator protocol. Note that the list of
properties can also be obtained from the class object's
dictionary.
See the doxygen documentation for vtkSMPropertyIterator C++
class for details.
"""
def __init__(self, aProxy):
self.SMIterator = aProxy.NewPropertyIterator()
if self.SMIterator:
self.SMIterator.UnRegister(None)
self.SMIterator.Begin()
self.Key = None
self.PropertyLabel = None
self.Proxy = aProxy
def __iter__(self):
return self
def next(self):
if not self.SMIterator:
raise StopIteration
if self.SMIterator.IsAtEnd():
self.Key = None
raise StopIteration
self.Key = self.SMIterator.GetKey()
self.PropertyLabel = self.SMIterator.GetPropertyLabel()
self.SMIterator.Next()
return self.Proxy.GetProperty(self.Key)
def GetProxy(self):
"""Returns the proxy for the property last returned by the call to
'next()'"""
return self.Proxy
def GetKey(self):
"""Returns the key for the property last returned by the call to
'next()' """
return self.Key
def GetProperty(self):
"""Returns the property last returned by the call to 'next()' """
return self.Proxy.GetProperty(self.Key)
def __getattr__(self, name):
"""returns attributes from the vtkSMPropertyIterator."""
return getattr(self.SMIterator, name)
class ProxyDefinitionIterator(object):
"""Wrapper for a vtkPVProxyDefinitionIterator class to satisfy
the python iterator protocol.
See the doxygen documentation of the vtkPVProxyDefinitionIterator
C++ class for more information."""
def __init__(self, iter):
self.SMIterator = iter
if self.SMIterator:
self.SMIterator.UnRegister(None)
self.SMIterator.InitTraversal()
self.Group = None
self.Key = None
def __iter__(self):
return self
def next(self):
if self.SMIterator.IsDoneWithTraversal():
self.Group = None
self.Key = None
raise StopIteration
self.Group = self.SMIterator.GetGroupName()
self.Key = self.SMIterator.GetProxyName()
self.SMIterator.GoToNextItem()
return {"group": self.Group, "key":self.Key }
def GetProxyName(self):
"""Returns the key for the proxy definition last returned by the call
to 'next()' """
return self.Key
def GetGroup(self):
"""Returns the group for the proxy definition last returned by the
call to 'next()' """
return self.Group
def __getattr__(self, name):
"""returns attributes from the vtkPVProxyDefinitionIterator."""
return getattr(self.SMIterator, name)
class ProxyIterator(object):
"""Wrapper for a vtkSMProxyIterator class to satisfy the
python iterator protocol.
See the doxygen documentation of vtkSMProxyIterator C++ class for
more information.
"""
def __init__(self):
self.SMIterator = vtkSMProxyIterator()
self.SMIterator.SetSession(ActiveConnection.Session)
self.SMIterator.Begin()
self.AProxy = None
self.Group = None
self.Key = None
def __iter__(self):
return self
def next(self):
if self.SMIterator.IsAtEnd():
self.AProxy = None
self.Group = None
self.Key = None
raise StopIteration
self.AProxy = _getPyProxy(self.SMIterator.GetProxy())
self.Group = self.SMIterator.GetGroup()
self.Key = self.SMIterator.GetKey()
self.SMIterator.Next()
return self.AProxy
def GetProxy(self):
"""Returns the proxy last returned by the call to 'next()'"""
return self.AProxy
def GetKey(self):
"""Returns the key for the proxy last returned by the call to
'next()' """
return self.Key
def GetGroup(self):
"""Returns the group for the proxy last returned by the call to
'next()' """
return self.Group
def __getattr__(self, name):
"""returns attributes from the vtkSMProxyIterator."""
return getattr(self.SMIterator, name)
# Caution: Observers must be global methods; otherwise we run into a memory
# leak when the interpreter gets reset from the C++ layer.
def _update_definitions(caller, event):
updateModules(ActiveConnection.Modules)
class Connection(object):
"""
This is a python representation for a session/connection.
"""
def __init__(self, connectionId, session):
"""Default constructor. Creates a Connection with the given
ID, all other data members initialized to None."""
global MultiServerConnections
global ActiveConnection
self.ID = connectionId
self.Session = session
self.Modules = PVModule()
self.Alive = True
self.DefinitionObserverTag = 0
self.CustomDefinitionObserverTag = 0
if MultiServerConnections == None and ActiveConnection:
raise RuntimeError, "Concurrent connections not supported!"
if MultiServerConnections != None and not self in MultiServerConnections:
MultiServerConnections.append(self)
ActiveConnection = self
__InitAfterConnect__(self)
__exposeActiveModules__()
def __eq__(self, other):
"Returns true if the connection ids are the same."
return (self.ID == other.ID)
def __repr__(self):
"""User friendly string representation"""
return "Connection (%s) [%d]" % (self.Session.GetURI(), self.ID)
def GetURI(self):
"""Get URI of the connection"""
return self.Session.GetURI()
def IsRemote(self):
"""Returns True if the connection to a remote server, False if
it is local (built-in)"""
if self.Session.IsA("vtkSMSessionClient"):
return True
return False
def GetNumberOfDataPartitions(self):
"""Returns the number of partitions on the data server for this
connection"""
return self.Session.GetServerInformation().GetNumberOfProcesses()
def AttachDefinitionUpdater(self):
"""Attach observer to automatically update modules when needed."""
# VTN: Observers are not supported
# ProxyDefinitionsUpdated = 2000
## self.DefinitionObserverTag = self.Session.GetProxyDefinitionManager().AddObserver(2000, _update_definitions)
# CompoundProxyDefinitionsUpdated = 2001
## self.CustomDefinitionObserverTag = self.Session.GetProxyDefinitionManager().AddObserver(2001, _update_definitions)
pass
def close(self):
if self.DefinitionObserverTag:
self.Session.GetProxyDefinitionManager().RemoveObserver(self.DefinitionObserverTag)
self.Session.GetProxyDefinitionManager().RemoveObserver(self.CustomDefinitionObserverTag)
self.Session = None
self.Modules = None
self.Alive = False
def __del__(self):
if self.Alive:
self.close()
def SaveState(filename):
"""Given a state filename, saves the state of objects registered
with the proxy manager."""
pm = ProxyManager()
pm.SaveState(filename)
def LoadState(filename, connection=None):
"""Given a state filename and an optional connection, loads the server
manager state."""
if not connection:
connection = ActiveConnection
if not connection:
raise RuntimeError, "Cannot load state without a connection"
pm = ProxyManager()
pm.LoadState(filename, None)
views = GetRenderViews()
for view in views:
# Make sure that the client window size matches the
# ViewSize property. In paraview, the GUI takes care
# of this.
if view.GetClassName() == "vtkSMIceTDesktopRenderViewProxy":
view.GetRenderWindow().SetSize(view.ViewSize[0], \
view.ViewSize[1])
def InitFromGUI():
"""
Method used to initialize the Python Shell from the ParaView GUI.
"""
global fromGUI, ActiveConnection
# if not fromGUI:
# print "from paraview.simple import *"
fromGUI = True
# ToggleProgressPrinting() ### FIXME COLLABORATION
enableMultiServer(vtkProcessModule.GetProcessModule().GetMultipleSessionsSupport())
    iter = vtkProcessModule.GetProcessModule().NewSessionIterator()
iter.InitTraversal()
ActiveConnection = None
activeSession = vtkSMProxyManager.GetProxyManager().GetActiveSession()
tmpActiveConnection = None
while not iter.IsDoneWithTraversal():
c = Connection(iter.GetCurrentSessionId(), iter.GetCurrentSession())
if c.Session == activeSession:
tmpActiveConnection = c
iter.GoToNextItem()
iter.UnRegister(None)
if tmpActiveConnection:
ActiveConnection = tmpActiveConnection
def Connect(ds_host=None, ds_port=11111, rs_host=None, rs_port=22221):
"""
Use this function call to create a new session. On success,
it returns a vtkSMSession object that abstracts the connection.
Otherwise, it returns None.
There are several ways in which this function can be called:
* When called with no arguments, it creates a new session
to the built-in server on the client itself.
* When called with ds_host and ds_port arguments, it
    attempts to connect to a server (data and render server on the same host)
on the indicated host:port.
* When called with ds_host, ds_port, rs_host, rs_port, it
creates a new connection to the data server on ds_host:ds_port and to the
    render server on rs_host:rs_port.
"""
if ds_host == None:
session = vtkSMSession()
elif rs_host == None:
session = vtkSMSessionClient()
session.Connect("cs://%s:%d" % (ds_host, ds_port))
else:
session = vtkSMSessionClient()
session.Connect("cdsrs://%s:%d/%s:%d" % (ds_host, ds_port, rs_host, rs_port))
id = vtkProcessModule.GetProcessModule().RegisterSession(session)
connection = Connection(id, session)
return connection
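# Illustrative sketch (not part of the original module, never called here):
# the three calling conventions of Connect(); host names and ports are
# illustrative.
def _example_connect():
    # connection = Connect()             # built-in server on the client
    # connection = Connect("localhost")  # pvserver on localhost:11111
    connection = Connect("dshost", 11111, "rshost", 22221)  # split servers
    print connection.GetURI()
    return connection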
def ReverseConnect(port=11111):
"""
Use this function call to create a new session. On success,
it returns a Session object that abstracts the connection.
Otherwise, it returns None.
In reverse connection mode, the client waits for a connection
from the server (client has to be started first). The server
then connects to the client (run pvserver with -rc and -ch
option).
    The optional port specifies the port to listen on.
"""
session = vtkSMSessionClient()
session.Connect("csrc://hostname:" + port)
id = vtkProcessModule.GetProcessModule().RegisterSession(session)
connection = Connection(id, session)
return connection
def Disconnect(session=None):
"""Disconnects the connection. Make sure to clear the proxy manager
first."""
global ActiveConnection
global MultiServerConnections
global fromGUI
if fromGUI:
# Let the UI know that we want to disconnect
ActiveConnection.Session.InvokeEvent('ExitEvent')
return
if ActiveConnection and (not session or session == ActiveConnection.Session):
session = ActiveConnection.Session
if MultiServerConnections:
MultiServerConnections.remove(ActiveConnection)
ActiveConnection.close()
ActiveConnection = None
switchActiveConnection()
else:
ActiveConnection.close()
ActiveConnection = None
elif MultiServerConnections:
for connection in MultiServerConnections:
if connection.Session == session:
connection.close()
MultiServerConnections.remove(connection)
if session:
vtkProcessModule.GetProcessModule().UnRegisterSession(session)
return
def CreateProxy(xml_group, xml_name, session=None):
"""Creates a proxy. If session is set, the proxy's session is
set accordingly. If session is None, the current Session is used, if
    present. You should not have to use this method normally. Instantiate the
appropriate class from the appropriate module, for example:
sph = servermanager.sources.SphereSource()"""
global ActiveConnection
if not session:
session = ActiveConnection.Session
if not session:
raise RuntimeError, "Cannot create objects without a session."
pxm = ProxyManager(session)
return pxm.NewProxy(xml_group, xml_name)
def GetRenderView(connection=None):
"""Return the render view in use. If more than one render view is in
use, return the first one."""
render_module = None
for aProxy in ProxyManager():
if aProxy.IsA("vtkSMRenderViewProxy"):
render_module = aProxy
break
return render_module
def GetRenderViews(connection=None):
"""Returns the set of all render views."""
render_modules = []
for aProxy in ProxyManager():
if aProxy.IsA("vtkSMRenderViewProxy"):
render_modules.append(aProxy)
return render_modules
def GetContextViews(connection=None):
"""Returns the set of all context views."""
context_modules = []
for aProxy in ProxyManager():
if aProxy.IsA("vtkSMContextViewProxy"):
context_modules.append(aProxy)
return context_modules
def CreateRenderView(session=None, **extraArgs):
"""Creates a render window on the particular session. If session
is not specified, then the active session is used, if available.
This method can also be used to initialize properties by passing
keyword arguments where the key is the name of the property. In addition
registrationGroup and registrationName (optional) can be specified (as
keyword arguments) to automatically register the proxy with the proxy
manager."""
return _create_view("RenderView", session, **extraArgs)
def _create_view(view_xml_name, session=None, **extraArgs):
"""Creates a view on the particular session. If session
is not specified, then the active session is used, if available.
This method can also be used to initialize properties by passing
keyword arguments where the key is the name of the property."""
if not session:
session = ActiveConnection.Session
if not session:
raise RuntimeError, "Cannot create view without session."
pxm = ProxyManager()
view_module = None
if view_xml_name:
view_module = CreateProxy("views", view_xml_name, session)
if not view_module:
return None
extraArgs['proxy'] = view_module
python_proxy_name = _make_name_valid(view_module.GetXMLName())
proxy = rendering.__dict__[python_proxy_name](**extraArgs)
return proxy
def GetRepresentation(aProxy, view):
for rep in view.Representations:
#VSV: ==
try: isRep = rep.Input.IsSame(aProxy)
except: isRep = False
if isRep: return rep
return None
def CreateRepresentation(aProxy, view, **extraArgs):
"""Creates a representation for the proxy and adds it to the render
module.
This method can also be used to initialize properties by passing
keyword arguments where the key is the name of the property.In addition
registrationGroup and registrationName (optional) can be specified (as
keyword arguments) to automatically register the proxy with the proxy
manager.
This method tries to create the best possible representation for the given
proxy in the given view. Additionally, the user can specify proxyName
(optional) to create a representation of a particular type."""
global rendering
if not aProxy:
raise RuntimeError, "proxy argument cannot be None."
if not view:
raise RuntimeError, "view argument cannot be None."
if "proxyName" in extraArgs:
display = CreateProxy("representations", extraArgs['proxyName'], None)
del extraArgs['proxyName']
else:
display = view.SMProxy.CreateDefaultRepresentation(aProxy.SMProxy, 0)
if display:
display.UnRegister(None)
if not display:
return None
extraArgs['proxy'] = display
proxy = rendering.__dict__[display.GetXMLName()](**extraArgs)
proxy.Input = aProxy
proxy.UpdateVTKObjects()
view.Representations.append(proxy)
return proxy
class _ModuleLoader(object):
def find_module(self, fullname, path=None):
if vtkPVPythonModule.HasModule(fullname):
return self
else:
return None
def load_module(self, fullname):
import imp
moduleInfo = vtkPVPythonModule.GetModule(fullname)
if not moduleInfo:
raise ImportError
module = sys.modules.setdefault(fullname, imp.new_module(fullname))
module.__file__ = "<%s>" % moduleInfo.GetFullName()
module.__loader__ = self
        if moduleInfo.GetIsPackage():
module.__path__ = moduleInfo.GetFullName()
code = compile(moduleInfo.GetSource(), module.__file__, 'exec')
exec code in module.__dict__
return module
def LoadXML(xmlstring):
"""DEPRECATED. Given a server manager XML as a string, parse and process it."""
raise RuntimeError, "Deprecated. Use LoadPlugin(...) instead."
def LoadPlugin(filename, remote=True, connection=None):
""" Given a filename and a session (optional, otherwise uses
ActiveConnection), loads a plugin. It then updates the sources,
filters and rendering modules."""
if not connection:
connection = ActiveConnection
if not connection:
raise RuntimeError, "Cannot load a plugin without a connection."
plm = vtkSMProxyManager.GetProxyManager().GetPluginManager()
if remote:
status = plm.LoadRemotePlugin(filename, connection.Session)
else:
status = plm.LoadLocalPlugin(filename)
    # shouldn't the extension check happen before attempting to load the plugin?
if not status:
raise RuntimeError, "Problem loading plugin %s" % (filename)
else:
# we should never have to call this. The modules should update automatically.
updateModules(connection.Modules)
def Fetch(input, arg1=None, arg2=None, idx=0):
"""
A convenience method that moves data from the server to the client,
optionally performing some operation on the data as it moves.
The input argument is the name of the (proxy for a) source or filter
whose output is needed on the client.
You can use Fetch to do three things:
If arg1 is None (the default) then all of the data is brought to the client.
    In parallel runs, an appropriate append filter merges the
    data on each processor into one data object. The filter chosen will be
vtkAppendPolyData for vtkPolyData, vtkAppendRectilinearGrid for
vtkRectilinearGrid, vtkMultiBlockDataGroupFilter for vtkCompositeData,
and vtkAppendFilter for anything else.
If arg1 is an integer then one particular processor's output is brought to
the client. In serial runs the arg is ignored. If you have a filter that
computes results in parallel and brings them to the root node, then set
arg to be 0.
    If arg1 and arg2 are algorithms, for example vtkMinMax, the algorithm
will be applied to the data to obtain some result. Here arg1 will be
applied pre-gather and arg2 will be applied post-gather. In parallel
runs the algorithm will be run on each processor to make intermediate
results and then again on the root processor over all of the
intermediate results to create a global result.
Optional argument idx is used to specify the output port number to fetch the
data from. Default is port 0.
"""
import types
reducer = filters.ReductionFilter(Input=OutputPort(input,idx))
#create the pipeline that reduces and transmits the data
if arg1 == None:
cdinfo = input.GetDataInformation(idx).GetCompositeDataInformation()
if cdinfo.GetDataIsComposite():
print "use composite data append"
reducer.PostGatherHelperName = "vtkMultiBlockDataGroupFilter"
elif input.GetDataInformation(idx).GetDataClassName() == "vtkPolyData":
print "use append poly data filter"
reducer.PostGatherHelperName = "vtkAppendPolyData"
elif input.GetDataInformation(idx).GetDataClassName() == "vtkRectilinearGrid":
print "use append rectilinear grid filter"
reducer.PostGatherHelperName = "vtkAppendRectilinearGrid"
elif input.GetDataInformation(idx).IsA("vtkDataSet"):
print "use unstructured append filter"
reducer.PostGatherHelperName = "vtkAppendFilter"
elif type(arg1) is types.IntType:
reducer.PassThrough = arg1
else:
reducer.PreGatherHelper = arg1
reducer.PostGatherHelper = arg2
# reduce
reducer.UpdatePipeline()
dataInfo = reducer.GetDataInformation(0)
dataType = dataInfo.GetDataSetType()
if dataInfo.GetCompositeDataSetType() > 0:
dataType = dataInfo.GetCompositeDataSetType()
fetcher = filters.ClientServerMoveData(Input=reducer)
fetcher.OutputDataType = dataType
fetcher.WholeExtent = dataInfo.GetExtent()[:]
#fetch
fetcher.UpdatePipeline()
op = fetcher.GetClientSideObject().GetOutputDataObject(0)
opc = op.NewInstance()
opc.ShallowCopy(op)
opc.UnRegister(None)
return opc
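# Illustrative sketch (not part of the original module, never called here):
# the two simplest forms of Fetch() described in the docstring above.
# Assumes an active connection.
def _example_fetch():
    sphere = sources.SphereSource()
    sphere.UpdatePipeline()
    data = Fetch(sphere)     # gather and append everything to the client
    print data.GetNumberOfPoints()
    root = Fetch(sphere, 0)  # only rank 0's piece; ignored in serial runs
    return data, root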
def AnimateReader(reader, view, filename=None):
"""This is a utility function that, given a reader and a view
animates over all time steps of the reader. If the optional
filename is provided, a movie is created (type depends on the
extension of the filename."""
if not reader:
raise RuntimeError, "No reader was specified, cannot animate."
if not view:
raise RuntimeError, "No view was specified, cannot animate."
# Create an animation scene
scene = animation.AnimationScene()
# We need to have the reader and the view registered with
# the time keeper. This is how the scene gets its time values.
try:
tk = ProxyManager().GetProxiesInGroup("timekeeper").values()[0]
scene.TimeKeeper = tk
except IndexError:
tk = misc.TimeKeeper()
scene.TimeKeeper = tk
if not reader in tk.TimeSources:
tk.TimeSources.append(reader)
if not view in tk.Views:
tk.Views.append(view)
# with 1 view
scene.ViewModules = [view]
# Update the reader to get the time information
reader.UpdatePipelineInformation()
# Animate from 1st time step to last
scene.StartTime = reader.TimestepValues.GetData()[0]
scene.EndTime = reader.TimestepValues.GetData()[-1]
# Each frame will correspond to a time step
scene.PlayMode = 2 #Snap To Timesteps
# Create a special animation cue for time.
cue = animation.TimeAnimationCue()
cue.AnimatedProxy = view
cue.AnimatedPropertyName = "ViewTime"
scene.Cues = [cue]
if filename:
writer = vtkSMAnimationSceneImageWriter()
writer.SetFileName(filename)
writer.SetFrameRate(1)
writer.SetAnimationScene(scene.SMProxy)
# Now save the animation.
if not writer.Save():
raise RuntimeError, "Saving of animation failed!"
else:
scene.Play()
return scene
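# Illustrative sketch (not part of the original module, never called here):
# animating a time-aware reader over all of its time steps. The reader class
# and file name are illustrative; any reader exposing 'TimestepValues' works.
def _example_animate_reader():
    reader = sources.ExodusIIReader(FileName="/path/to/data.ex2")
    view = CreateRenderView()
    CreateRepresentation(reader, view)
    return AnimateReader(reader, view, filename="movie.avi")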
def GetProgressPrintingIsEnabled():
return progressObserverTag is not None
def SetProgressPrintingEnabled(value):
"""Is not supported because of not supported observers"""
pass
def ToggleProgressPrinting():
"""Turn on/off printing of progress. See SetProgressPrintingEnabled."""
SetProgressPrintingEnabled(not GetProgressPrintingIsEnabled())
def Finalize():
"""Although not required, this can be called at exit to cleanup."""
global progressObserverTag
# Make sure to remove the observer
if progressObserverTag:
ToggleProgressPrinting()
vtkInitializationHelper.Finalize()
# Internal methods
def _getPyProxy(smproxy, outputPort=0):
"""Returns a python wrapper for a server manager proxy. This method
first checks if there is already such an object by looking in the
_pyproxies group and returns it if found. Otherwise, it creates a
new one. Proxies register themselves in _pyproxies upon creation."""
if not smproxy:
return None
if (smproxy, outputPort) in _pyproxies:
return _pyproxies[(smproxy, outputPort)]()
xmlName = smproxy.GetXMLName()
if smproxy.GetXMLLabel():
xmlName = smproxy.GetXMLLabel()
classForProxy = _findClassForProxy(_make_name_valid(xmlName), smproxy.GetXMLGroup())
if classForProxy:
retVal = classForProxy(proxy=smproxy, port=outputPort)
else:
retVal = Proxy(proxy=smproxy, port=outputPort)
return retVal
def _makeUpdateCameraMethod(rv):
""" This internal method is used to create observer methods """
if not hasattr(rv(), "BlockUpdateCamera"):
rv().add_attribute("BlockUpdateCamera", False)
def UpdateCamera(obj, string):
if not rv().BlockUpdateCamera:
# used to avoid some nasty recursion that occurs when interacting in
# the GUI.
rv().BlockUpdateCamera = True
rv().SynchronizeCameraProperties()
rv().BlockUpdateCamera = False
return UpdateCamera
def _createInitialize(group, name):
"""Internal method to create an Initialize() method for the sub-classes
of Proxy"""
pgroup = group
pname = name
def aInitialize(self, connection=None, update=True):
if not connection:
connection = ActiveConnection
if not connection:
raise RuntimeError,\
'Cannot create a proxy without a session.'
if not connection.Session.GetProxyDefinitionManager().HasDefinition(pgroup, pname):
error_msg = "The connection does not provide any definition for %s." % pname
raise RuntimeError, error_msg
self.InitializeFromProxy(\
CreateProxy(pgroup, pname, connection.Session), update)
return aInitialize
def _createGetProperty(pName):
"""Internal method to create a GetXXX() method where XXX == pName."""
propName = pName
def getProperty(self):
return self.GetPropertyValue(propName)
return getProperty
def _createSetProperty(pName):
"""Internal method to create a SetXXX() method where XXX == pName."""
propName = pName
def setProperty(self, value):
return self.SetPropertyWithName(propName, value)
return setProperty
def _findClassForProxy(xmlName, xmlGroup):
"""Given the xmlName for a proxy, returns a Proxy class. Note
that if there are duplicates, the first one is returned."""
global sources, filters, writers, rendering, animation, implicit_functions,\
piecewise_functions, extended_sources, misc
if not xmlName:
return None
if xmlGroup == "sources":
return sources.__dict__[xmlName]
elif xmlGroup == "filters":
return filters.__dict__[xmlName]
elif xmlGroup == "implicit_functions":
return implicit_functions.__dict__[xmlName]
elif xmlGroup == "piecewise_functions":
return piecewise_functions.__dict__[xmlName]
elif xmlGroup == "writers":
return writers.__dict__[xmlName]
elif xmlGroup == "extended_sources":
return extended_sources.__dict__[xmlName]
elif xmlName in rendering.__dict__:
return rendering.__dict__[xmlName]
elif xmlName in animation.__dict__:
return animation.__dict__[xmlName]
elif xmlName in misc.__dict__:
return misc.__dict__[xmlName]
else:
return None
def _printProgress(caller, event):
"""The default event handler for progress. Prints algorithm
name and 1 '.' per 10% progress."""
global currentAlgorithm, currentProgress
pm = vtkProcessModule.GetProcessModule()
progress = pm.GetLastProgress() / 10
# If we got a 100% as the first thing, ignore
# This is to get around the fact that some vtk
# algorithms report 100% more than once (which is
# a bug)
if not currentAlgorithm and progress == 10:
return
alg = pm.GetLastProgressName()
if alg != currentAlgorithm and alg:
if currentAlgorithm:
while currentProgress <= 10:
import sys
sys.stdout.write(".")
currentProgress += 1
print "]"
currentProgress = 0
print alg, ": [ ",
currentAlgorithm = alg
while currentProgress <= progress:
import sys
sys.stdout.write(".")
#sys.stdout.write("%d " % pm.GetLastProgress())
currentProgress += 1
if progress == 10:
print "]"
currentAlgorithm = None
currentProgress = 0
def updateModules(m):
"""Called when a plugin is loaded, this method updates
the proxy class object in all known modules."""
createModule("sources", m.sources)
createModule("filters", m.filters)
createModule("writers", m.writers)
createModule("representations", m.rendering)
createModule("views", m.rendering)
createModule("lookup_tables", m.rendering)
createModule("textures", m.rendering)
createModule('cameramanipulators', m.rendering)
createModule("animation", m.animation)
createModule("misc", m.misc)
createModule('animation_keyframes', m.animation)
createModule('implicit_functions', m.implicit_functions)
createModule('piecewise_functions', m.piecewise_functions)
createModule("extended_sources", m.extended_sources)
createModule("incremental_point_locators", m.misc)
def _createModules(m):
"""Called when the module is loaded, this creates sub-
modules for all know proxy groups."""
m.sources = createModule('sources')
m.filters = createModule('filters')
m.writers = createModule('writers')
m.rendering = createModule('representations')
createModule('views', m.rendering)
createModule("lookup_tables", m.rendering)
createModule("textures", m.rendering)
createModule('cameramanipulators', m.rendering)
m.animation = createModule('animation')
createModule('animation_keyframes', m.animation)
m.implicit_functions = createModule('implicit_functions')
m.piecewise_functions = createModule('piecewise_functions')
m.extended_sources = createModule("extended_sources")
m.misc = createModule("misc")
createModule("incremental_point_locators", m.misc)
class PVModule(object):
pass
def _make_name_valid(name):
"""Make a string into a valid Python variable name."""
if not name:
return None
import string
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    name = str().join([c for c in name if c in valid_chars])
    if not name:
        return None
    if not name[0].isalpha():
        name = 'a' + name
return name
def createModule(groupName, mdl=None):
"""Populates a module with proxy classes defined in the given group.
If mdl is not specified, it also creates the module"""
global ActiveConnection
if not ActiveConnection:
raise RuntimeError, "Please connect to a server using \"Connect\""
pxm = ProxyManager()
# Use prototypes to find all proxy types.
pxm.InstantiateGroupPrototypes(groupName)
debug = False
if not mdl:
debug = True
mdl = PVModule()
definitionIter = pxm.NewDefinitionIterator(groupName)
for i in definitionIter:
proxyName = i['key']
proto = pxm.GetPrototypeProxy(groupName, proxyName)
if not proto:
print "Error while loading %s/%s %s"%(groupName, i['group'], proxyName)
continue
pname = proxyName
if proto.GetXMLLabel():
pname = proto.GetXMLLabel()
pname = _make_name_valid(pname)
if not pname:
continue
if pname in mdl.__dict__:
if debug:
print "Warning: %s is being overwritten. This may point to an issue in the ParaView configuration files" % pname
cdict = {}
# Create an Initialize() method for this sub-class.
cdict['Initialize'] = _createInitialize(groupName, proxyName)
iter = PropertyIterator(proto)
# Add all properties as python properties.
for prop in iter:
propName = iter.GetKey()
if (prop.GetInformationOnly() and propName != "TimestepValues" ) \
or prop.GetIsInternal():
continue
            names = [iter.PropertyLabel]
propDoc = None
if prop.GetDocumentation():
propDoc = prop.GetDocumentation().GetDescription()
for name in names:
name = _make_name_valid(name)
if name:
cdict[name] = property(_createGetProperty(propName),
_createSetProperty(propName),
None,
propDoc)
# Add the documentation as the class __doc__
if proto.GetDocumentation() and \
proto.GetDocumentation().GetDescription():
doc = proto.GetDocumentation().GetDescription()
else:
doc = Proxy.__doc__
cdict['__doc__'] = doc
# Create the new type
if proto.GetXMLName() == "ExodusIIReader":
superclasses = (ExodusIIReaderProxy,)
elif proto.IsA("vtkSMSourceProxy"):
superclasses = (SourceProxy,)
elif proto.IsA("vtkSMViewLayoutProxy"):
superclasses = (ViewLayoutProxy,)
else:
superclasses = (Proxy,)
cobj = type(pname, superclasses, cdict)
# Add it to the modules dictionary
mdl.__dict__[pname] = cobj
return mdl
def __determineGroup(proxy):
"""Internal method"""
if not proxy:
return None
xmlgroup = proxy.GetXMLGroup()
xmlname = proxy.GetXMLName()
if xmlgroup == "sources":
if xmlname in ["BlockSelectionSource",
"FrustumSelectionSource",
"GlobalIDSelectionSource",
"PedigreeIDSelectionSource",
"IDSelectionSource",
"CompositeDataIDSelectionSource",
"HierarchicalDataIDSelectionSource",
"ThresholdSelectionSource",
"LocationSelectionSource"]:
return "selection_sources"
return "sources"
elif xmlgroup == "filters":
return "sources"
elif xmlgroup == "representations":
if xmlname == "ScalarBarWidgetRepresentation":
return "scalar_bars"
return "representations"
elif xmlgroup == "animation_keyframes":
return "animation"
return xmlgroup
__nameCounter = {}
def __determineName(proxy, group):
global __nameCounter
name = _make_name_valid(proxy.GetXMLLabel())
if not name:
return None
    if name not in __nameCounter:
__nameCounter[name] = 1
val = 1
else:
__nameCounter[name] += 1
val = __nameCounter[name]
return "%s%d" % (name, val)
def __getName(proxy, group):
pxm = ProxyManager(proxy.GetSession())
if isinstance(proxy, Proxy):
proxy = proxy.SMProxy
return pxm.GetProxyName(group, proxy)
class MissingRegistrationInformation(Exception):
"""Exception for missing registration information. Raised when a name or group
is not specified or when a group cannot be deduced."""
pass
class MissingProxy(Exception):
"""Exception fired when the requested proxy is missing."""
pass
def Register(proxy, **extraArgs):
"""Registers a proxy with the proxy manager. If no 'registrationGroup' is
specified, then the group is inferred from the type of the proxy.
    'registrationName' may be specified to register with a particular name;
    otherwise a default name will be created."""
# TODO: handle duplicate registration
if "registrationGroup" in extraArgs:
registrationGroup = extraArgs["registrationGroup"]
else:
registrationGroup = __determineGroup(proxy)
if "registrationName" in extraArgs:
registrationName = extraArgs["registrationName"]
else:
registrationName = __determineName(proxy, registrationGroup)
if registrationGroup and registrationName:
pxm = ProxyManager()
pxm.RegisterProxy(registrationGroup, registrationName, proxy)
else:
raise MissingRegistrationInformation, "Registration error %s %s." % (registrationGroup, registrationName)
return (registrationGroup, registrationName)
def UnRegister(proxy, **extraArgs):
"""UnRegisters proxies registered using Register()."""
if "registrationGroup" in extraArgs:
registrationGroup = extraArgs["registrationGroup"]
else:
registrationGroup = __determineGroup(proxy)
if "registrationName" in extraArgs:
registrationName = extraArgs["registrationName"]
else:
registrationName = __getName(proxy, registrationGroup)
if registrationGroup and registrationName:
pxm = ProxyManager()
pxm.UnRegisterProxy(registrationGroup, registrationName, proxy)
else:
raise RuntimeError, "UnRegistration error."
return (registrationGroup, registrationName)
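# A minimal usage sketch (assumes an active connection and the 'sources'
# module created by _createModules; the registration name is illustrative):
#
#   sphere = sources.Sphere()
#   group, name = Register(sphere, registrationName="MySphere")
#   # ... use the registered proxy ...
#   UnRegister(sphere, registrationGroup=group, registrationName=name)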
def demo1():
"""This simple demonstration creates a sphere, renders it and delivers
it to the client using Fetch. It returns a tuple of (data, render
view)"""
if not ActiveConnection:
Connect()
ss = sources.Sphere(Radius=2, ThetaResolution=32)
shr = filters.Shrink(Input=OutputPort(ss,0))
cs = sources.Cone()
app = filters.AppendDatasets()
app.Input = [shr, cs]
rv = CreateRenderView()
rep = CreateRepresentation(app, rv)
rv.ResetCamera()
rv.StillRender()
data = Fetch(ss)
return (data, rv)
def demo2(fname="/Users/berk/Work/ParaViewData/Data/disk_out_ref.ex2"):
"""This method demonstrates the user of a reader, representation and
view. It also demonstrates how meta-data can be obtained using proxies.
Make sure to pass the full path to an exodus file. Also note that certain
parameters are hard-coded for disk_out_ref.ex2 which can be found
in ParaViewData. This method returns the render view."""
if not ActiveConnection:
Connect()
# Create the exodus reader and specify a file name
reader = sources.ExodusIIReader(FileName=fname)
# Get the list of point arrays.
arraySelection = reader.PointVariables
print arraySelection.Available
# Select all arrays
arraySelection.SetData(arraySelection.Available)
# Next create a default render view appropriate for the session type.
rv = CreateRenderView()
# Create the matching representation
rep = CreateRepresentation(reader, rv)
rep.Representation = 1 # Wireframe
# Black background is not pretty
rv.Background = [0.4, 0.4, 0.6]
rv.StillRender()
# Reset the camera to include the whole thing
rv.ResetCamera()
rv.StillRender()
# Change the elevation of the camera. See VTK documentation of vtkCamera
# for camera parameters.
c = rv.GetActiveCamera()
c.Elevation(45)
rv.StillRender()
    # Now that the reader has executed, let's get some information about its
    # output.
pdi = reader[0].PointData
# This prints a list of all read point data arrays as well as their
# value ranges.
print 'Number of point arrays:', len(pdi)
for i in range(len(pdi)):
ai = pdi[i]
print "----------------"
print "Array:", i, ai.Name, ":"
numComps = ai.GetNumberOfComponents()
print "Number of components:", numComps
for j in range(numComps):
print "Range:", ai.GetRange(j)
# White is boring. Let's color the geometry using a variable.
# First create a lookup table. This object controls how scalar
# values are mapped to colors. See VTK documentation for
# details.
lt = rendering.PVLookupTable()
# Assign it to the representation
rep.LookupTable = lt
# Color by point array called Pres
rep.ColorAttributeType = 0 # point data
rep.ColorArrayName = "Pres"
# Add to RGB points. These are tuples of 4 values. First one is
# the scalar values, the other 3 the RGB values. This list has
# 2 points: Pres: 0.00678, color: blue, Pres: 0.0288, color: red
lt.RGBPoints = [0.00678, 0, 0, 1, 0.0288, 1, 0, 0]
lt.ColorSpace = 1 # HSV
rv.StillRender()
return rv
def demo3():
"""This method demonstrates the use of servermanager with numpy as
    well as pylab for plotting. It creates an artificial data source,
    probes it with a line, delivers the result to the client using Fetch
    and plots it using pylab. This demo requires numpy and pylab to be
    installed. It returns a tuple of (data, render view, probe)."""
import paraview.numpy_support
import pylab
if not ActiveConnection:
Connect()
# Create a synthetic data source
source = sources.Wavelet()
# Let's get some information about the data. First, for the
# source to execute
source.UpdatePipeline()
di = source.GetDataInformation()
print "Data type:", di.GetPrettyDataTypeString()
print "Extent:", di.GetExtent()
print "Array name:", \
source[0].PointData[0].Name
rv = CreateRenderView()
rep1 = CreateRepresentation(source, rv)
rep1.Representation = 3 # outline
# Let's apply a contour filter
cf = filters.Contour(Input=source, ContourValues=[200])
# Select the array to contour by
#cf.SelectInputScalars = 'RTData'
rep2 = CreateRepresentation(cf, rv)
rv.Background = (0.4, 0.4, 0.6)
# Reset the camera to include the whole thing
rv.StillRender()
rv.ResetCamera()
rv.StillRender()
# Now, let's probe the data
probe = filters.ResampleWithDataset(Input=source)
# with a line
line = sources.Line(Resolution=60)
# that spans the dataset
bounds = di.GetBounds()
print "Bounds: ", bounds
line.Point1 = bounds[0:6:2]
line.Point2 = bounds[1:6:2]
probe.Source = line
# Render with the line
rep3 = CreateRepresentation(line, rv)
rv.StillRender()
# Now deliver it to the client. Remember, this is for small data.
data = Fetch(probe)
# Convert it to a numpy array
data = paraview.numpy_support.vtk_to_numpy(
data.GetPointData().GetArray("RTData"))
# Plot it using matplotlib
pylab.plot(data)
pylab.show()
return (data, rv, probe)
def demo4(fname="/Users/berk/Work/ParaViewData/Data/can.ex2"):
"""This method demonstrates the user of AnimateReader for
creating animations."""
if not ActiveConnection:
Connect()
reader = sources.ExodusIIReader(FileName=fname)
view = CreateRenderView()
repr = CreateRepresentation(reader, view)
view.StillRender()
view.ResetCamera()
view.StillRender()
c = view.GetActiveCamera()
c.Elevation(95)
return AnimateReader(reader, view)
def demo5():
""" Simple sphere animation"""
if not ActiveConnection:
Connect()
sphere = sources.Sphere()
view = CreateRenderView()
repr = CreateRepresentation(sphere, view)
view.StillRender()
view.ResetCamera()
view.StillRender()
# Create an animation scene
scene = animation.AnimationScene()
# Add 1 view
scene.ViewModules = [view]
# Create a cue to animate the StartTheta property
cue = animation.KeyFrameAnimationCue()
cue.AnimatedProxy = sphere
cue.AnimatedPropertyName = "StartTheta"
# Add it to the scene's cues
scene.Cues = [cue]
# Create 2 keyframes for the StartTheta track
keyf0 = animation.CompositeKeyFrame()
keyf0.Type = 2 # Set keyframe interpolation type to Ramp.
# At time = 0, value = 0
keyf0.KeyTime = 0
keyf0.KeyValues= [0]
keyf1 = animation.CompositeKeyFrame()
# At time = 1.0, value = 200
keyf1.KeyTime = 1.0
keyf1.KeyValues= [200]
# Add keyframes.
cue.KeyFrames = [keyf0, keyf1]
scene.Play()
return scene
ASSOCIATIONS = { 'POINTS' : 0, 'CELLS' : 1, 'VERTICES' : 4, 'EDGES' : 5, 'ROWS' : 6}
# Users can set the active connection which will be used by API
# to create proxies etc when no connection argument is passed.
# Connect() automatically sets this if it is not already set.
ActiveConnection = None
# Fields for multi-server support
MultiServerConnections = None
# API for multi-server support
def enableMultiServer(multiServer=True):
"""This method enable the current servermanager to support several
connections. Once we enable the multi-server support, the user can create
as many connection as he want and switch from one to another in order to
create and manage proxy."""
global MultiServerConnections, ActiveConnection
if not multiServer and MultiServerConnections:
raise RuntimeError, "Once we enable Multi-Server support we can not get back"
MultiServerConnections = []
if ActiveConnection:
MultiServerConnections.append(ActiveConnection)
def switchActiveConnection(newActiveConnection=None):
"""Switch active connection to be the provided one or if none just pick the
other one"""
global MultiServerConnections, ActiveConnection
    if MultiServerConnections is None:
        raise RuntimeError, "enableMultiServer() must be called first"
# Manage the case when no connection is provided
if newActiveConnection:
ActiveConnection = newActiveConnection
__exposeActiveModules__()
# Update active session for ParaView
if vtkSMProxyManager.GetProxyManager().GetActiveSession() != ActiveConnection.Session:
vtkSMProxyManager.GetProxyManager().SetActiveSession(ActiveConnection.Session)
return ActiveConnection
else:
for connection in MultiServerConnections:
if connection != ActiveConnection:
ActiveConnection = connection
__exposeActiveModules__()
# Update active session for ParaView
if vtkSMProxyManager.GetProxyManager().GetActiveSession() != ActiveConnection.Session:
vtkSMProxyManager.GetProxyManager().SetActiveSession(ActiveConnection.Session)
return ActiveConnection
return None
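# Multi-server usage sketch (host names are hypothetical; Connect() is
# defined elsewhere in this module):
#
#   enableMultiServer()
#   c1 = Connect("serverA")
#   c2 = Connect("serverB")
#   switchActiveConnection(c1)  # proxies are now created on serverA
#   switchActiveConnection()    # no argument: picks another connection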
# Needs to be called when paraview module is loaded from python instead
# of pvpython, pvbatch or GUI.
if not vtkProcessModule.GetProcessModule():
    pvoptions = None # Not applicable for SALOME Python console
# if paraview.options.batch:
# pvoptions = vtkPVOptions();
# pvoptions.SetProcessType(0x40)
# if paraview.options.symmetric:
# pvoptions.SetSymmetricMPIMode(True)
vtkInitializationHelper.Initialize(sys.executable,
vtkProcessModule.PROCESS_CLIENT, pvoptions)
# Initialize progress printing. Can be turned off by calling
# ToggleProgressPrinting() again.
progressObserverTag = None
currentAlgorithm = False
currentProgress = 0
fromGUI = False
ToggleProgressPrinting()
_pyproxies = {}
# Create needed sub-modules
# We can no longer create modules, unless we have connected to a server.
# _createModules()
# Set up our custom importer (if possible)
loader = _ModuleLoader()
sys.meta_path.append(loader)
def __InitAfterConnect__(connection):
"""
    This function is called every time a server connection is made. Since the
    ProxyManager and all proxy definitions change with every new connection,
    we re-create all the modules.
"""
_createModules(connection.Modules)
    ## VSV: fromFilter is always False for SALOME because it can't be changed from ParaView code
#if not paraview.fromFilter:
# fromFilter is set when this module is imported from the programmable
# filter
# global _defUpdater
# _defUpdater = __DefinitionUpdater()
connection.AttachDefinitionUpdater()
pass
def __exposeActiveModules__():
"""Update servermanager submodules to point to the current
ActiveConnection.Modules.*"""
# Expose all active module to the current servermanager module
if ActiveConnection:
for m in [mName for mName in dir(ActiveConnection.Modules) if mName[0] != '_' ]:
exec "global %s;%s = ActiveConnection.Modules.%s" % (m,m,m)
# Definitions for working in SALOME GUI mode
#aParams = myParavis.GetConnectionParameters()
#ActiveConnection = Connect()
##Connection(aParams[0])
#ActiveConnection.SetHost(aParams[1], aParams[2], aParams[3], aParams[4], aParams[5])
#ToggleProgressPrinting()
#fromGUI = True
InitFromGUI()
if hasattr(sys, "ps1"):
# session is interactive.
    print vtkSMProxyManager.GetParaViewSourceVersion()
def GetConnectionFromId(id):
for connection in MultiServerConnections:
if connection.ID == id:
return connection
return None
def GetConnectionFromSession(session):
for connection in MultiServerConnections:
if connection.Session == session:
return connection
return None
def GetConnectionAt(index):
return MultiServerConnections[index]
def GetNumberOfConnections():
return len(MultiServerConnections)
#VTN: Problem during execution
#atexit.register(vtkPythonProgrammableFilter.DeleteGlobalPythonInterpretor)
|
FedoraScientific/salome-paravis
|
src/PV_SWIG/paravisSM.py
|
Python
|
lgpl-2.1
| 112,492
|
[
"ParaView",
"VTK",
"VisIt"
] |
0ff5fdc6e963882c7b6fcec6c4d964d7dcdbcce0c8ae6145b6180be94eff9421
|
import tensorflow as tf
import numpy as np
import time
import os
import logging
import models
import inferences
import nomen
import util
import stats
def train(config):
"""Train a Variational Autoencoder or deep latent gaussian model on MNIST."""
cfg = config
logger = logging.getLogger()
t0 = time.time()
logdir_name = util.list_to_str(
['dlgm', cfg['p/n_layers'], 'layer', 'w_stddev',
cfg['p_net/init_w_stddev'], cfg['inference'],
'q_init_stddev', cfg['q/init_stddev'], 'lr', cfg['optim/learning_rate']
])
if cfg['inference'] == 'proximity':
logdir_name += '_' + util.list_to_str(
[cfg['c/proximity_statistic'], 'decay_rate', cfg['c/decay_rate'],
'decay_steps', cfg['c/decay_steps'], 'lag', cfg['c/lag'],
cfg['c/decay'], cfg['c/magnitude']])
cfg['log/dir'] = util.make_logdir(cfg, logdir_name)
util.log_to_file(os.path.join(cfg['log/dir'], 'train.log'))
logger.info(cfg)
np.random.seed(433423)
tf.set_random_seed(435354)
sess = tf.InteractiveSession()
data_iterator, _, _ = util.provide_data(cfg['train_data'])
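  # Note: the generator below closes over input_data, which is defined a few
  # lines further down; this is safe because the generator body does not run
  # until the first next() call, by which time the placeholder exists.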
def get_feed_iterator():
while True:
yield {input_data: next(data_iterator)}
feed_iterator = get_feed_iterator()
input_data = tf.placeholder(tf.float32, [cfg['batch_size'], 28, 28, 1])
tf.summary.image('data', input_data)
model = models.DeepLatentGaussianModel(cfg)
variational = models.DeepLatentGaussianVariational(cfg)
if cfg['inference'] == 'vanilla':
inference_fn = inferences.VariationalInference
elif cfg['inference'] == 'proximity':
inference_fn = inferences.ProximityVariationalInference
inference = inference_fn(sess, cfg, model, variational, input_data)
inference.build_train_op()
# prior_predictive = stats.build_prior_predictive(model)
posterior_predictive = stats.build_posterior_predictive(
cfg, model, variational, input_data)
inference.build_summary_op()
ckpt = util.latest_checkpoint(cfg)
if ckpt is not None:
inference.saver.restore(sess, ckpt)
else:
inference.initialize(next(feed_iterator))
if not cfg['eval_only']:
for py_step in range(cfg['n_iterations']):
feed_dict = next(feed_iterator)
if py_step == 0:
inference.initialize(feed_dict)
if cfg['inference'] == 'proximity' and cfg['c/lag'] != 'moving_average':
feed_dict.update(
inference.constraint_feed_dict(py_step, feed_iterator))
if py_step % cfg['print_every'] == 0:
logger.info(inference.log_stats(feed_dict))
#util.save_prior_posterior_predictives(
# cfg, sess, inference, prior_predictive,
# posterior_predictive, feed_dict, feed_dict[input_data])
sess.run(inference.train_op, feed_dict)
print(tf.train.latest_checkpoint(cfg['log/dir']))
# evaluation
if cfg['eval_only']:
valid_iterator, np_valid_data_mean, _ = util.provide_data(
cfg['valid_data'])
def create_iterator():
while True:
yield {input_data: next(valid_iterator)}
valid_feed_iterator = create_iterator()
np_l = 0.
np_log_x = 0.
for i in range(cfg['valid_data/n_examples'] // cfg['valid_data/batch_size']):
feed_dict = next(valid_feed_iterator)
tmp_np_log_x, tmp_np_l = sess.run(
[inference.log_p_x_hat, inference.elbo], feed_dict)
np_log_x += np.sum(tmp_np_log_x)
np_l += np.mean(tmp_np_l)
logger.info('Time total of: %.3f hours' % ((time.time() - t0) / 60. / 60.))
valid_elbo = np_l / cfg['valid_data/n_examples']
valid_log_lik = np_log_x / cfg['valid_data/n_examples']
txt = ('for %s set -- elbo: %.10f\tlog_likelihood: %.10f' % (
cfg['valid_data/split'], valid_elbo, valid_log_lik))
logger.info(txt)
with open(os.path.join(cfg['log/dir'], 'job.log'), 'w') as f:
f.write(txt)
eval_summ = tf.Summary()
eval_summ.value.add(tag='Valid ELBO', simple_value=valid_elbo)
eval_summ.value.add(tag='Valid Log Likelihood', simple_value=valid_log_lik)
inference.summary_writer.add_summary(eval_summ, 0)
inference.summary_writer.flush()
def main(_):
cfg = nomen.Config('deep_latent_gaussian_model_config.yml')
train(cfg)
if __name__ == '__main__':
tf.app.run()
|
altosaar/proximity_vi
|
deep_latent_gaussian_model_train.py
|
Python
|
mit
| 4,221
|
[
"Gaussian"
] |
b1c04aafbef2f84ef857cd294b9462d96d78a38710a9aa32c45105cfc743655c
|
from ase import *
from ase.lattice.surface import fcc100, add_adsorbate
# 2x2-Al(001) surface with 3 layers and an
# Au atom adsorbed in a hollow site:
slab = fcc100('Al', size=(2, 2, 3))
add_adsorbate(slab, 'Au', 1.7, 'hollow')
slab.center(axis=2, vacuum=4.0)
# Make sure the structure is correct:
#view(slab)
# Fix second and third layers:
mask = [atom.tag > 1 for atom in slab]
#print mask
slab.set_constraint(FixAtoms(mask=mask))
# Use EMT potential:
slab.set_calculator(EMT())
# Initial state:
qn = QuasiNewton(slab, trajectory='initial.traj')
qn.run(fmax=0.05)
# Final state:
slab[-1].x += slab.get_cell()[0, 0] / 2
qn = QuasiNewton(slab, trajectory='final.traj')
qn.run(fmax=0.05)
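# Follow-up sketch (assuming the standard ASE NEB workflow this tutorial
# leads into): the relaxed endpoints saved above are typically read back
# and interpolated to build the NEB band, e.g.:
#   from ase.io import read
#   initial = read('initial.traj')
#   final = read('final.traj')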
|
freephys/python_ase
|
doc/tutorials/neb/diffusion1.py
|
Python
|
gpl-3.0
| 694
|
[
"ASE"
] |
47d3cc99622640822c002fa9db7604cb2e0ef28628a79aee41467d2f72c5b0a5
|
import os
from os import path, sys
from shutil import rmtree
import subprocess
import numpy as np
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
TRAFIC_LIB_DIR = path.join(path.dirname(path.dirname(path.abspath(__file__))), "TraficLib")
sys.path.append(TRAFIC_LIB_DIR)
print TRAFIC_LIB_DIR
from makeDataset import run_make_dataset
from envInstallTF import runMaybeEnvInstallTF
# print runPreprocess
import logging
# TMP_DIR = "/work/dprince/DirectoryTest/"
# TRAIN_DIR = "/work/dprince/Multiclass/Train_32_Cleaned/"
# MODEL_DIR = "/work/dprince/Trash/Models/"
#
# TraficBi
#
class TraficBi(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "TraficBi" # TODO make this more human readable by adding spaces
self.parent.categories = ["Classification"]
self.parent.dependencies = []
self.parent.contributors = ["Prince Ngattai Lam (UNC-NIRAL)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
"""
self.parent.acknowledgementText = """
""" # replace with organization, grant and thanks.
#
# TraficBiWidget
#
class TraficBiWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setupEditionTab(self):
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# UI FILES LOADING #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
loader = qt.QUiLoader()
self.EditionTabName = 'TraficBiEditionTab'
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.EditionTabName)
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load(qfile, self.editionTabWidget)
self.editionLayout = qt.QVBoxLayout(self.editionTabWidget)
self.editionWidget = widget
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FIBER DISPLAY AREA #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
displayFibersCollapsibleButton = ctk.ctkCollapsibleButton()
displayFibersCollapsibleButton.text = "Fiber Bundle"
self.editionLayout.addWidget(displayFibersCollapsibleButton)
self.fiberList = qt.QComboBox()
self.name_labels = ['Select a type of fiber','0','Arc_L_FT','Arc_L_FrontoParietal','Arc_L_TemporoParietal','Arc_R_FT','Arc_R_FrontoParietal','Arc_R_TemporoParietal','CGC_L','CGC_R','CGH_L','CGH_R','CorpusCallosum_Genu',
'CorpusCallosum_Motor','CorpusCallosum_Parietal','CorpusCallosum_PreMotor','CorpusCallosum_Rostrum','CorpusCallosum_Splenium','CorpusCallosum_Tapetum','CorticoFugal-Left_Motor',
'CorticoFugal-Left_Parietal','CorticoFugal-Left_PreFrontal','CorticoFugal-Left_PreMotor','CorticoFugal-Right_Motor','CorticoFugal-Right_Parietal','CorticoFugal-Right_PreFrontal',
'CorticoFugal-Right_PreMotor','CorticoRecticular-Left','CorticoRecticular-Right','CorticoSpinal-Left','CorticoSpinal-Right','CorticoThalamic_L_PreFrontal','CorticoThalamic_L_SUPERIOR',
'CorticoThalamic_Left_Motor','CorticoThalamic_Left_Parietal','CorticoThalamic_Left_PreMotor','CorticoThalamic_R_PreFrontal','CorticoThalamic_R_SUPERIOR',
'CorticoThalamic_Right_Motor','CorticoThalamic_Right_Parietal','CorticoThalamic_Right_PreMotor','Fornix_L','Fornix_R','IFOF_L','IFOF_R','ILF_L','ILF_R',
'OpticRadiation_Left','OpticRadiation_Right','Optic_Tract_L','Optic_Tract_R','SLF_II_L','SLF_II_R','UNC_L','UNC_R']
self.fiberList.addItems(self.name_labels)
self.fiberList.setMaxVisibleItems(5)
# Layout within the dummy collapsible button
displayFibersFormLayout = qt.QFormLayout(displayFibersCollapsibleButton)
#
# Fibers Tree View
#
# self.inputFiber = slicer.qMRMLTractographyDisplayTreeView()
self.inputFiber = slicer.qMRMLNodeComboBox()
self.inputFiber.nodeTypes = ["vtkMRMLFiberBundleNode"]
self.inputFiber.addEnabled = False
self.inputFiber.removeEnabled = True
self.inputFiber.noneEnabled = True
self.inputFiber.showHidden = True
self.inputFiber.showChildNodeTypes = False
self.inputFiber.setMRMLScene(slicer.mrmlScene)
displayFibersFormLayout.addRow("Input Fiber", self.inputFiber)
displayFibersFormLayout.addRow("Type of Fiber", self.fiberList)
# self.progress = qt.QProgressDialog()
# self.progress.setValue(0)
# self.progress.show()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FIBER SELECTION AREA #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
selectionFibersCollapsibleButton = ctk.ctkCollapsibleButton()
selectionFibersCollapsibleButton.text = "Fiber Selection"
self.editionLayout.addWidget(selectionFibersCollapsibleButton)
# Layout within the dummy collapsible button
selectionFibersFormLayout = qt.QFormLayout(selectionFibersCollapsibleButton)
self.selectionFiber = self.getWidget('qSlicerTractographyEditorROIWidget', index_tab=0)
self.ROISelectorDisplay = self.getWidget('ROIForFiberSelectionMRMLNodeSelector', index_tab=0)
self.ROISelectorDisplay.setMRMLScene(slicer.mrmlScene)
self.classList = self.getWidget('fiberList', index_tab=0)
name_classes = ['Select class of fiber', 'Negative','Positive']
self.classList.addItems(name_classes)
self.classList.setMaxVisibleItems(5)
# selectionFibersFormLayout.addRow(self.selectionFiber)
selectionFibersFormLayout.addRow(self.selectionFiber)
self.disROI = self.getWidget('DisableROI', index_tab=0)
self.posROI = self.getWidget('PositiveROI', index_tab=0)
self.negROI = self.getWidget('NegativeROI', index_tab=0)
self.interROI = self.getWidget('InteractiveROI', index_tab=0)
self.showROI = self.getWidget('ROIVisibility', index_tab=0)
# self.accEditOn = self.getWidget('EnableAccurateEdit')
self.extractFiber = self.getWidget('CreateNewFiberBundle', index_tab=0)
self.ROISelector = slicer.qSlicerTractographyEditorROIWidget()
self.ROISelector.setFiberBundleNode(self.inputFiber.currentNode())
self.ROISelector.setMRMLScene(slicer.mrmlScene)
self.ROISelector.setAnnotationMRMLNodeForFiberSelection(self.ROISelectorDisplay.currentNode())
self.ROISelector.setAnnotationROIMRMLNodeToFiberBundleEnvelope(self.ROISelectorDisplay.currentNode())
# self.editionLayout.addWidget(self.ROISelector)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FIBER REVIEW AREA #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
reviewsCollapsibleButton = ctk.ctkCollapsibleButton()
reviewsCollapsibleButton.text = "Reviews"
self.editionLayout.addWidget(reviewsCollapsibleButton)
self.reviewsFormLayout = qt.QFormLayout(reviewsCollapsibleButton)
self.reviewsList = slicer.qMRMLTractographyDisplayTreeView()
self.reviewsList.setMRMLScene(slicer.mrmlScene)
self.reviewsFormLayout.addRow(self.reviewsList)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CLEAR AND SAVE AREA #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
self.dFPath = qt.QLineEdit("")
self.outputDirEdit = qt.QLineEdit("")
# self.dFPath.setEnabled(False)
self.dFSelector = qt.QPushButton("Browse")
self.outputDirEditSelector = qt.QPushButton("Browse")
self.clearButton = qt.QPushButton("CLEAR")
self.clearButton.toolTip = "Clear everything."
self.clearButton.enabled = True
self.saveButton = qt.QPushButton("SAVE")
self.saveButton.toolTip = "Save and update Trafic database."
self.saveButton.enabled = True
# self.editionLayout.addWidget(self.ROISelector)
gridLayoutdF = qt.QGridLayout()
gridLayoutClearSave = qt.QGridLayout()
gridLayoutdF.addWidget(qt.QLabel("Displacement field"), 0, 0)
gridLayoutdF.addWidget(self.dFPath, 0, 1)
gridLayoutdF.addWidget(self.dFSelector, 0, 2)
gridLayoutdF.addWidget(qt.QLabel("Output Directory"), 1, 0)
gridLayoutdF.addWidget(self.outputDirEdit, 1, 1)
gridLayoutdF.addWidget(self.outputDirEditSelector, 1, 2)
gridLayoutClearSave.addWidget(self.clearButton, 0, 0)
gridLayoutClearSave.addWidget(self.saveButton, 0, 2)
self.editionLayout.addLayout(gridLayoutdF)
self.editionLayout.addLayout(gridLayoutClearSave)
    self.nodeDict = {}
self.nodePosDict = {}
self.nodeNegDict = {}
    # Initialize the dictionaries that will contain the node IDs for each fiber type
for i in xrange(1, len(self.name_labels)):
self.nodePosDict[self.name_labels[i]] = []
self.nodeNegDict[self.name_labels[i]] = []
self.editionLayout.addStretch(1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONNECTIONS #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
self.inputFiber.connect("currentNodeChanged(vtkMRMLNode*)", self.onChangeCurrentNode)
self.disROI.connect("toggled(bool)", self.onDisROI)
self.posROI.connect("toggled(bool)", self.onPosROI)
self.negROI.connect("toggled(bool)", self.onNegROI)
self.interROI.connect("toggled(bool)", self.onInterROI)
self.showROI.connect("toggled(bool)", self.onShowROI)
# self.accEditOn.connect("toggled(bool)", self.onAccEditOn)
self.ROISelectorDisplay.connect("currentNodeChanged(vtkMRMLNode*)", self.onChangeCurrentNode)
self.ROISelectorDisplay.connect("nodeAddedByUser(vtkMRMLNode*)", self.onAddNode)
self.saveButton.connect("clicked(bool)", self.onSaveButton)
self.clearButton.connect("clicked(bool)", self.onClearButton)
self.extractFiber.connect("clicked(bool)", self.OnExtractFiber)
self.dFSelector.connect("clicked(bool)", self.OndFSelector)
self.dFPath.connect("editingFinished()", self.checkdFPath)
self.outputDirEditSelector.connect("clicked(bool)", self.OnOutputDirEditSelector)
self.outputDirEdit.connect("editingFinished()", self.CheckOutputDirEdit)
return
def setupTrainingTab(self):
self.trainingLayout = qt.QVBoxLayout(self.trainingTabWidget)
gridLayoutTrain = qt.QGridLayout()
self.lr_spinbox = qt.QDoubleSpinBox()
self.lr_spinbox.setSingleStep(0.001)
self.lr_spinbox.setValue(0.01)
self.lr_spinbox.setDecimals(3)
self.num_epochs_spinbox = qt.QSpinBox()
self.num_epochs_spinbox.setSingleStep(1)
self.num_epochs_spinbox.setValue(1)
gridLayoutTrain.addWidget(qt.QLabel("Learning Rate"), 0, 0)
gridLayoutTrain.addWidget(self.lr_spinbox, 0, 1)
gridLayoutTrain.addWidget(qt.QLabel("Number of Epochs"), 1, 0)
gridLayoutTrain.addWidget(self.num_epochs_spinbox, 1, 1)
gridLayoutSumdir = qt.QGridLayout()
self.sumDirTrainSelector = qt.QPushButton("Browse")
self.sumDirTrain = qt.QLineEdit("")
self.modelDirTrainSelector = qt.QPushButton("Browse")
self.modelDirTrain = qt.QLineEdit("")
self.dataDirTrainSelector = qt.QPushButton("Browse")
self.dataDirTrain = qt.QLineEdit("")
gridLayoutSumdir.addWidget(qt.QLabel("Data Directory"), 1, 0)
gridLayoutSumdir.addWidget(self.dataDirTrain, 1, 1)
gridLayoutSumdir.addWidget(self.dataDirTrainSelector, 1, 2)
gridLayoutSumdir.addWidget(qt.QLabel("Model Directory"), 2, 0)
gridLayoutSumdir.addWidget(self.modelDirTrain, 2, 1)
gridLayoutSumdir.addWidget(self.modelDirTrainSelector, 2, 2)
gridLayoutSumdir.addWidget(qt.QLabel("Summary Directory"), 3, 0)
gridLayoutSumdir.addWidget(self.sumDirTrain, 3, 1)
gridLayoutSumdir.addWidget(self.sumDirTrainSelector, 3, 2)
gridResTrain = qt.QGridLayout()
self.trainReset = qt.QPushButton("RESET")
self.trainTrain = qt.QPushButton("TRAIN")
gridResTrain.addWidget(self.trainReset, 0, 0)
gridResTrain.addWidget(self.trainTrain, 0, 1)
self.trainingLayout.addLayout(gridLayoutTrain)
self.trainingLayout.addLayout(gridLayoutSumdir)
self.trainingLayout.addLayout(gridResTrain)
self.trainingLayout.addStretch(1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONNECTIONS #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
self.trainReset.connect("clicked(bool)", self.OnTrainReset)
self.trainTrain.connect("clicked(bool)", self.OnTrainTrain)
self.dataDirTrainSelector.connect("clicked(bool)", self.OnDataDirTrain)
self.modelDirTrainSelector.connect("clicked(bool)", self.OnModelDirTrain)
self.sumDirTrainSelector.connect("clicked(bool)", self.OnSumDirTrain)
self.dataDirTrain.connect("editingFinished()", self.CheckDataDirTrain)
self.modelDirTrain.connect("editingFinished()", self.CheckModelDirTrain)
self.sumDirTrain.connect("editingFinished()", self.CheckSumDirTrain)
# self.trainingWidget = widget
return
def setupClassificationTab(self):
self.classificationLayout = qt.QVBoxLayout(self.classificationTabWidget)
gridLayoutClass = qt.QGridLayout()
### Input File
self.inputClass = qt.QLineEdit("")
self.inputClassSelector = qt.QPushButton("Browse")
### Output Directory
self.outputDirClass = qt.QLineEdit("")
self.outputDirClassSelector = qt.QPushButton("Browse")
### Model Directory
self.modelDirClass = qt.QLineEdit("")
self.modelDirClassSelector = qt.QPushButton("Browse")
### Summary Directory
self.sumDirClass = qt.QLineEdit("")
self.sumDirClassSelector = qt.QPushButton("Browse")
### Displacement Field
self.dFPathClass = qt.QLineEdit("")
self.dFPathClassSelector = qt.QPushButton("Browse")
gridLayoutClass.addWidget(qt.QLabel("Input File"), 0, 0)
gridLayoutClass.addWidget(self.inputClass, 0, 1)
gridLayoutClass.addWidget(self.inputClassSelector, 0, 2)
gridLayoutClass.addWidget(qt.QLabel("Output Directory"), 1, 0)
gridLayoutClass.addWidget(self.outputDirClass, 1, 1)
gridLayoutClass.addWidget(self.outputDirClassSelector, 1, 2)
gridLayoutClass.addWidget(qt.QLabel("Model Directory"), 2, 0)
gridLayoutClass.addWidget(self.modelDirClass, 2, 1)
gridLayoutClass.addWidget(self.modelDirClassSelector, 2, 2)
gridLayoutClass.addWidget(qt.QLabel("Summary Directory"), 3, 0)
gridLayoutClass.addWidget(self.sumDirClass, 3, 1)
gridLayoutClass.addWidget(self.sumDirClassSelector, 3, 2)
gridLayoutClass.addWidget(qt.QLabel("Displacement Field"), 4, 0)
gridLayoutClass.addWidget(self.dFPathClass, 4, 1)
gridLayoutClass.addWidget(self.dFPathClassSelector, 4, 2)
self.fiberListClass = qt.QComboBox()
self.fiberListClass.addItems(self.name_labels)
gridResClass = qt.QGridLayout()
self.classReset = qt.QPushButton("RESET")
self.classRun = qt.QPushButton("RUN")
gridResClass.addWidget(self.classReset, 0, 0)
gridResClass.addWidget(self.classRun, 0, 1)
self.classificationLayout.addWidget(self.fiberListClass)
self.classificationLayout.addLayout(gridLayoutClass)
self.classificationLayout.addLayout(gridResClass)
self.classificationLayout.addStretch(1)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONNECTIONS #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
self.classReset.connect("clicked(bool)", self.OnClassReset)
self.classRun.connect("clicked(bool)", self.OnClassRun)
self.inputClassSelector.connect("clicked(bool)", self.OnInputClass)
self.outputDirClassSelector.connect("clicked(bool)", self.OnOutputDirClass)
self.modelDirClassSelector.connect("clicked(bool)", self.OnModelDirClass)
self.sumDirClassSelector.connect("clicked(bool)", self.OnSumDirClass)
self.dFPathClassSelector.connect("clicked(bool)", self.OndFClassSelector)
self.inputClass.connect("editingFinished()", self.CheckInputClass)
self.outputDirClass.connect("editingFinished()", self.CheckOutputDirClass)
self.modelDirClass.connect("editingFinished()", self.CheckModelDirClass)
self.sumDirClass.connect("editingFinished()", self.CheckSumDirClass)
self.dFPathClass.connect("editingFinished()", self.CheckdFClassPath)
# self.classificationWidget = widget
return
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
os.environ['ITK_AUTOLOAD_PATH']= ''
self.moduleName = 'TraficBi'
self.editionTabWidget = qt.QWidget()
self.trainingTabWidget = qt.QWidget()
self.classificationTabWidget = qt.QWidget()
self.tabs = qt.QTabWidget()
self.tabs.addTab(self.editionTabWidget,"Edition")
self.tabs.addTab(self.trainingTabWidget,"Training")
self.tabs.addTab(self.classificationTabWidget,"Classification")
self.layout = self.parent.layout()
self.layout.addWidget(self.tabs)
self.setupEditionTab()
self.setupClassificationTab()
self.setupTrainingTab()
def getWidget(self, objectName, index_tab=0):
if index_tab == 0:
return self.findWidget(self.editionWidget, objectName)
if index_tab == 1:
return self.findWidget(self.trainingWidget, objectName)
if index_tab == 2:
return self.findWidget(self.classificationWidget, objectName)
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
def onDisROI(self):
if self.disROI.isChecked():
self.ROISelector.disableROISelection(True)
def onPosROI(self):
if self.posROI.isChecked():
self.ROISelector.positiveROISelection(True)
def onNegROI(self):
if self.negROI.isChecked():
self.ROISelector.negativeROISelection(True)
def onInterROI(self):
if self.interROI.isChecked():
self.ROISelector.setInteractiveROI(True)
def onShowROI(self):
self.ROISelectorDisplay.currentNode().SetDisplayVisibility(self.showROI.isChecked())
def OnOutputDirEditSelector(self):
self.OnBrowseDirectory(self.outputDirEdit)
def CheckOutputDirEdit(self):
return self.CheckBrowseDirectory(self.outputDirEdit, "Edition Output Directory")
def OndFSelector(self):
fileDialog = qt.QFileDialog()
fileDialog.setFileMode(qt.QFileDialog.ExistingFile)
fileDialog.setNameFilter("displacement field (*.nrrd)")
if fileDialog.exec_():
text = fileDialog.selectedFiles()
self.dFPath.setText(text[0])
def OndFClassSelector(self):
fileDialog = qt.QFileDialog()
fileDialog.setFileMode(qt.QFileDialog.ExistingFile)
fileDialog.setNameFilter("displacement field (*.nrrd)")
if fileDialog.exec_():
text = fileDialog.selectedFiles()
self.dFPathClass.setText(text[0])
def CheckdFClassPath(self):
if self.dFPathClass.text == "":
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Please choose a displacement field filename")
      msg.exec_()
      return False
elif not os.path.isfile(self.dFPathClass.text):
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("File doesn't exist. Please correct the displacement field filename")
msg.exec_()
return False
elif self.dFPathClass.text.rfind(".nrrd") == -1:
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Invalid File Format. Must be a .nrrd file. Please correct the displacement field filename")
msg.exec_()
return False
return True
def checkdFPath(self):
if self.dFPath.text == "":
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Please choose a displacement field filename")
      msg.exec_()
      return False
elif not os.path.isfile(self.dFPath.text):
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("File doesn't exist. Please correct the displacement field filename")
msg.exec_()
return False
elif self.dFPath.text.rfind(".nrrd") == -1:
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Invalid File Format. Must be a .nrrd file. Please correct the displacement field filename")
msg.exec_()
return False
return True
def OnInputClass(self):
fileDialog = qt.QFileDialog()
fileDialog.setFileMode(qt.QFileDialog.ExistingFile)
fileDialog.setNameFilter("input file (*.vtk *.vtp)")
if fileDialog.exec_():
text = fileDialog.selectedFiles()
self.inputClass.setText(text[0])
def CheckInputClass(self):
if not os.path.isfile(self.inputClass.text):
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("File doesn't exist. Please correct the input file")
msg.exec_()
return False
elif self.inputClass.text.rfind(".vtk") == -1 and self.inputClass.text.rfind(".vtp") == -1:
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Invalid File Format. Must be a .vtk or .vtp file. Please correct the input file")
msg.exec_()
return False
return True
def OnDataDirTrain(self):
self.OnBrowseDirectory(self.dataDirTrain)
def OnModelDirTrain(self):
self.OnBrowseDirectory(self.modelDirTrain)
def OnSumDirTrain(self):
self.OnBrowseDirectory(self.sumDirTrain)
def CheckDataDirTrain(self):
return self.CheckBrowseDirectory(self.dataDirTrain, "Data Directory")
def CheckModelDirTrain(self):
return self.CheckBrowseDirectory(self.modelDirTrain, "Model Directory")
def CheckSumDirTrain(self):
return self.CheckBrowseDirectory(self.sumDirTrain, "Summary Directory")
def OnOutputDirClass(self):
self.OnBrowseDirectory(self.outputDirClass)
def OnModelDirClass(self):
self.OnBrowseDirectory(self.modelDirClass)
def OnSumDirClass(self):
self.OnBrowseDirectory(self.sumDirClass)
def CheckOutputDirClass(self):
return self.CheckBrowseDirectory(self.outputDirClass, "Classification Output Directory")
def CheckModelDirClass(self):
return self.CheckBrowseDirectory(self.modelDirClass, "Model Directory")
def CheckSumDirClass(self):
return self.CheckBrowseDirectory(self.sumDirClass, "Summary Directory")
def OnBrowseDirectory(self, dir):
fileDialog = qt.QFileDialog()
fileDialog.setFileMode(qt.QFileDialog.DirectoryOnly)
if fileDialog.exec_():
text = fileDialog.selectedFiles()
dir.setText(text[0])
def CheckFiberListClass(self):
if self.fiberListClass.currentIndex == 0:
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Please select a type of fiber to classify")
msg.exec_()
return False
return True
def CheckBrowseDirectory(self, dir, name):
if dir.text=="":
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Please choose "+ str(name) + "")
msg.exec_()
return False
elif not os.path.isdir(dir.text):
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("Unknown or non valid directory. Please correct the "+ str(name))
msg.exec_()
return False
return True
# def onAccEditOn(self):
# # if self.accEditOn.isChecked():
# # # print
# # # if(self.ROISelector.fiberBundleNode()):
# # self.ROISelector.setInteractiveFiberEdit(True)
# # else:
# print "FAIL"
def onChangeCurrentNode(self):
self.posROI.setChecked(True)
self.ROISelector.setFiberBundleNode(self.inputFiber.currentNode())
self.ROISelector.setAnnotationROIMRMLNodeToFiberBundleEnvelope(self.ROISelectorDisplay.currentNode())
def onAddNode(self):
self.posROI.setChecked(True)
self.ROISelector.setAnnotationMRMLNodeForFiberSelection(self.ROISelectorDisplay.currentNode())
self.ROISelector.setAnnotationROIMRMLNodeToFiberBundleEnvelope(self.ROISelectorDisplay.currentNode())
def onSaveButton(self):
#TO DO: Save all Data and Clear after + Add a message box to confirm the save + Message to choose the dF
print "IN"
if self.checkdFPath() and self.CheckOutputDirEdit():
print "1"
currentPath = os.path.dirname(os.path.abspath(__file__))
tmp_dir = os.path.join(self.outputDirEdit.text, 'tmp_dir_save')
final_dir = os.path.join(self.outputDirEdit.text, 'Biclass')
logic = TraficBiLogic()
print "1"
logic.runSaveFiber(self.nodeNegDict, self.nodePosDict, tmp_dir)
print "1"
self.removeNodeExtracted()
      # Re-initialize the dictionaries
for key in self.nodeNegDict.keys():
self.nodeNegDict[key] = []
for key in self.nodePosDict.keys():
self.nodePosDict[key] = []
print "1"
logic.runPreProcess(self.dFPath.text, tmp_dir, final_dir)
print "1"
rmtree(tmp_dir)
# logic.runStore()
def OnTrainReset(self):
self.num_epochs_spinbox.setValue(1)
self.lr_spinbox.setValue(0.01)
self.sumDirTrain.text = ""
self.modelDirTrain.text = ""
self.dataDirTrain.text = ""
def OnClassReset(self):
self.sumDirClass.text = ""
self.modelDirClass.text = ""
self.outputDirClass.text = ""
self.inputClass.text = ""
self.dFPathClass.text = ""
def OnClassRun(self):
if self.CheckInputClass() and self.CheckOutputDirClass() and self.CheckModelDirClass() and self.CheckSumDirClass() and self.CheckdFClassPath() and self.CheckFiberListClass():
logic = TraficBiLogic()
logic.runClassification(self.inputClass.text, self.modelDirClass.text, self.sumDirClass.text, self.outputDirClass.text, self.dFPathClass.text, self.fiberListClass.itemText(self.fiberListClass.currentIndex))
return
def OnTrainTrain(self):
if self.CheckDataDirTrain() and self.CheckModelDirTrain() and self.CheckSumDirTrain():
logic = TraficBiLogic()
logic.runStoreAndTrain( self.dataDirTrain.text, self.modelDirTrain.text, self.lr_spinbox.value, self.num_epochs_spinbox.value, self.sumDirTrain.text )
return
def onClearButton(self):
while(self.inputFiber.currentNode() != None):
print "None"
self.inputFiber.removeCurrentNode()
self.removeNodeExtracted()
slicer.mrmlScene.Clear(0)
#TO DO: Clear all Data
return
def removeNodeExtracted(self):
negIDs = np.array(self.nodeNegDict.values())
posIDs = np.array(self.nodePosDict.values())
negIDs = [val for sublist in negIDs for val in sublist] #Flatten the list
posIDs = [val for sublist in posIDs for val in sublist] #Flatten the list
# print nodeIDs
for nodeID in posIDs:
slicer.mrmlScene.RemoveNode(slicer.mrmlScene.GetNodeByID(nodeID))
print "Remove ", nodeID
for nodeID in negIDs:
slicer.mrmlScene.RemoveNode(slicer.mrmlScene.GetNodeByID(nodeID))
print "Remove ", nodeID
def OnExtractFiber(self):
    if self.fiberList.currentIndex == 0 or self.classList.currentIndex == 0:
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("You must choose the type and the class you want to extract from the current fiber")
msg.exec_()
elif self.disROI.isChecked():
msg = qt.QMessageBox()
msg.setIcon(3)
msg.setText("ROI is disable, please choose Positive or Negative region to extract")
msg.exec_()
else:
nameNode = self.fiberList.itemText(self.fiberList.currentIndex)
self.ROISelector.FiberBundleFromSelection.addNode()
nodeID = self.ROISelector.FiberBundleFromSelection.currentNode().GetID()
if self.classList.currentIndex == 1:
self.nodeNegDict[nameNode].append(nodeID)
numExtract = len(self.nodeNegDict[nameNode])
self.ROISelector.FiberBundleFromSelection.currentNode().SetName(nameNode+"_negative_extracted_"+str(numExtract))
elif self.classList.currentIndex == 2:
self.nodePosDict[nameNode].append(nodeID)
numExtract = len(self.nodePosDict[nameNode])
self.ROISelector.FiberBundleFromSelection.currentNode().SetName(nameNode+"_positive_extracted_"+str(numExtract))
logic = TraficBiLogic()
logic.runExtractFiber(self.ROISelector, self.posROI.isChecked(), self.negROI.isChecked())
return
#
# TraficBiLogic
#
class TraficBiLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def runExtractFiber(self, selector, pos, neg):
"""
Run the extraction algorithm
TO DO: Add some verification
"""
selector.createNewBundleFromSelection()
selector.negativeROISelection(pos) # We switch the state of the ROI Selection
selector.positiveROISelection(neg)
selector.updateBundleFromSelection()
selector.negativeROISelection(neg)
selector.positiveROISelection(pos)
def runSaveFiber(self, negDict, posDict, dir):
"""
Run the save algorithm
TO DO: Add some verification + Save through the server + Which Location ? + How to identify fibers and dF ?
"""
    if not os.path.isdir(dir):
os.makedirs(dir)
logging.info('Saving Fibers')
for key in negDict.keys():
dirname = os.path.join(dir, key)
for j in xrange(len(negDict[key])):
dirnameNeg = os.path.join(dirname, 'Negative')
if not os.path.isdir(dirnameNeg):
os.makedirs(dirnameNeg)
filename = os.path.join( dirnameNeg, key+"_"+str(len(os.listdir(dirnameNeg)))+".vtk" )
node = slicer.mrmlScene.GetNodeByID(negDict[key][j])
print filename
slicer.util.saveNode(node, filename)
for h in xrange(len(posDict[key])):
dirnamePos = os.path.join(dirname, 'Positive')
if not os.path.isdir(dirnamePos):
os.makedirs(dirnamePos)
filename = os.path.join( dirnamePos, key+"_"+str(len(os.listdir(dirnamePos)))+".vtk" )
node = slicer.mrmlScene.GetNodeByID(posDict[key][h])
print filename
slicer.util.saveNode(node, filename)
logging.info('Fibers saved')
def runPreProcess(self, dF_path, save_dir, final_dir):
#TO CHANGE: LOCATION OF CLI AND VARIABLES
#
currentPath = os.path.dirname(os.path.abspath(__file__))
cli_dir = os.path.join(currentPath,"..", "..","cli-modules")
polydatatransform = os.path.join(cli_dir, "polydatatransform")
# lm_ped = "/work/dprince/PED/LandmarksPed/Arc_L_FT_bundle_clean_landmarks.fcsv"
tmp_dir = os.path.join(currentPath, "tmp_dir_lm_preprocess")
logging.info('Preprocessing started')
new_lm_path = os.path.join(tmp_dir, "lm_prepocess.fcsv")
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
# logging.info('Polydata transform')
# cmd_polydatatransform = [polydatatransform, "--invertx", "--inverty", "--fiber_file", lm_ped, "-D", dF_path, "-o", new_lm_path]
# out, err = subprocess.Popen(cmd_polydatatransform, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
# print("\nout : " + str(out))
# if err != "":
# print("\nerr : " + str(err))
logging.info('Make Dataset')
fiber_list = os.listdir(save_dir)
for _, fiber in enumerate(fiber_list):
lm_ped = os.path.join(currentPath, "Resources", "Landmarks", fiber + "_bundle_clean_landmarks.fcsv")
cmd_polydatatransform = [polydatatransform, "--invertx", "--inverty", "--fiber_file", lm_ped, "-D", dF_path, "-o", new_lm_path]
out, err = subprocess.Popen(cmd_polydatatransform, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print("\nout :\n " + str(out))
input_dir = os.path.join(save_dir, fiber)
output_dir = os.path.join(final_dir, fiber)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
os.makedirs(os.path.join(output_dir, "Negative"))
os.makedirs(os.path.join(output_dir, "Positive"))
run_make_dataset(input_dir, output_dir, landmark_file=new_lm_path)
rmtree(tmp_dir)
logging.info('Preprocessing completed')
return
def runStoreAndTrain(self, data_dir, model_dir, lr, num_epochs, sum_dir):
runMaybeEnvInstallTF()
currentPath = os.path.dirname(os.path.abspath(__file__))
env_dir = os.path.join(currentPath,"..", "..", "miniconda2")
pipeline_train_py = os.path.join(TRAFIC_LIB_DIR, "PipelineTrain.py")
cmd_py = str(pipeline_train_py) + ' --data_dir ' + str(data_dir) + ' --biclass --summary_dir ' + str(sum_dir)+ ' --checkpoint_dir ' + str(model_dir) + ' --lr ' + str(lr) + ' --num_epochs ' + str(num_epochs)
cmd_virtenv = 'ENV_DIR="'+str(env_dir)+'";'
cmd_virtenv = cmd_virtenv + 'export PYTHONPATH=$ENV_DIR/envs/env_trafic/lib/python2.7/site-packages:$ENV_DIR/lib/:$ENV_DIR/lib/python2.7/lib-dynload/:$ENV_DIR/lib/python2.7/:$ENV_DIR/lib/python2.7/site-packages/:$PYTHONPATH;'
# cmd_virtenv = cmd_virtenv + 'export PYTHONHOME=$ENV_DIR/bin/:$PYTHONHOME;'
cmd_virtenv = cmd_virtenv + 'export PATH=$ENV_DIR/bin/:$PATH;'
cmd_virtenv = cmd_virtenv + 'source activate env_trafic;'
cmd_virtenv = cmd_virtenv + 'LD_LIBRARY_PATH=$ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/:$ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH $ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/x86_64-linux-gnu/ld-2.17.so `which python` '
cmd_pipeline_train = cmd_virtenv + str(cmd_py) + ';'
print(cmd_pipeline_train)
cmd = ["bash", "-c", str(cmd_pipeline_train)]
out = open(os.path.join(TRAFIC_LIB_DIR,"Logs","training_out.txt"), "wb")
err = open(os.path.join(TRAFIC_LIB_DIR,"Logs","training_err.txt"), "wb")
subprocess.Popen(cmd, stdout=out, stderr=err)
# print("\nout : " + str(out) + "\nerr : " + str(err))
return
def runClassification(self, data_file, model_dir, sum_dir, output_dir, dF_Path, name_fiber):
runMaybeEnvInstallTF()
currentPath = os.path.dirname(os.path.abspath(__file__))
env_dir = os.path.join(currentPath,"..", "..", "miniconda2")
cli_dir = os.path.join(currentPath,"..", "..","cli-modules")
polydatatransform = os.path.join(cli_dir, "polydatatransform")
lm_ped = os.path.join(currentPath, "Resources", "Landmarks", name_fiber + "_bundle_clean_landmarks.fcsv")
tmp_dir = os.path.join(currentPath, "tmp_dir_lm_class")
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
new_lm_path = os.path.join(tmp_dir, "lm_class.fcsv")
cmd_polydatatransform = [polydatatransform, "--invertx", "--inverty", "--fiber_file", lm_ped, "-D", dF_Path, "-o", new_lm_path]
out, err = subprocess.Popen(cmd_polydatatransform, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print("\nout : " + str(out))
pipeline_eval_py = os.path.join(TRAFIC_LIB_DIR, "PipelineEval.py")
cmd_py = str(pipeline_eval_py) + ' --data_file ' + str(data_file) + ' --biclass --summary_dir ' + str(sum_dir)+ ' --checkpoint_dir ' + str(model_dir) + ' --output_dir ' + str(output_dir) + ' --landmark_file ' + str(new_lm_path) + " --fiber_name "+ name_fiber
cmd_virtenv = 'ENV_DIR="'+str(env_dir)+'";'
cmd_virtenv = cmd_virtenv + 'export PYTHONPATH=$ENV_DIR/envs/env_trafic/lib/python2.7/site-packages:$ENV_DIR/lib/:$ENV_DIR/lib/python2.7/lib-dynload/:$ENV_DIR/lib/python2.7/:$ENV_DIR/lib/python2.7/site-packages/:$PYTHONPATH;'
cmd_virtenv = cmd_virtenv + 'export PATH=$ENV_DIR/bin/:$PATH;'
cmd_virtenv = cmd_virtenv + 'source activate env_trafic;'
cmd_virtenv = cmd_virtenv + 'LD_LIBRARY_PATH=$ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/:$ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH $ENV_DIR/envs/env_trafic/lib/libc6_2.17/lib/x86_64-linux-gnu/ld-2.17.so `which python` '
cmd_pipeline_class = cmd_virtenv + str(cmd_py) + ';'
print(cmd_pipeline_class)
cmd = ["bash", "-c", str(cmd_pipeline_class)]
out = open(os.path.join(TRAFIC_LIB_DIR,"Logs","classification_out.txt"), "wb")
err = open(os.path.join(TRAFIC_LIB_DIR,"Logs","classification_err.txt"), "wb")
_, _ = subprocess.Popen(cmd, stdout=out, stderr=err).communicate()
# print("\nout : " + str(out) + "\nerr : " + str(err))
rmtree(tmp_dir)
# print("\nout : " + str(out) + "\nerr : " + str(err))
return
# logging.info('Processing completed')
# return True
class TraficBiTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_TraficBi1()
def test_TraficBi1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
    One of the most important features of the tests is that they should alert
    other developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay("Starting the test")
#
# first, get some data
#
import urllib
downloads = (
('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),
)
for url,name,loader in downloads:
filePath = slicer.app.temporaryPath + '/' + name
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader:
logging.info('Loading %s...' % (name,))
loader(filePath)
self.delayDisplay('Finished with download and loading')
volumeNode = slicer.util.getNode(pattern="FA")
logic = TraficBiLogic()
self.assertTrue( logic.hasImageData(volumeNode) )
self.delayDisplay('Test passed!')
|
PrinceNgattaiLam/Trafic
|
TraficBi/TrafficBi.py
|
Python
|
apache-2.0
| 39,506
|
[
"VTK"
] |
577ff142a9106927d75f034fdf12316990a13596e2a446eb890276b630203f7c
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Dropout, Pooling, Affine, GeneralizedCost
from neon.models import Model
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
def create_network():
# weight initialization
g1 = Gaussian(scale=0.01)
g5 = Gaussian(scale=0.005)
c0 = Constant(0)
c1 = Constant(1)
# model initialization
padding = {'pad_d': 1, 'pad_h': 1, 'pad_w': 1}
strides = {'str_d': 2, 'str_h': 2, 'str_w': 2}
layers = [
Conv((3, 3, 3, 64), padding=padding, init=g1, bias=c0, activation=Rectlin()),
Pooling((1, 2, 2), strides={'str_d': 1, 'str_h': 2, 'str_w': 2}),
Conv((3, 3, 3, 128), padding=padding, init=g1, bias=c1, activation=Rectlin()),
Pooling((2, 2, 2), strides=strides),
Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
Pooling((2, 2, 2), strides=strides),
Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
Pooling((2, 2, 2), strides=strides),
Conv((3, 3, 3, 256), padding=padding, init=g1, bias=c1, activation=Rectlin()),
Pooling((2, 2, 2), strides=strides),
Affine(nout=2048, init=g5, bias=c1, activation=Rectlin()),
Dropout(keep=0.5),
Affine(nout=2048, init=g5, bias=c1, activation=Rectlin()),
Dropout(keep=0.5),
Affine(nout=101, init=g1, bias=c0, activation=Softmax())
]
return Model(layers=layers), GeneralizedCost(costfunc=CrossEntropyMulti())
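# A minimal, hypothetical usage sketch (not part of the original example).
# It assumes a configured neon backend and a C3D-style video DataLoader named
# `train_set`; the optimizer settings below are illustrative, not prescribed:
#
#     from neon.optimizers import GradientDescentMomentum
#     from neon.callbacks.callbacks import Callbacks
#
#     model, cost = create_network()
#     opt = GradientDescentMomentum(0.01, momentum_coef=0.9)
#     model.fit(train_set, optimizer=opt, num_epochs=18,
#               cost=cost, callbacks=Callbacks(model))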
| NervanaSystems/neon | examples/video-c3d/network.py | Python | apache-2.0 | 2,289 | ["Gaussian"] | e95a76d3de88dadbcf326866ad1a11160d2a166a166f7f8bdea8524409a0d547 |
"""Polynomial factorization routines in characteristic zero. """
from __future__ import print_function, division
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
from sympy.core.compatibility import xrange
def dup_trial_division(f, factors, K):
"""Determine multiplicities of factors using trial division. """
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dmp_trial_division(f, factors, u, K):
"""Determine multiplicities of factors using trial division. """
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n + 1))*2**n*a*b
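# Worked instance (illustrative, not part of the original module). For
# f = 2*x**2 + 3*x + 4 over ZZ we have n = 2, a = 4, b = 2, and since
# ZZ.sqrt is the integer square root: isqrt(3) * 2**2 * 4 * 2 = 32
# (printed per the active ground type, e.g. mpz(32) under gmpy):
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_mignotte_bound([ZZ(2), ZZ(3), ZZ(4)], ZZ)
#     32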
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f == g*h (mod m)
s*g + t*h == 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) == 1
deg(f) == deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)
References
==========
1. [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
    Given a prime `p`, a polynomial `f` in `Z[x]` such that `lc(f)`
    is a unit modulo `p`, and monic pair-wise coprime polynomials `f_i`
    in `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
References
==========
1. [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
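# Minimal sketch of a call (illustrative, not part of the original module):
# lifting x**2 - 1 == (x + 1)*(x - 1) (mod 5) to mod 5**2 = 25 returns the
# same two monic factors, with coefficients reduced symmetrically mod 25:
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_hensel_lift(5, [ZZ(1), ZZ(0), ZZ(-1)],
#     ...                    [[ZZ(1), ZZ(1)], [ZZ(1), ZZ(-1)]], 2, ZZ)
#     [[1, 1], [1, -1]]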
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
    # choose a prime number `p` such that `f` is square-free in Z_p;
    # if there are many factors in Z_p, choose among a few different `p`
    # the one with the fewest factors
for px in xrange(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
            # lift the constant coefficient of the product `G` of the factors
            # in the subset `S`; if it does not divide `fc`, `G` does
            # not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G1 = dup_primitive(G, K)[1]
q = G1[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
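# Minimal sketch of a call (illustrative, not part of the original module);
# the input must be primitive and square-free. Factor order may vary:
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_zassenhaus([ZZ(1), ZZ(0), ZZ(-1)], ZZ)   # x**2 - 1
#     [[1, 1], [1, -1]]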
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
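# Illustrative check (not part of the original module): Eisenstein's
# criterion at p = 2 applies to x**2 + 2*x + 2, so irreducibility is
# certified; the function implicitly returns None when no prime works:
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_irreducible_p([ZZ(1), ZZ(2), ZZ(2)], ZZ)
#     True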
def dup_cyclotomic_p(f, K, irreducible=False):
"""
    Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in xrange(n, -1, -2):
g.insert(0, f[i])
for i in xrange(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polnomial. """
h = [K.one, -K.one]
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
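# Worked instance (illustrative, not part of the original module; shown
# with plain int coefficients): the 4th cyclotomic polynomial is built by
# forming Phi_2 = x + 1 and inflating x -> x**2, giving x**2 + 1:
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_cyclotomic_poly(4, ZZ)
#     [1, 0, 1]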
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in xrange(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
    Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
    approach (e.g. Zassenhaus's).
References
==========
1. [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
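# Illustrative call (not part of the original module; shown with plain int
# coefficients): x**4 - 1 decomposes into (x - 1)*(x + 1)*(x**2 + 1):
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_zz_cyclotomic_factor([ZZ(1), ZZ(0), ZZ(0), ZZ(0), ZZ(-1)], ZZ)
#     [[1, -1], [1, 1], [1, 0, 1]]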
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polyomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using the
    Zassenhaus algorithm. Trial division is used to recover the
    multiplicities of factors.
    The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
    As a result we get the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
Note that this is a complete factorization over integers,
however over Gaussian integers we can factor the last term.
By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speed up computations. To
disable this behaviour set cyclotomic=False.
References
==========
1. [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(xrange(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any(not j for j in J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
T = dup_zz_diophantine(F, n - i, p, K)
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(xrange(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(xrange(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(xrange(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
    which can be factored efficiently using the Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
References
==========
1. [Wang78]_
2. [Geddes92]_
"""
from sympy.utilities.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in xrange(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in xrange(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
    result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
    Given a multivariate polynomial `f` in `Z[X]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
    The factorization is computed by reducing the input polynomial
    into a primitive square-free polynomial and factoring it using the
    Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
    is used to recover the multiplicities of factors.
    The result is returned as a tuple consisting of::
        (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
    As a result we get the following factorization::
f = 2 (x - y) (x + y)
References
==========
1. [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
coeff, factors = lc, [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.has_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.has_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
if K0_inexact is None:
coeff = coeff/denom
else:
for i, (f, k) in enumerate(factors):
f = dup_quo_ground(f, denom, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.has_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.has_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
if K0_inexact is None:
coeff = coeff/denom
else:
for i, (f, k) in enumerate(factors):
f = dmp_quo_ground(f, denom, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""Returns ``True`` if ``f`` has no factors over its domain. """
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""Returns ``True`` if ``f`` has no factors over its domain. """
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1
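# Hypothetical quick checks of the helpers above (illustrative only):
#
#     >>> from sympy.polys.domains import ZZ
#     >>> dup_irreducible_p([ZZ(1), ZZ(0), ZZ(1)], ZZ)    # x**2 + 1
#     True
#     >>> dup_irreducible_p([ZZ(1), ZZ(0), ZZ(-1)], ZZ)   # (x - 1)*(x + 1)
#     False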
| kmacinnis/sympy | sympy/polys/factortools.py | Python | bsd-3-clause | 33,837 | ["Gaussian"] | e6b38eba3f5cc6cd3d1a9544905511cf32d780203e9cee4504e21c6d8fabdc9d |
# -*- coding: utf-8 -*-
import datetime
from lettuce import *
from django.utils.datastructures import SortedDict
from rapidsms.contrib.locations.models import *
from survey.features.page_objects.aggregates import AggregateStatusPage, DownloadExcelPage, InvestigatorReportPage
from survey.features.page_objects.survey_completion_rates import SurveyCompletionRatesPage
from survey.models import Survey, EnumerationArea, HouseholdMemberGroup
from survey.models.batch import Batch
from survey.models.households import Household, HouseholdMember
from survey.models.investigator import Investigator
from survey import investigator_configs
@step(u'And I have 2 batches with one open')
def and_i_have_2_batches_with_one_open(step):
world.batch_1 = Batch.objects.create(
order=1, name="Batch A", survey=world.survey_1)
world.batch_2 = Batch.objects.create(
order=2, name="Batch B", survey=world.survey_2)
world.kampala_county = Location.objects.get(name="Kampala County")
world.someother_county = Location.objects.create(
name="Some County", tree_parent=world.kampala_county.tree_parent)
world.batch_1.open_for_location(world.kampala_county.tree_parent)
@step(u'And I have eas in the lowest location')
def and_i_have_eas_in_the_lowest_location(step):
world.ea = EnumerationArea.objects.create(name="EA", survey=world.survey_1)
world.ea.locations.add(world.kampala_village)
@step(u'And one household has completed that open batch')
def and_one_household_has_completed_that_open_batch(step):
world.household_1.completed_batches.get_or_create(batch=world.batch_1)
@step(u'And I visit aggregate status page')
def and_i_visit_aggregate_status_page(step):
world.page = AggregateStatusPage(world.browser)
world.page.visit()
@step(u'Then I should see an option to select location hierarchically')
def then_i_should_see_an_option_to_select_location_hierarchically(step):
world.page.choose_location(
{'district': 'Kampala', 'county': 'Kampala County'})
@step(u'And I should see an option to select batch')
def and_i_should_see_an_option_to_select_batch(step):
world.page.check_if_batches_present([world.batch_1])
@step(u'And I should see a get status button')
def and_i_should_see_a_get_status_button(step):
world.page.check_get_status_button_presence()
@step(u'And I have 2 investigators with households')
def and_i_have_2_investigators_with_households(step):
investigator = Investigator.objects.create(
name="Rajini", mobile_number="123", location=world.kampala_county)
investigator_2 = Investigator.objects.create(
name="Batman", mobile_number="1234", location=world.someother_county)
uid_counter = 0
for index in range(
investigator_configs.NUMBER_OF_HOUSEHOLD_PER_INVESTIGATOR):
Household.objects.create(
investigator=investigator, uid=uid_counter + index)
Household.objects.create(
investigator=investigator_2, uid=uid_counter + 1 + index)
uid_counter = uid_counter + 2
world.investigator = investigator
world.investigator_2 = investigator_2
@step(u'And I choose a location and an open batch')
def and_i_choose_a_location_and_an_open_batch(step):
locations = SortedDict()
locations['district'] = 'Kampala'
locations['county'] = 'Kampala County'
world.page.choose_location(locations)
world.page.choose_batch(world.batch_1)
@step(u'And I change my mind to select all districts')
def and_i_change_my_mind_to_select_all_districts(step):
world.page.select_all_district()
@step(u'And I click get status button')
def and_i_click_get_status_button(step):
world.page.submit()
@step(u'And I should see all districts as location selected')
def and_i_should_see_all_districts_location_selected(step):
world.page.see_all_districts_location_selected()
@step(u'Then I should see number of households and clusters completed and pending')
def then_i_should_see_number_of_households_and_clusters_completed_and_pending(
step):
world.page.assert_status_count(
pending_households=20,
completed_housesholds=0,
pending_clusters=2,
completed_clusters=0)
@step(u'And I should see a list of investigators with corresponding phone numbers and pending households')
def and_i_should_see_a_list_of_investigators_with_corresponding_phone_numbers_and_pending_households(
step):
world.page.check_presence_of_investigators(
world.investigator, world.investigator_2)
@step(u'And I choose a location and a closed batch')
def and_i_choose_a_location_and_a_closed_batch(step):
world.page.choose_location({'district': 'Kampala'})
world.page.choose_batch(world.batch_2)
@step(u'And I should see a message that says that this batch is currently closed')
def and_i_should_see_a_message_that_says_that_this_batch_is_currently_closed(
step):
world.page.assert_presence_of_batch_is_closed_message()
@step(u'And I visit download excel page')
def and_i_visit_download_excel_page(step):
world.page = DownloadExcelPage(world.browser)
world.page.visit()
@step(u'And I visit district aggregate page')
def and_i_visit_district_aggregate_page(step):
world.page = SurveyCompletionRatesPage(world.browser)
world.page.visit()
@step(u'Then I should see a table for completion rates')
def then_i_should_see_a_table_for_completion_rates(step):
world.page.see_completion_rates_table()
@step(u'And I should see descendants in the table')
def and_i_should_see_descendants_in_the_table(step):
world.page.is_text_present(world.kampala_subcounty.name)
@step(u'When I click on descendant name')
def when_i_click_on_descendant_name(step):
world.page.click_link_by_text(world.kampala_subcounty.name)
@step(u'Then I should see status page for that location')
def then_i_should_see_status_page_for_that_location(step):
world.page.see_completion_rates_table()
world.page.is_text_present(world.kampala_parish.name)
@step(u'And I choose ea and an open batch')
def and_i_choose_ea_and_an_open_batch(step):
locations = SortedDict()
locations['district'] = world.kampala_district.name
locations['county'] = world.kampala_county.name
locations['subcounty'] = world.kampala_subcounty.name
locations['parish'] = world.kampala_parish.name
world.page.choose_location(locations)
world.page.choose_batch(world.batch_1)
world.page.choose_ea(world.ea)
@step(u'Then I should see a table for household completion rates')
def then_i_should_see_a_table_for_household_completion_rates(step):
world.page.see_houdehold_completion_table()
@step(u'And I should see household details text')
def and_i_should_see_household_details_text(step):
world.page.is_text_present(
"Survey Completion by household in %s EA" % world.ea.name)
world.page.is_text_present("%s" % world.household_1.uid)
world.page.is_text_present(
"%s" % world.household_1.household_member.all().count())
@step(u'And I should see investigator details text')
def and_i_should_see_investigator_details_text(step):
world.page.is_text_present('Investigator: %s(%s)' % (
world.investigator.name, world.investigator.mobile_number))
@step(u'And I have an investigator and households')
def and_i_have_an_investigator_and_households(step):
world.batch = Batch.objects.create(survey=world.survey_1, name="Haha")
world.investigator = Investigator.objects.create(
name="some_investigator", mobile_number="123456784", ea=world.ea)
world.household_1 = Household.objects.create(
investigator=world.investigator,
uid=101,
ea=world.ea,
survey=world.survey_1)
world.household_2 = Household.objects.create(
investigator=world.investigator,
uid=102,
ea=world.ea,
survey=world.survey_1)
world.member_2 = HouseholdMember.objects.create(
household=world.household_2, date_of_birth=datetime.datetime(2000, 0o2, 0o2))
@step(u'And I should see percent completion')
def and_i_should_see_percent_completion(step):
world.page.is_text_present('Percent Completion: 50')
@step(u'And I have 2 surveys with one batch each')
def and_i_have_2_surveys_with_one_batch_each(step):
world.batch_1 = Batch.objects.create(
name='batch1', order=1, survey=world.survey_1)
world.batch_2 = Batch.objects.create(
name='batch2', order=1, survey=world.survey_2)
@step(u'When I select survey 2 from survey list')
def when_i_select_survey_2_from_survey_list(step):
world.page.select('survey', [world.survey_2.id])
@step(u'Then I should see batch2 in batch list')
def then_i_should_see_batch2_in_batch_list(step):
world.page.see_select_option([world.batch_2.name], 'batch')
@step(u'And I should not see batch1 in batch list')
def and_i_should_not_see_batch1_in_batch_list(step):
world.page.option_not_present([world.batch_1.name], 'batch')
@step(u'When I select survey 1 from survey list')
def when_i_select_survey_1_from_survey_list(step):
world.page.select('survey', [world.survey_1.id])
@step(u'Then I should see batch1 in batch list')
def then_i_should_see_batch1_in_batch_list(step):
world.page.see_select_option([world.batch_1.name], 'batch')
@step(u'And I should not see batch2 in batch list')
def and_i_should_not_see_batch2_in_batch_list(step):
world.page.option_not_present([world.batch_2.name], 'batch')
@step(u'And I should see title message')
def and_i_should_see_title_message(step):
world.page.is_text_present('Survey Completion by Region/District')
@step(u'When I visit investigator report page')
def when_i_visit_investigator_report_page(step):
world.page = InvestigatorReportPage(world.browser)
world.page.visit()
@step(u'Then I should see title-text message')
def then_i_should_see_title_text_message(step):
world.page.is_text_present(
'Choose survey to get investigators who completed the survey')
@step(u'And I should see dropdown with two surveys')
def and_i_should_see_dropdown_with_two_surveys(step):
world.page.see_select_option(
[world.survey_1.name, world.survey_2.name], 'survey')
@step(u'And I should see generate report button')
def and_i_should_see_generate_report_button(step):
assert world.browser.find_by_css(
"#download-investigator-form")[0].find_by_tag('button')[0].text == "Generate Report"
@step(u'And I have 100 locations')
def and_i_have_100_locations(step):
country = LocationType.objects.create(name="Country", slug="country")
district = LocationType.objects.create(name="District", slug="district")
world.uganda = Location.objects.create(name="uganda", type=country)
for i in xrange(100):
Location.objects.create(name="name" + str(i),
tree_parent=world.uganda, type=district)
@step(u'Then I should see district completion table paginated')
def then_i_should_see_district_completion_table_paginated(step):
world.page.validate_pagination()
@step(u'And I have one batch open in those locations')
def and_i_have_one_batch_open_in_those_locations(step):
world.batch_12 = Batch.objects.create(
order=12, name="Batch A", survey=world.survey_1)
world.batch_12.open_for_location(world.uganda)
@step(u'When I select one of the survey')
def when_i_select_one_of_the_survey(step):
world.page.see_select_option(
[world.survey_1.name, world.survey_2.name], 'survey')
@step(u'Then I should batches in that survey')
def then_i_should_batches_in_that_survey(step):
world.page.validate_select_option(world.batch_1)
@step(u'And I click generate report button')
def and_i_click_generate_report_button(step):
world.page.find_by_css("#generate_report", "Generate Report")
@step(u'And I have three surveys')
def and_i_have_three_surveys(step):
world.survey_1 = Survey.objects.create(name="Haha Survey")
world.survey_2 = Survey.objects.create(name="Hoho Survey")
@step(u'And I have batches in those surveys')
def and_i_have_batches_in_those_surveys(step):
world.batch_1 = Batch.objects.create(
order=1, name="Batch A haha", survey=world.survey_1)
world.batch_2 = Batch.objects.create(
order=2, name="Batch A hoho", survey=world.survey_2)
@step(u'Then I should only see the batches in that survey')
def then_i_should_only_see_the_batches_in_that_survey(step):
world.page.see_select_option(['All', str(world.batch_2.name)], 'batch')
@step(u'When I choose a batch in that survey')
def when_i_choose_a_batch_in_that_survey(step):
world.page.select('batch', [world.batch_2.id])
@step(u'Then I should be able to export the responses for that batch')
def then_i_should_be_able_to_export_the_responses_for_that_batch(step):
world.page.find_by_css("#export_excel", "Export to spreadsheet")
@step(u'When I select one of the two surveys')
def when_i_select_one_of_the_two_surveys(step):
world.page.select('survey', [str(world.survey_2.id)])
@step(u'And I have general member group')
def and_i_have_general_member_group(step):
HouseholdMemberGroup.objects.create(order=1, name="GENERAL")
| unicefuganda/uSurvey | survey/features/aggregates-steps.py | Python | bsd-3-clause | 13,171 | ["VisIt"] | 864f3a5e4e008e6c630146e67581c71203fcf315551347f9efd13fbacdc085ba |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkTreeFieldAggregator(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkTreeFieldAggregator(), 'Processing.',
('vtkTree',), ('vtkTree',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkTreeFieldAggregator.py | Python | bsd-3-clause | 491 | ["VTK"] | 8a241fe8fa0dbbb03f67a6f3d9752fbbc1b315375a6effe92ff200691684302a |
"""
Sequence classes
"""
import gzip
import json
import logging
import os
import re
import string
from cgi import escape
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.util.checkers import is_gzip
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import get_headers
from galaxy.datatypes.util.image_util import check_image_type
from galaxy.util import nice_size
from . import data
import bx.align.maf
log = logging.getLogger(__name__)
class SequenceSplitLocations( data.Text ):
"""
Class storing information about a sequence file composed of multiple gzip files concatenated as
one OR an uncompressed file. In the GZIP case, each sub-file's location is stored in start and end.
The format of the file is JSON::
{ "sections" : [
{ "start" : "x", "end" : "y", "sequences" : "z" },
...
]}
"""
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
try:
parsed_data = json.load(open(dataset.file_name))
# dataset.peek = json.dumps(data, sort_keys=True, indent=4)
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
dataset.blurb = '%d sections' % len(parsed_data['sections'])
except Exception:
dataset.peek = 'Not FQTOC file'
dataset.blurb = 'Not FQTOC file'
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
file_ext = "fqtoc"
def sniff( self, filename ):
if os.path.getsize(filename) < 50000:
try:
data = json.load(open(filename))
sections = data['sections']
for section in sections:
if 'start' not in section or 'end' not in section or 'sequences' not in section:
return False
return True
except:
pass
return False
class Sequence( data.Text ):
"""Class describing a sequence"""
"""Add metadata elements"""
MetadataElement( name="sequences", default=0, desc="Number of sequences", readonly=True, visible=False, optional=True, no_value=0 )
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines in dataset.
"""
data_lines = 0
sequences = 0
for line in file( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ):
# We don't count comment lines for sequence data types
continue
if line and line.startswith( '>' ):
sequences += 1
data_lines += 1
else:
data_lines += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.sequences:
dataset.blurb = "%s sequences" % util.commaify( str( dataset.metadata.sequences ) )
else:
dataset.blurb = nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def get_sequences_per_file(total_sequences, split_params):
if split_params['split_mode'] == 'number_of_parts':
# legacy basic mode - split into a specified number of parts
parts = int(split_params['split_size'])
sequences_per_file = [total_sequences / parts for i in range(parts)]
for i in range(total_sequences % parts):
sequences_per_file[i] += 1
elif split_params['split_mode'] == 'to_size':
# loop through the sections and calculate the number of sequences
chunk_size = long(split_params['split_size'])
rem = total_sequences % chunk_size
sequences_per_file = [chunk_size for i in range(total_sequences / chunk_size)]
# TODO: Should we invest the time in a better way to handle small remainders?
if rem > 0:
sequences_per_file.append(rem)
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
return sequences_per_file
get_sequences_per_file = staticmethod(get_sequences_per_file)
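    # Illustrative behaviour of the helper above (hypothetical calls, not in
    # the original file):
    #   get_sequences_per_file(10, {'split_mode': 'number_of_parts', 'split_size': 3})
    #     -> [4, 3, 3]
    #   get_sequences_per_file(10, {'split_mode': 'to_size', 'split_size': 4})
    #     -> [4, 4, 2]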
def do_slow_split( cls, input_datasets, subdir_generator_function, split_params):
# count the sequences so we can split
# TODO: if metadata is present, take the number of lines / 4
if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences is not None:
total_sequences = input_datasets[0].metadata.sequences
else:
input_file = input_datasets[0].file_name
compress = is_gzip(input_file)
if compress:
# gzip is really slow before python 2.7!
in_file = gzip.GzipFile(input_file, 'r')
else:
# TODO
# if a file is not compressed, seek locations can be calculated and stored
# ideally, this would be done in metadata
# TODO
# Add BufferedReader if python 2.7?
in_file = open(input_file, 'rt')
total_sequences = long(0)
for i, line in enumerate(in_file):
total_sequences += 1
in_file.close()
total_sequences /= 4
sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params)
return cls.write_split_files(input_datasets, None, subdir_generator_function, sequences_per_file)
do_slow_split = classmethod(do_slow_split)
def do_fast_split( cls, input_datasets, toc_file_datasets, subdir_generator_function, split_params):
data = json.load(open(toc_file_datasets[0].file_name))
sections = data['sections']
total_sequences = long(0)
for section in sections:
total_sequences += long(section['sequences'])
sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params)
return cls.write_split_files(input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file)
do_fast_split = classmethod(do_fast_split)
def write_split_files(cls, input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file):
directories = []
def get_subdir(idx):
if idx < len(directories):
return directories[idx]
dir = subdir_generator_function()
directories.append(dir)
return dir
# we know how many splits and how many sequences in each. What remains is to write out instructions for the
# splitting of all the input files. To decouple the format of those instructions from this code, the exact format of
# those instructions is delegated to scripts
start_sequence = 0
for part_no in range(len(sequences_per_file)):
dir = get_subdir(part_no)
for ds_no in range(len(input_datasets)):
ds = input_datasets[ds_no]
base_name = os.path.basename(ds.file_name)
part_path = os.path.join(dir, base_name)
split_data = dict(class_name='%s.%s' % (cls.__module__, cls.__name__),
output_name=part_path,
input_name=ds.file_name,
args=dict(start_sequence=start_sequence, num_sequences=sequences_per_file[part_no]))
if toc_file_datasets is not None:
toc = toc_file_datasets[ds_no]
split_data['args']['toc_file'] = toc.file_name
f = open(os.path.join(dir, 'split_info_%s.json' % base_name), 'w')
json.dump(split_data, f)
f.close()
start_sequence += sequences_per_file[part_no]
return directories
write_split_files = classmethod(write_split_files)
def split( cls, input_datasets, subdir_generator_function, split_params):
"""Split a generic sequence file (not sensible or possible, see subclasses)."""
if split_params is None:
return None
raise NotImplementedError("Can't split generic sequence files")
def get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count):
"""
Uses a Table of Contents dict, parsed from an FQTOC file, to come up with a set of
shell commands that will extract the parts necessary
>>> three_sections=[dict(start=0, end=74, sequences=10), dict(start=74, end=148, sequences=10), dict(start=148, end=148+76, sequences=10)]
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=10)
['dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=1, sequence_count=5)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +5 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=20)
['dd bs=1 skip=0 count=148 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=10)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', '(dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=10, sequence_count=10)
['dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz']
>>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=20)
['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', 'dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz', '(dd bs=1 skip=148 count=76 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz']
"""
sections = toc_file['sections']
result = []
current_sequence = long(0)
i = 0
# skip to the section that contains my starting sequence
while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['sequences']):
current_sequence += long(sections[i]['sequences'])
i += 1
if i == len(sections): # bad input data!
raise Exception('No FQTOC section contains starting sequence %s' % start_sequence)
# These two variables act as an accumulator for consecutive entire blocks that
# can be copied verbatim (without decompressing)
start_chunk = long(-1)
end_chunk = long(-1)
copy_chunk_cmd = 'dd bs=1 skip=%s count=%s if=%s 2> /dev/null >> %s'
while sequence_count > 0 and i < len(sections):
            # we need to extract partial data. So, find the byte offsets of the chunks that contain the data we need;
            # use a combination of dd (to pull just the right sections out), tail (to skip lines) and head (to get the
            # right number of lines)
sequences = long(sections[i]['sequences'])
skip_sequences = start_sequence - current_sequence
sequences_to_extract = min(sequence_count, sequences - skip_sequences)
start_copy = long(sections[i]['start'])
end_copy = long(sections[i]['end'])
if sequences_to_extract < sequences:
if start_chunk > -1:
result.append(copy_chunk_cmd % (start_chunk, end_chunk - start_chunk, input_name, output_name))
start_chunk = -1
# extract, unzip, trim, recompress
result.append('(dd bs=1 skip=%s count=%s if=%s 2> /dev/null )| zcat | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c >> %s' %
(start_copy, end_copy - start_copy, input_name, skip_sequences * 4 + 1, sequences_to_extract * 4, output_name))
else: # whole section - add it to the start_chunk/end_chunk accumulator
if start_chunk == -1:
start_chunk = start_copy
end_chunk = end_copy
sequence_count -= sequences_to_extract
start_sequence += sequences_to_extract
current_sequence += sequences
i += 1
if start_chunk > -1:
result.append(copy_chunk_cmd % (start_chunk, end_chunk - start_chunk, input_name, output_name))
if sequence_count > 0:
raise Exception('%s sequences not found in file' % sequence_count)
return result
get_split_commands_with_toc = staticmethod(get_split_commands_with_toc)
def get_split_commands_sequential(is_compressed, input_name, output_name, start_sequence, sequence_count):
"""
Does a brain-dead sequential scan & extract of certain sequences
>>> Sequence.get_split_commands_sequential(True, './input.gz', './output.gz', start_sequence=0, sequence_count=10)
['zcat "./input.gz" | ( tail -n +1 2> /dev/null) | head -40 | gzip -c > "./output.gz"']
>>> Sequence.get_split_commands_sequential(False, './input.fastq', './output.fastq', start_sequence=10, sequence_count=10)
['tail -n +41 "./input.fastq" 2> /dev/null | head -40 > "./output.fastq"']
"""
start_line = start_sequence * 4
line_count = sequence_count * 4
# TODO: verify that tail can handle 64-bit numbers
if is_compressed:
cmd = 'zcat "%s" | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c' % (input_name, start_line + 1, line_count)
else:
cmd = 'tail -n +%s "%s" 2> /dev/null | head -%s' % (start_line + 1, input_name, line_count)
cmd += ' > "%s"' % output_name
return [cmd]
get_split_commands_sequential = staticmethod(get_split_commands_sequential)
class Alignment( data.Text ):
"""Class describing an alignment"""
"""Add metadata elements"""
MetadataElement( name="species", desc="Species", default=[], param=metadata.SelectParameter, multiple=True, readonly=True, no_value=None )
def split( cls, input_datasets, subdir_generator_function, split_params):
"""Split a generic alignment file (not sensible or possible, see subclasses)."""
if split_params is None:
return None
raise NotImplementedError("Can't split generic alignment files")
class Fasta( Sequence ):
"""Class representing a FASTA sequence"""
edam_format = "format_1929"
file_ext = "fasta"
def sniff( self, filename ):
"""
Determines whether the file is in fasta format
A sequence in FASTA format consists of a single-line description, followed by lines of sequence data.
The first character of the description line is a greater-than (">") symbol in the first column.
All lines should be shorter than 80 characters
For complete details see http://www.ncbi.nlm.nih.gov/blast/fasta.shtml
Rules for sniffing as True:
We don't care about line length (other than empty lines).
        The first non-empty line must start with '>' and the very next line.strip() must contain sequence data rather than another header.
        'sequence data' here is loosely defined as non-empty lines which do not start with '>'.
        This will cause Color Space FASTA (csfasta) to be detected as True (they are, after all, still FASTA files - they have a header line followed by sequence data).
        Previously this method did some checking to determine if the sequence data contained integers (presumably to differentiate between fasta and csfasta).
        This should instead be done through sniff order, where csfasta (which currently has a null sniff function) is detected first (stricter definition), followed sometime after by fasta.
        We will only check that the first purported sequence is correctly formatted.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.maf' )
>>> Fasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Fasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break # EOF
line = line.strip()
if line: # first non-empty line
if line.startswith( '>' ):
# The next line.strip() must not be '', nor startwith '>'
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
# If there is a third line, and it isn't a header line, it may not contain chars like '()[].' otherwise it's most likely a DotBracket file
line = fh.readline()
if not line.startswith('>') and re.search("[\(\)\[\]\.]", line):
break
return True
else:
break # we found a non-empty line, but it's not a fasta header
fh.close()
except:
pass
return False
def split(cls, input_datasets, subdir_generator_function, split_params):
"""Split a FASTA file sequence by sequence.
Note that even if split_mode="number_of_parts", the actual number of
sub-files produced may not match that requested by split_size.
If split_mode="to_size" then split_size is treated as the number of
FASTA records to put in each sub-file (not size in bytes).
"""
if split_params is None:
return
if len(input_datasets) > 1:
raise Exception("FASTA file splitting does not support multiple files")
input_file = input_datasets[0].file_name
# Counting chunk size as number of sequences.
if 'split_mode' not in split_params:
raise Exception('Tool does not define a split mode')
elif split_params['split_mode'] == 'number_of_parts':
split_size = int(split_params['split_size'])
log.debug("Split %s into %i parts..." % (input_file, split_size))
# if split_mode = number_of_parts, and split_size = 10, and
# we know the number of sequences (say 1234), then divide by
# by ten, giving ten files of approx 123 sequences each.
if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences:
# Galaxy has already counted/estimated the number
batch_size = 1 + input_datasets[0].metadata.sequences // split_size
cls._count_split(input_file, batch_size, subdir_generator_function)
else:
# OK, if Galaxy hasn't counted them, it may be a big file.
# We're not going to count the records which would be slow
# and a waste of disk IO time - instead we'll split using
# the file size.
chunk_size = os.path.getsize(input_file) // split_size
cls._size_split(input_file, chunk_size, subdir_generator_function)
elif split_params['split_mode'] == 'to_size':
# Split the input file into as many sub-files as required,
# each containing to_size many sequences
batch_size = int(split_params['split_size'])
log.debug("Split %s into batches of %i records..." % (input_file, batch_size))
cls._count_split(input_file, batch_size, subdir_generator_function)
else:
raise Exception('Unsupported split mode %s' % split_params['split_mode'])
split = classmethod(split)
def _size_split(cls, input_file, chunk_size, subdir_generator_function):
"""Split a FASTA file into chunks based on size on disk.
        This does of course preserve complete records - it only splits at the
        start of a new FASTA sequence record.
"""
log.debug("Attemping to split FASTA file %s into chunks of %i bytes" % (input_file, chunk_size))
f = open(input_file, "rU")
part_file = None
try:
# Note if the input FASTA file has no sequences, we will
# produce just one sub-file which will be a copy of it.
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
start_offset = 0
while True:
offset = f.tell()
line = f.readline()
if not line:
break
if line[0] == ">" and offset - start_offset >= chunk_size:
# Start a new sub-file
part_file.close()
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
start_offset = f.tell()
part_file.write(line)
except Exception as e:
log.error('Unable to size split FASTA file: %s' % str(e))
f.close()
if part_file is not None:
part_file.close()
raise
f.close()
_size_split = classmethod(_size_split)
def _count_split(cls, input_file, chunk_size, subdir_generator_function):
"""Split a FASTA file into chunks based on counting records."""
log.debug("Attemping to split FASTA file %s into chunks of %i sequences" % (input_file, chunk_size))
f = open(input_file, "rU")
part_file = None
try:
# Note if the input FASTA file has no sequences, we will
# produce just one sub-file which will be a copy of it.
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
rec_count = 0
while True:
line = f.readline()
if not line:
break
if line[0] == ">":
rec_count += 1
if rec_count > chunk_size:
# Start a new sub-file
part_file.close()
part_dir = subdir_generator_function()
part_path = os.path.join(part_dir, os.path.basename(input_file))
part_file = open(part_path, 'w')
log.debug("Writing %s part to %s" % (input_file, part_path))
rec_count = 1
part_file.write(line)
part_file.close()
except Exception as e:
log.error('Unable to count split FASTA file: %s' % str(e))
f.close()
if part_file is not None:
part_file.close()
raise
f.close()
_count_split = classmethod(_count_split)
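# A minimal, self-contained sketch (not part of the Galaxy API) of the batch
# size arithmetic used by Fasta.split() above in 'number_of_parts' mode; the
# helper name `_fasta_batch_size` is hypothetical and exists only for
# illustration.
def _fasta_batch_size( num_sequences, split_size ):
    # ceiling-style division so that batch_size * split_size >= num_sequences
    return 1 + num_sequences // split_size
# e.g. 1234 sequences split into 10 parts gives batches of 124 records, with
# the final part slightly smaller than the rest:
assert _fasta_batch_size( 1234, 10 ) == 124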
class csFasta( Sequence ):
""" Class representing the SOLID Color-Space sequence ( csfasta ) """
edam_format = "format_1929"
file_ext = "csfasta"
def sniff( self, filename ):
"""
Color-space sequence:
>2_15_85_F3
T213021013012303002332212012112221222112212222
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.fasta' )
>>> csFasta().sniff( fname )
False
>>> fname = get_test_fname( 'sequence.csfasta' )
>>> csFasta().sniff( fname )
True
"""
try:
fh = open( filename )
while True:
line = fh.readline()
if not line:
break # EOF
line = line.strip()
if line and not line.startswith( '#' ): # first non-empty non-comment line
if line.startswith( '>' ):
line = fh.readline().strip()
if line == '' or line.startswith( '>' ):
break
elif line[0] not in string.ascii_uppercase:
return False
                        elif len( line ) > 1 and not re.search( r'^[\d.]+$', line[1:] ):
return False
return True
else:
break # we found a non-empty line, but it's not a header
fh.close()
except:
pass
return False
def set_meta( self, dataset, **kwd ):
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
return
return Sequence.set_meta( self, dataset, **kwd )
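# An illustrative sketch (hypothetical helper, not part of the Galaxy API) of
# the per-line test in csFasta.sniff() above: a color-space sequence line is
# an uppercase base letter followed only by digits and dots.
def _looks_like_colorspace( line ):
    return bool( re.match( r'^[A-Z][\d.]*$', line ) )
assert _looks_like_colorspace( 'T213021013012303002332212012112221222112212222' )
assert not _looks_like_colorspace( 'ACGTACGT' )  # plain nucleotides, no digits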
class Fastq ( Sequence ):
"""Class representing a generic FASTQ sequence"""
edam_format = "format_1930"
file_ext = "fastq"
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines
in dataset.
FIXME: This does not properly handle line wrapping
"""
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
return
data_lines = 0
sequences = 0
seq_counter = 0 # blocks should be 4 lines long
        for line in open( dataset.file_name ):
line = line.strip()
if line and line.startswith( '#' ) and not data_lines:
# We don't count comment lines for sequence data types
continue
seq_counter += 1
data_lines += 1
if line and line.startswith( '@' ):
if seq_counter >= 4:
# count previous block
# blocks should be 4 lines long
sequences += 1
seq_counter = 1
if seq_counter >= 4:
# count final block
sequences += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def sniff( self, filename ):
"""
Determines whether the file is in generic fastq format
For details, see http://maq.sourceforge.net/fastq.shtml
Note: There are three kinds of FASTQ files, known as "Sanger" (sometimes called "Standard"), Solexa, and Illumina
These differ in the representation of the quality scores
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( '1.fastqsanger' )
>>> Fastq().sniff( fname )
True
>>> fname = get_test_fname( '2.fastqsanger' )
>>> Fastq().sniff( fname )
True
"""
headers = get_headers( filename, None )
bases_regexp = re.compile( "^[NGTAC]*" )
# check that first block looks like a fastq block
try:
if len( headers ) >= 4 and headers[0][0] and headers[0][0][0] == "@" and headers[2][0] and headers[2][0][0] == "+" and headers[1][0]:
# Check the sequence line, make sure it contains only G/C/A/T/N
if not bases_regexp.match( headers[1][0] ):
return False
return True
return False
except:
return False
def split( cls, input_datasets, subdir_generator_function, split_params):
"""
FASTQ files are split on cluster boundaries, in increments of 4 lines
"""
if split_params is None:
return None
# first, see if there are any associated FQTOC files that will give us the split locations
# if so, we don't need to read the files to do the splitting
toc_file_datasets = []
for ds in input_datasets:
tmp_ds = ds
fqtoc_file = None
while fqtoc_file is None and tmp_ds is not None:
fqtoc_file = tmp_ds.get_converted_files_by_type('fqtoc')
tmp_ds = tmp_ds.copied_from_library_dataset_dataset_association
if fqtoc_file is not None:
toc_file_datasets.append(fqtoc_file)
if len(toc_file_datasets) == len(input_datasets):
return cls.do_fast_split(input_datasets, toc_file_datasets, subdir_generator_function, split_params)
return cls.do_slow_split(input_datasets, subdir_generator_function, split_params)
split = classmethod(split)
def process_split_file(data):
"""
This is called in the context of an external process launched by a Task (possibly not on the Galaxy machine)
to create the input files for the Task. The parameters:
data - a dict containing the contents of the split file
"""
args = data['args']
input_name = data['input_name']
output_name = data['output_name']
start_sequence = long(args['start_sequence'])
sequence_count = long(args['num_sequences'])
if 'toc_file' in args:
toc_file = json.load(open(args['toc_file'], 'r'))
commands = Sequence.get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count)
else:
commands = Sequence.get_split_commands_sequential(is_gzip(input_name), input_name, output_name, start_sequence, sequence_count)
for cmd in commands:
if 0 != os.system(cmd):
raise Exception("Executing '%s' failed" % cmd)
return True
process_split_file = staticmethod(process_split_file)
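# A self-contained sketch (hypothetical helper, not part of the Galaxy API) of
# the 4-line block counting performed by Fastq.set_meta() above: a record is
# counted when a new '@' header follows a complete block, plus one final block
# at EOF. Like the original, it does not handle wrapped sequence lines.
def _count_fastq_records( lines ):
    sequences = 0
    seq_counter = 0  # lines seen in the current block
    for line in lines:
        seq_counter += 1
        if line.startswith( '@' ) and seq_counter >= 4:
            sequences += 1   # close the previous 4-line block
            seq_counter = 1  # this header opens a new block
    if seq_counter >= 4:
        sequences += 1  # count the final block
    return sequences
assert _count_fastq_records( [ '@read1', 'ACGT', '+', 'IIII' ] * 3 ) == 3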
class FastqSanger( Fastq ):
"""Class representing a FASTQ sequence ( the Sanger variant )"""
edam_format = "format_1932"
file_ext = "fastqsanger"
class FastqSolexa( Fastq ):
"""Class representing a FASTQ sequence ( the Solexa variant )"""
edam_format = "format_1933"
file_ext = "fastqsolexa"
class FastqIllumina( Fastq ):
"""Class representing a FASTQ sequence ( the Illumina 1.3+ variant )"""
edam_format = "format_1931"
file_ext = "fastqillumina"
class FastqCSSanger( Fastq ):
"""Class representing a Color Space FASTQ sequence ( e.g a SOLiD variant )"""
file_ext = "fastqcssanger"
class Maf( Alignment ):
"""Class describing a Maf alignment"""
edam_format = "format_3008"
file_ext = "maf"
# Readonly and optional, users can't unset it, but if it is not set, we are generally ok; if required use a metadata validator in the tool definition
MetadataElement( name="blocks", default=0, desc="Number of blocks", readonly=True, optional=True, visible=False, no_value=0 )
MetadataElement( name="species_chromosomes", desc="Species Chromosomes", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
MetadataElement( name="maf_index", desc="MAF Index File", param=metadata.FileParameter, readonly=True, no_value=None, visible=False, optional=True )
def init_meta( self, dataset, copy_from=None ):
Alignment.init_meta( self, dataset, copy_from=copy_from )
def set_meta( self, dataset, overwrite=True, **kwd ):
"""
Parses and sets species, chromosomes, index from MAF file.
"""
        # these metadata values are not accessible by users; always overwrite
# Imported here to avoid circular dependency
from galaxy.tools.util.maf_utilities import build_maf_index_species_chromosomes
indexes, species, species_chromosomes, blocks = build_maf_index_species_chromosomes( dataset.file_name )
if indexes is None:
return # this is not a MAF file
dataset.metadata.species = species
dataset.metadata.blocks = blocks
# write species chromosomes to a file
chrom_file = dataset.metadata.species_chromosomes
if not chrom_file:
chrom_file = dataset.metadata.spec['species_chromosomes'].param.new_file( dataset=dataset )
chrom_out = open( chrom_file.file_name, 'wb' )
for spec, chroms in species_chromosomes.items():
chrom_out.write( "%s\t%s\n" % ( spec, "\t".join( chroms ) ) )
chrom_out.close()
dataset.metadata.species_chromosomes = chrom_file
index_file = dataset.metadata.maf_index
if not index_file:
index_file = dataset.metadata.spec['maf_index'].param.new_file( dataset=dataset )
indexes.write( open( index_file.file_name, 'wb' ) )
dataset.metadata.maf_index = index_file
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
# The file must exist on disk for the get_file_peek() method
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if dataset.metadata.blocks:
dataset.blurb = "%s blocks" % util.commaify( str( dataset.metadata.blocks ) )
else:
# Number of blocks is not known ( this should not happen ), and auto-detect is
# needed to set metadata
dataset.blurb = "? blocks"
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return self.make_html_table( dataset )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
try:
out.append('<tr><th>Species: ')
for species in dataset.metadata.species:
out.append( '%s ' % species )
out.append( '</th></tr>' )
if not dataset.peek:
dataset.set_peek()
data = dataset.peek
lines = data.splitlines()
for line in lines:
line = line.strip()
if not line:
continue
out.append( '<tr><td>%s</td></tr>' % escape( line ) )
out.append( '</table>' )
out = "".join( out )
except Exception as exc:
out = "Can't create peek %s" % exc
return out
def sniff( self, filename ):
"""
        Determines whether the file is in maf format
The .maf format is line-oriented. Each multiple alignment ends with a blank line.
Each sequence in an alignment is on a single line, which can get quite long, but
there is no length limit. Words in a line are delimited by any white space.
Lines starting with # are considered to be comments. Lines starting with ## can
be ignored by most programs, but contain meta-data of one form or another.
The first line of a .maf file begins with ##maf. This word is followed by white-space-separated
variable=value pairs. There should be no white space surrounding the "=".
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format5
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'sequence.maf' )
>>> Maf().sniff( fname )
True
>>> fname = get_test_fname( 'sequence.fasta' )
>>> Maf().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0] == "##maf":
return True
else:
return False
except:
return False
class MafCustomTrack( data.Text ):
file_ext = "mafcustomtrack"
MetadataElement( name="vp_chromosome", default='chr1', desc="Viewport Chromosome", readonly=True, optional=True, visible=False, no_value='' )
MetadataElement( name="vp_start", default='1', desc="Viewport Start", readonly=True, optional=True, visible=False, no_value='' )
MetadataElement( name="vp_end", default='100', desc="Viewport End", readonly=True, optional=True, visible=False, no_value='' )
def set_meta( self, dataset, overwrite=True, **kwd ):
"""
Parses and sets viewport metadata from MAF file.
"""
max_block_check = 10
chrom = None
forward_strand_start = float( 'inf' )
forward_strand_end = 0
try:
maf_file = open( dataset.file_name )
maf_file.readline() # move past track line
for i, block in enumerate( bx.align.maf.Reader( maf_file ) ):
ref_comp = block.get_component_by_src_start( dataset.metadata.dbkey )
if ref_comp:
ref_chrom = bx.align.maf.src_split( ref_comp.src )[-1]
if chrom is None:
chrom = ref_chrom
if chrom == ref_chrom:
forward_strand_start = min( forward_strand_start, ref_comp.forward_strand_start )
forward_strand_end = max( forward_strand_end, ref_comp.forward_strand_end )
if i > max_block_check:
break
if forward_strand_end > forward_strand_start:
dataset.metadata.vp_chromosome = chrom
dataset.metadata.vp_start = forward_strand_start
dataset.metadata.vp_end = forward_strand_end
except:
pass
class Axt( data.Text ):
"""Class describing an axt alignment"""
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
    # from data.Text eliminates managing metadata elements inherited from the Alignment class.
file_ext = "axt"
def sniff( self, filename ):
"""
Determines whether the file is in axt format
axt alignment files are produced from Blastz, an alignment tool available from Webb Miller's lab
at Penn State University.
Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines.
Blocks are separated from one another by blank lines.
The summary line contains chromosomal position and size information about the alignment. It
consists of 9 required fields.
The sequence lines contain the sequence of the primary assembly (line 2) and aligning assembly
(line 3) with inserts. Repeats are indicated by lower-case letters.
For complete details see http://genome.ucsc.edu/goldenPath/help/axt.html
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'alignment.axt' )
>>> Axt().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.lav' )
>>> Axt().sniff( fname )
False
"""
headers = get_headers( filename, None )
if len(headers) < 4:
return False
for hdr in headers:
if len(hdr) > 0 and hdr[0].startswith("##matrix=axt"):
return True
if len(hdr) > 0 and not hdr[0].startswith("#"):
if len(hdr) != 9:
return False
try:
                    [ int( x ) for x in ( hdr[0], hdr[2], hdr[3], hdr[5], hdr[6], hdr[8] ) ]  # list comprehension so the casts run eagerly (map() is lazy on Python 3)
except:
return False
if hdr[7] not in data.valid_strand:
return False
else:
return True
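# A compact, self-contained sketch (hypothetical helper, not part of the
# Galaxy API) of the summary-line test in Axt.sniff() above: nine
# whitespace-separated fields with integer positions/sizes and a valid strand
# in field 8 (the real code checks against data.valid_strand).
def _valid_axt_summary( fields, valid_strand=( '+', '-' ) ):
    if len( fields ) != 9:
        return False
    try:
        [ int( fields[i] ) for i in ( 0, 2, 3, 5, 6, 8 ) ]
    except ValueError:
        return False
    return fields[7] in valid_strand
assert _valid_axt_summary( '0 chr19 3001012 3001075 chr11 70568380 70568443 - 3500'.split() )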
class Lav( data.Text ):
"""Class describing a LAV alignment"""
edam_format = "format_3014"
file_ext = "lav"
# gvk- 11/19/09 - This is really an alignment, but we no longer have tools that use this data type, and it is
# here simply for backward compatibility ( although it is still in the datatypes registry ). Subclassing
    # from data.Text eliminates managing metadata elements inherited from the Alignment class.
def sniff( self, filename ):
"""
Determines whether the file is in lav format
LAV is an alignment format developed by Webb Miller's group. It is the primary output format for BLASTZ.
The first line of a .lav file begins with #:lav.
For complete details see http://www.bioperl.org/wiki/LAV_alignment_format
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'alignment.lav' )
>>> Lav().sniff( fname )
True
>>> fname = get_test_fname( 'alignment.axt' )
>>> Lav().sniff( fname )
False
"""
headers = get_headers( filename, None )
try:
if len(headers) > 1 and headers[0][0] and headers[0][0].startswith('#:lav'):
return True
else:
return False
except:
return False
class RNADotPlotMatrix( data.Data ):
edam_format = "format_3466"
file_ext = "rna_eps"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = 'RNA Dot Plot format (Postscript derivative)'
dataset.blurb = nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def sniff(self, filename):
"""Determine if the file is in RNA dot plot format."""
if check_image_type( filename, ['EPS'] ):
seq = False
coor = False
pairs = False
with open( filename ) as handle:
for line in handle:
line = line.strip()
if line:
if line.startswith('/sequence'):
seq = True
elif line.startswith('/coor'):
coor = True
elif line.startswith('/pairs'):
pairs = True
if seq and coor and pairs:
return True
return False
class DotBracket ( Sequence ):
edam_format = "format_1457"
file_ext = "dbn"
sequence_regexp = re.compile( "^[ACGTURYKMSWBDHVN]+$", re.I)
    structure_regexp = re.compile( r"^[()\.\[\]{}]+$" )
def set_meta( self, dataset, **kwd ):
"""
Set the number of sequences and the number of data lines
in dataset.
"""
if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
dataset.metadata.data_lines = None
dataset.metadata.sequences = None
            dataset.metadata.secondary_structures = None
return
data_lines = 0
sequences = 0
        for line in open( dataset.file_name ):
line = line.strip()
data_lines += 1
if line and line.startswith( '>' ):
sequences += 1
dataset.metadata.data_lines = data_lines
dataset.metadata.sequences = sequences
def sniff(self, filename):
"""
Galaxy Dbn (Dot-Bracket notation) rules:
* The first non-empty line is a header line: no comment lines are allowed.
        * A header line starts with a '>' symbol and continues with zero or more characters until the line ends.
        * The second non-empty line is a sequence line.
        * A sequence line may only include chars that match the Fasta format (https://en.wikipedia.org/wiki/FASTA_format#Sequence_representation) symbols for nucleotides: ACGTURYKMSWBDHVN, and may thus not include whitespace.
* A sequence line has no prefix and no suffix.
* A sequence line is case insensitive.
* The third non-empty line is a structure (Dot-Bracket) line and only describes the 2D structure of the sequence above it.
* A structure line must consist of the following chars: '.{}[]()'.
* A structure line must be of the same length as the sequence line, and each char represents the structure of the nucleotide above it.
* A structure line has no prefix and no suffix.
        * A nucleotide pairs with at most one other nucleotide.
* In a structure line, the number of '(' symbols equals the number of ')' symbols, the number of '[' symbols equals the number of ']' symbols and the number of '{' symbols equals the number of '}' symbols.
* The format accepts multiple entries per file, given that each entry is provided as three lines: the header, sequence and structure line.
* Sniffing is only applied on the first entry.
* Empty lines are allowed.
"""
state = 0
with open( filename, "r" ) as handle:
for line in handle:
line = line.strip()
if line:
# header line
if state == 0:
if(line[0] != '>'):
return False
else:
state = 1
# sequence line
elif state == 1:
if not self.sequence_regexp.match(line):
return False
else:
sequence_size = len(line)
state = 2
# dot-bracket structure line
elif state == 2:
if sequence_size != len(line) or not self.structure_regexp.match(line) or \
line.count('(') != line.count(')') or \
line.count('[') != line.count(']') or \
line.count('{') != line.count('}'):
return False
else:
return True
# Number of lines is less than 3
return False
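# A standalone sketch (hypothetical helper, not part of the Galaxy API) of the
# three-line entry check performed by DotBracket.sniff() above.
def _valid_dbn_entry( header, sequence, structure ):
    if not header.startswith( '>' ):
        return False
    if not re.match( r'^[ACGTURYKMSWBDHVN]+$', sequence, re.I ):
        return False
    if len( structure ) != len( sequence ) or not re.match( r'^[()\.\[\]{}]+$', structure ):
        return False
    return all( structure.count( a ) == structure.count( b )
                for a, b in ( ( '(', ')' ), ( '[', ']' ), ( '{', '}' ) ) )
assert _valid_dbn_entry( '>mol', 'GGGAAACCC', '(((...)))' )
assert not _valid_dbn_entry( '>mol', 'GGGAAACCC', '(((...))' )  # length mismatch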
|
icaoberg/cellorganizer-galaxy-tools
|
datatypes/sequence.py
|
Python
|
gpl-3.0
| 47,418
|
[
"BLAST",
"BioPerl",
"Galaxy"
] |
890a7a3a62b74f7e66de904dcab60be7d32bc4f3f65accc9e6ae9b12c2bddea7
|
"""
Module StringFormat
The StringFormat module allows for character-by-character formatting of
strings. It imitates the SPING string drawing and string metrics
interface. The string formatting is done with specialized XML syntax
within the string. Therefore, the interface for the StringFormat module
consists of wrapper functions for the SPING string interface and
various XML tags and characters.
StringFormat functions
drawString(canvas, s, x, y, [font], [color], [angle])
stringWidth(canvas, s, [font])
fontHeight(canvas, [font])
fontAscent(canvas, [font])
fontDescent(canvas, [font])
StringFormat XML tags
<b> </b> - bold
<i> </i> - italics
<u> </u> - underline
<super> </super> - superscript
<sub> </sub> - subscript
StringFormat XML characters
Greek Letter Symbols as specified in MathML
"""
# How it works: Each tag grouping <b></b> sets a flag upon entry and
# clears the flag upon exit. Each call to handle_data creates a
# StringSegment which takes on all of the characteristics specified
# by flags currently set. The greek letters can be specified as either
# &alpha; or <alpha/>. They are essentially transformed into <alpha/>
# no matter what and then there is a handler for each greek letter.
# To add or change greek letter to symbol font mappings only
# the greekchars map needs to change.
from rdkit.sping.pid import Font, red, green, blue  # red/green/blue are assumed to be exported by sping.pid; used by the test functions below
import xmllib
import math
#------------------------------------------------------------------------
# constants
sizedelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.5 # fraction of font size that a sub script should be lowered
superFraction = 0.5 # fraction of font size that a super script should be raised
#------------------------------------------------------------------------
# greek mapping dictionary
# characters not supported: epsi, Gammad, gammad, kappav, rhov
# Upsi, upsi
greekchars = {
'alpha': 'a',
'beta': 'b',
'chi': 'c',
'Delta': 'D',
'delta': 'd',
'epsiv': 'e',
'eta': 'h',
'Gamma': 'G',
'gamma': 'g',
'iota': 'i',
'kappa': 'k',
'Lambda': 'L',
'lambda': 'l',
'mu': 'm',
'nu': 'n',
'Omega': 'W',
'omega': 'w',
'omicron': 'x',
'Phi': 'F',
'phi': 'f',
'phiv': 'j',
'Pi': 'P',
'pi': 'p',
'piv': 'v',
'Psi': 'Y',
'psi': 'y',
'rho': 'r',
'Sigma': 'S',
'sigma': 's',
'sigmav': 'V',
'tau': 't',
'Theta': 'Q',
'theta': 'q',
'thetav': 'j',
'Xi': 'X',
'xi': 'x',
'zeta': 'z'
}
#------------------------------------------------------------------------
class StringSegment:
"""class StringSegment contains the intermediate representation of string
segments as they are being parsed by the XMLParser.
"""
def __init__(self):
self.super = 0
self.sub = 0
self.bold = 0
self.italic = 0
self.underline = 0
self.s = ""
self.width = 0
self.greek = 0
def calcNewFont(self, font):
"Given a font (does not accept font==None), creates a \
new font based on the format of this text segment."
# if we are a greek character we need to pick a different fontface
if self.greek:
face = "symbol"
else:
face = font.face
# want to make sure that we don't lose any of the base
# font formatting
return Font(face=face, size=font.size - (self.super * sizedelta) - (self.sub * sizedelta),
underline=self.underline or font.underline, bold=self.bold or font.bold,
italic=self.italic or font.italic)
def calcNewY(self, font, y):
"Returns a new y coordinate depending on its \
whether the string is a sub and super script."
# should this take into account angle, I think probably not
if self.sub == 1:
return y + (font.size * subFraction)
elif self.super == 1:
return y - (font.size * superFraction)
else:
return y
def dump(self):
print("StringSegment: ]%s[" % self.s)
print("\tsuper = ", self.super)
print("\tsub = ", self.sub)
print("\tbold = ", self.bold)
print("\titalic = ", self.italic)
print("\tunderline = ", self.underline)
print("\twidth = ", self.width)
print("\tgreek = ", self.greek)
#------------------------------------------------------------------
# The StringFormatter will be able to format the following xml
# tags:
# < b > < /b > - bold
# < i > < /i > - italics
# < u > < /u > - underline
# < super > < /super > - superscript
# < sub > < /sub > - subscript
#
# It will also be able to handle any MathML specified Greek characters.
#
# Possible future additions: changing color and font
# character-by-character
#------------------------------------------------------------------
class StringFormatter(xmllib.XMLParser):
#----------------------------------------------------------
# First we will define all of the xml tag handler functions.
#
# start_<tag>(attributes)
# end_<tag>()
#
# While parsing the xml StringFormatter will call these
# functions to handle the string formatting tags.
# At the start of each tag the corresponding field will
# be set to 1 and at the end tag the corresponding field will
# be set to 0. Then when handle_data is called the options
# for that data will be apparent by the current settings.
#----------------------------------------------------------
#### bold
def start_b(self, attributes):
self.bold = 1
def end_b(self):
self.bold = 0
#### italics
def start_i(self, attributes):
self.italic = 1
def end_i(self):
self.italic = 0
#### underline
def start_u(self, attributes):
self.underline = 1
def end_u(self):
self.underline = 0
#### super script
def start_super(self, attributes):
self.super = 1
def end_super(self):
self.super = 0
#### sub script
def start_sub(self, attributes):
self.sub = 1
def end_sub(self):
self.sub = 0
#### greek script
def start_greek(self, attributes, letter):
# print("creating a greek letter... ", letter)
self.greek = 1
self.handle_data(letter)
def end_greek(self):
self.greek = 0
#----------------------------------------------------------------
def __init__(self):
xmllib.XMLParser.__init__(self)
# initialize list of string segments to empty
self.segmentlist = []
# initialize tag values
self.sub = 0
self.super = 0
self.bold = 0
self.italic = 0
self.underline = 0
# set up handlers for various tags
self.elements = {'b': (self.start_b, self.end_b),
'u': (self.start_u, self.end_u),
'i': (self.start_i, self.end_i),
'super': (self.start_super, self.end_super),
'sub': (self.start_sub, self.end_sub)}
# automatically add handlers for all of the greek characters
for item in greekchars.keys():
self.elements[item] = (lambda attr,self=self,letter=greekchars[item]: \
self.start_greek(attr,letter), self.end_greek)
# flag for greek characters
self.greek = 0
# set up dictionary for greek characters, this is a class variable
# should I copy it and then update it?
for item in greekchars.keys():
self.entitydefs[item] = '<%s/>' % item
#----------------------------------------------------------------
# def syntax_error(self,message):
# print(message)
#----------------------------------------------------------------
def handle_data(self, data):
"Creates an intermediate representation of string segments."
# segment first has data
segment = StringSegment()
segment.s = data
# if sub and super are both one they will cancel each other out
if self.sub == 1 and self.super == 1:
segment.sub = 0
segment.super = 0
else:
segment.sub = self.sub
segment.super = self.super
# bold, italic, and underline
segment.bold = self.bold
segment.italic = self.italic
segment.underline = self.underline
# greek character
segment.greek = self.greek
self.segmentlist.append(segment)
#----------------------------------------------------------------
def parseSegments(self, s):
"Given a formatted string will return a list of \
StringSegment objects with their calculated widths."
    # the xmlparser requires that all text be surrounded by xml
    # tags, therefore we wrap the given string in an unused
    # <formattedstring> tag pair
self.feed("<formattedstring>" + s + "</formattedstring>")
self.close() # force parsing to complete
self.reset() # get rid of any previous data
segmentlist = self.segmentlist
self.segmentlist = []
return segmentlist
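# Illustrative use of the parser above (it relies on the Python 2 `xmllib`
# module, like the rest of this file); exact segment boundaries depend on how
# xmllib buffers character data:
#
#   segs = StringFormatter().parseSegments("plain <b>bold</b>")
#   for seg in segs:
#       seg.dump()  # "plain " with all flags 0, then "bold" with bold = 1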
#------------------------------------------------------------------------
# These functions just implement an interface layer to SPING
def fontHeight(canvas, font=None):
"Find the total height (ascent + descent) of the given font."
return canvas.fontHeight(font)
def fontAscent(canvas, font=None):
"Find the ascent (height above base) of the given font."
return canvas.fontAscent(font)
def fontDescent(canvas, font=None):
"Find the descent (extent below base) of the given font."
return canvas.fontDescent(font)
#------------------------------------------------------------------------
# create an instantiation of the StringFormatter
#sformatter = StringFormatter()
#------------------------------------------------------------------------
# stringWidth and drawString both have to parse the formatted strings
def stringWidth(canvas, s, font=None):
"Return the logical width of the string if it were drawn \
in the current font (defaults to canvas.font)."
sformatter = StringFormatter()
segmentlist = sformatter.parseSegments(s)
# to calculate a new font the segments must be given an actual font
if not font:
font = canvas.defaultFont
  # sum up the string widths of each formatted segment
  total = 0
  for seg in segmentlist:
    total = total + canvas.stringWidth(seg.s, seg.calcNewFont(font))
  return total
def rotateXY(x, y, theta):
"Rotate (x,y) by theta degrees. Got transformation \
from page 299 in linear algebra book."
radians = theta * math.pi / 180.0
# had to change the signs to deal with the fact that the y coordinate
# is positive going down the screen
return (math.cos(radians) * x + math.sin(radians) * y,
-(math.sin(radians) * x - math.cos(radians) * y))
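# Quick numeric check of the screen-coordinate rotation above: with y growing
# downwards, rotating the point (1, 0) by +90 degrees lands (up to floating
# point error) on (0, -1), i.e. straight "up" on the screen.
_rx, _ry = rotateXY(1, 0, 90)
assert abs(_rx) < 1e-9 and abs(_ry + 1) < 1e-9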
def drawString(canvas, s, x, y, font=None, color=None, angle=0):
"Draw a formatted string starting at location x,y in canvas."
sformatter = StringFormatter()
segmentlist = sformatter.parseSegments(s)
# to calculate a new font the segments must be given an actual font
if not font:
font = canvas.defaultFont
# have each formatted string segment specify its own font
startpos = x
for seg in segmentlist:
# calculate x and y for this segment based on the angle
# if the string wasn't at an angle then
# (draw_x,draw_y) = (startpos, seg.calcNewY(font, y)) want to
# rotate around original x and y
(delta_x, delta_y) = rotateXY(startpos - x, seg.calcNewY(font, y) - y, angle)
canvas.drawString(seg.s, x + delta_x, y + delta_y, seg.calcNewFont(font), color, angle)
# new x start position, startpos is calculated assuming no angle
startpos = startpos + canvas.stringWidth(seg.s, seg.calcNewFont(font))
#------------------------------------------------------------------------
# Testing
#------------------------------------------------------------------------
from rdkit.sping.PDF import PDFCanvas
def test1():
canvas = PDFCanvas('test1.pdf')
drawString(canvas, "<u><b>hello there</b></u><super>hi</super>", 10, 20)
drawString(canvas, "hello!", 10, 40)
print("'hello!' width = ", stringWidth(canvas, "hello!"))
print("'hello!' SPING width = ", canvas.stringWidth("hello!"))
drawString(canvas, "<b>hello!</b> goodbye", 10, 60)
print("'<b>hello!</b> goodbye' width = ", stringWidth(canvas, "<b>hello!</b> goodbye"))
drawString(canvas, "hello!", 10, 80, Font(bold=1))
print("'hello!' Font(bold=1) SPING width = ", canvas.stringWidth("hello!", Font(bold=1)))
drawString(canvas, " goodbye", 10, 100)
print("' goodbye' SPING width = ", canvas.stringWidth(" goodbye"))
canvas.flush()
def test2():
canvas = PDFCanvas('test2.pdf')
drawString(canvas, "<alpha/>", 10, 10)
# drawString(canvas, "&", 10, 10)
drawString(canvas, "α", 10, 30)
# drawString(canvas, "a", 10, 50, Font(face="symbol"))
# drawString(canvas, "hello there!", 30, 90, angle= -90)
# drawString(canvas, "<b>goodbye!</b> <u>yall</u>", 100, 90, angle= 45)
# drawString(canvas, "there is a <u>time</u> and a <b>place</b><super>2</super>",
# 100, 90, angle= -75)
canvas.flush()
def allTagCombos(canvas, x, y, font=None, color=None, angle=0):
"""Try out all tags and various combinations of them.
Starts at given x,y and returns possible next (x,y)."""
oldDefault = canvas.defaultFont
if font:
canvas.defaultFont = font
oldx = x
dx = stringWidth(canvas, " ")
dy = canvas.defaultFont.size * 1.5
drawString(canvas, "<b>bold</b>", x, y, color=color, angle=angle)
x = x + stringWidth(canvas, "<b>bold</b>") + dx
drawString(canvas, "<i>italic</i>", x, y, color=color, angle=angle)
x = x + stringWidth(canvas, "<i>italic</i>") + dx
drawString(canvas, "<u>underline</u>", x, y, color=color, angle=angle)
x = x + stringWidth(canvas, "<u>underline</u>") + dx
drawString(canvas, "<super>super</super>", x, y, color=color, angle=angle)
x = x + stringWidth(canvas, "<super>super</super>") + dx
drawString(canvas, "<sub>sub</sub>", x, y, color=color, angle=angle)
y = y + dy
drawString(canvas, "<b><u>bold+underline</u></b>", oldx, y, color=color, angle=angle)
x = oldx + stringWidth(canvas, "<b><u>bold+underline</u></b>") + dx
drawString(canvas, "<super><i>super+italic</i></super>", x, y, color=color, angle=angle)
x = x + stringWidth(canvas, "<super><i>super+italic</i></super>") + dx
drawString(canvas, "<b><sub>bold+sub</sub></b>", x, y, color=color, angle=angle)
# x = x + stringWidth(canvas,"<b><sub>bold+sub</sub></b>") + dx
y = y + dy
canvas.defaultFont = oldDefault
return (oldx, y)
def stringformatTest():
# change the following line only to try a different SPING backend
canvas = PDFCanvas('bigtest1.pdf')
################################################### testing drawString tags
# < b > < /b > - bold
# < i > < /i > - italics
# < u > < /u > - underline
# < super > < /super > - superscript
# < sub > < /sub > - subscript
x = 10
y = canvas.defaultFont.size * 1.5
##### try out each possible tags and all combos
(x, y) = allTagCombos(canvas, x, y)
##### now try various fonts
(x, y) = allTagCombos(canvas, x, y + 30, Font(face="serif"))
(x, y) = allTagCombos(canvas, x, y + 30, Font(face="monospaced"))
# what about rotated
(x, y) = allTagCombos(canvas, x, y + 30, Font(face="serif"), angle=-30)
##### now try a couple of different font sizes
(x, y) = allTagCombos(canvas, x, y + 30, Font(size=16))
(x, y) = allTagCombos(canvas, x, y + 30, Font(size=9))
##### now try a different default style setting
(x, y) = allTagCombos(canvas, x, y + 30, Font(underline=1))
##### now try a combo of the above 4 and a different color
(x, y) = allTagCombos(canvas, x, y + 30, color=red)
################################################### testing stringWidth tags
sfwidth = stringWidth(canvas,
"<b><sub>bold+sub</sub></b> hello <u><super>underline+super</super></u>")
# break down the various string widths
print('sw("<b><sub>bold+sub</sub></b>") = ', stringWidth(canvas, "<b><sub>bold+sub</sub></b>"))
print('sw(" hello ") = ', stringWidth(canvas, " hello "))
print('sw("<u><super>underline+super</super></u>") = ',
stringWidth(canvas, "<u><super>underline+super</super></u>"))
pwidth1 = canvas.stringWidth("bold+sub", Font(size=canvas.defaultFont.size - sizedelta, bold=1))
print("pwidth1 = ", pwidth1)
pwidth2 = canvas.stringWidth(" hello ")
print("pwidth2 = ", pwidth2)
pwidth3 = canvas.stringWidth("underline+super",
Font(size=canvas.defaultFont.size - sizedelta, underline=1))
print("pwidth3 = ", pwidth3)
# these should be the same
print("sfwidth = ", sfwidth, " pwidth = ", pwidth1 + pwidth2 + pwidth3)
################################################### testing greek characters
# looks better in a larger font
canvas = PDFCanvas('bigtest2.pdf')
x = 10
y = canvas.defaultFont.size * 1.5
drawString(canvas, "α β <chi/> Δ <delta/>", x, y, Font(size=16), color=blue)
print("line starting with alpha should be font size 16")
y = y + 30
drawString(canvas, "ϵ η Γ <gamma/>", x, y, color=green)
y = y + 30
drawString(canvas, "ι κ Λ <lambda/>", x, y, color=blue)
y = y + 30
drawString(canvas, "<u>μ</u> ν <b>Ω</b> <omega/>", x, y, color=green)
print("mu should be underlined, Omega should be big and bold")
y = y + 30
drawString(canvas, "ο Φ φ <phiv/>", x, y, color=blue)
y = y + 30
drawString(canvas, "Π π ϖ <Psi/> ψ ρ", x, y, color=green)
y = y + 30
drawString(canvas, "<u>Σ σ ς <tau/></u>", x, y, color=blue)
print("line starting with sigma should be completely underlined")
y = y + 30
drawString(canvas, "Θ θ ϑ <Xi/> ξ ζ", x, y, color=green)
y = y + 30
drawString(canvas, "That's αll <u>folks</u><super>ω</super>", x, y)
canvas.flush()
#test1()
#test2()
#stringformatTest()
|
bp-kelley/rdkit
|
rdkit/sping/stringformat.py
|
Python
|
bsd-3-clause
| 17,971
|
[
"RDKit"
] |
69590c8d7d6258396f1709ed64621054af5ac042aa9dfcafa1779e608b0110f9
|
#!/usr/bin/env python
"""This module implements radical-bac-fecalc --benchmark.
"""
__author__ = "Ole Weidner"
__email__ = "ole.weidner@rutgers.edu"
__copyright__ = "Copyright 2013-2014, The RADICAL Project at Rutgers"
__license__ = "MIT"
import imp
import os, sys, uuid
import urllib
import optparse
import radical.pilot
from radical.ensemblemd.mdkernels import MDTaskDescription
from radical.ensemblemd.htbac.common import BatchRunner
# ----------------------------------------------------------------------------
#
def run_workload(config, workload):
# """Runs a workload.
# """
server = config.SERVER
dbname = config.DBNAME
maxcpus = config.MAXCPUS
resource = config.RESOURCE
username = config.USERNAME
allocation = config.ALLOCATION
    # We cannot allocate more than "maxcpus". If the total core count of the
    # tasks is smaller than 'maxcpus', the pilot is sized to exactly that
    # count. If it is larger, we use "maxcpus" and extend the runtime of the
    # pilot to cover the extra batches.
    # NOTE: currently, we assume (near) homogeneous runtime among all tasks.
task_runtime = workload[0]["runtime"]
cores = 0
for task in workload:
cores += task["cores"]
if cores < maxcpus:
pilot_size = cores
pilot_runtime = task_runtime
else:
pilot_size = maxcpus
pilot_runtime = task_runtime * (len(workload)/maxcpus)
if len(workload)%maxcpus > 0:
pilot_runtime += task_runtime
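    # Worked example (illustrative): with maxcpus = 64 and ten 8-core tasks of
    # 30 min each, cores = 80 >= maxcpus, so pilot_size = 64 and, via integer
    # division, pilot_runtime = 30 * (10 / 64) = 0 plus one extra task_runtime
    # for the remainder batch, i.e. 30 min in total.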
print "\n * Number of tasks: %s" % len(workload)
print " * Pilot size (# cores): %s" % pilot_size
print " * Pilot runtime: %s\n" % pilot_runtime
############################################################
# The pilot description
pdesc = radical.pilot.ComputePilotDescription()
pdesc.resource = resource
pdesc.runtime = pilot_runtime
pdesc.cores = pilot_size
pdesc.project = allocation
pdesc.cleanup = False
############################################################
# Workload definition
tasknum = 0
all_tasks = []
    # Create CU descriptions from workload tasks...
for task in workload:
tasknum += 1
parmfile = task["parmfile"]
parmfile_basen = os.path.basename(parmfile)
coordinates = task["coordinates"]
coordinates_basen = os.path.basename(coordinates)
conskfile = task["conskfile"]
coordinates_basen = os.path.basename(conskfile)
input = task["input"]
input_basen = os.path.basename(input)
output = task["output"]
mdtd = MDTaskDescription()
mdtd.kernel = "NAMD"
mdtd.arguments = ["{0}".format(input_basen)]
mdtd_bound = mdtd.bind(resource=resource)
mmpbsa_task = radical.pilot.ComputeUnitDescription()
mmpbsa_task.environment = mdtd_bound.environment
mmpbsa_task.pre_exec = mdtd_bound.pre_exec
mmpbsa_task.executable = mdtd_bound.executable
mmpbsa_task.arguments = mdtd_bound.arguments
mmpbsa_task.mpi = mdtd_bound.mpi
mmpbsa_task.cores = task["cores"]
mmpbsa_task.name = task["name"]
mmpbsa_task.input_data = [parmfile, coordinates, conskfile, input]
mmpbsa_task.output_data = ["STDOUT > %s" % output]
all_tasks.append(mmpbsa_task)
############################################################
# Call the batch runner
br = BatchRunner(config=config)
finished_units = br.run(pilot_description=pdesc, cu_descriptions=all_tasks)
if type(finished_units) != list:
finished_units = [finished_units]
print "\nDONE"
print "=============================================================================\n"
for unit in finished_units:
if unit.state == radical.pilot.DONE:
t_start = unit.start_time
t_stop = unit.stop_time
t_run = t_stop - t_start
else:
t_run = "failed"
local_output = unit.description.output_data[0].split(" > ")[1]
print " o Task {0} RUNTIME {1} OUTPUT: {2}".format(unit.description.name, t_run, local_output)
br.close()
|
radical-cybertools/HT-BAC
|
src/radical/ensemblemd/htbac/simchain/workload.py
|
Python
|
mit
| 4,229
|
[
"NAMD"
] |
633386403df052b10a0cabbf7c1f13711d64deeb4130e6055159a319304f301a
|
import datetime
import unittest
from unittest.mock import patch
import numpy as np
import pytz
from data.nemo import Nemo
from data.netcdf_data import NetCDFData
from data.variable import Variable
from data.variable_list import VariableList
from utils.errors import APIError
class TestNemo(unittest.TestCase):
def setUp(self):
self.variable_list_mock = VariableList(
[
Variable(
"votemper",
"Water temperature at CMC",
"Kelvins",
sorted(["deptht", "time_counter", "y", "x"]),
)
]
)
def test_init(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
ds = Nemo(nc_data)
self.assertIs(ds.nc_data, nc_data)
self.assertEqual(ds.variables, nc_data.variables)
def test_depths(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
expected = np.array(
[
0.494025,
1.54138,
2.64567,
3.81949,
5.07822,
6.44061,
7.92956,
9.573,
11.405,
13.4671,
15.8101,
18.4956,
21.5988,
25.2114,
29.4447,
34.4342,
40.3441,
47.3737,
55.7643,
65.8073,
77.8539,
92.3261,
109.729,
130.666,
155.851,
186.126,
222.475,
266.04,
318.127,
380.213,
453.938,
541.089,
643.567,
763.333,
902.339,
1062.44,
1245.29,
1452.25,
1684.28,
1941.89,
2225.08,
2533.34,
2865.7,
3220.82,
3597.03,
3992.48,
4405.22,
4833.29,
5274.78,
5727.92,
],
dtype=np.float32,
)
np.testing.assert_array_equal(ds.depths, expected)
@patch("tests.test_nemo.NetCDFData")
def test_no_depth_variable(self, patch_netcdfdata):
# This is a hack to trigger the no depth variable edge case
patch_netcdfdata.depth_dimensions.return_value = []
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
np.testing.assert_array_equal(ds.depths, np.array([0]))
def test_variables(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
variables = ds.variables
self.assertEqual(len(variables), 3)
self.assertTrue("votemper" in variables)
self.assertEqual(variables["votemper"].name, "Water temperature at CMC")
self.assertEqual(variables["votemper"].unit, "Kelvins")
self.assertEqual(
sorted(variables["votemper"].dimensions),
sorted(["deptht", "time_counter", "y", "x"]),
)
def test_get_point(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
self.assertAlmostEqual(
ds.get_point(13.0, -149.0, 0, "votemper", 2031436800), 299.17, places=2
)
def test_get_raw_point(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
lat, lon, data = ds.get_raw_point(13.0, -149.0, 0, 2031436800, "votemper")
self.assertEqual(len(lat.values.ravel()), 12)
self.assertEqual(len(lon.values.ravel()), 12)
self.assertEqual(len(data.values.ravel()), 12)
self.assertAlmostEqual(data.values[1, 1], 299.3, places=1)
def test_get_profile(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
p, d = ds.get_profile(13.0, -149.0, "votemper", 2031436800)
self.assertAlmostEqual(p[0], 299.17, places=2)
self.assertAlmostEqual(p[10], 299.15, places=2)
self.assertAlmostEqual(p[20], 296.466766, places=6)
self.assertTrue(np.ma.is_masked(p[49]))
def test_get_profile_depths(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
p = ds.get_profile_depths(
13.0,
-149.0,
2031436800,
"votemper",
[0, 10, 25, 50, 100, 200, 500, 1000],
)
self.assertTrue(np.ma.is_masked(p[0]))
self.assertAlmostEqual(p[1], 299.15, places=2)
self.assertAlmostEqual(p[4], 292.48, places=2)
self.assertAlmostEqual(p[7], 277.90, places=2)
def test_bottom_point(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
self.assertAlmostEqual(
ds.get_point(13.0, -149.0, "bottom", "votemper", 2031436800),
274.13,
places=2,
)
def test_get_area(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
a = np.array(
np.meshgrid(np.linspace(5, 10, 10), np.linspace(-150, -160, 10))
)
r = ds.get_area(a, 0, 2031436800, "votemper", "gaussian", 25000, 10)
self.assertAlmostEqual(r[5, 5], 301.285, places=3)
r = ds.get_area(a, 0, 2031436800, "votemper", "bilinear", 25000, 10)
self.assertAlmostEqual(r[5, 5], 301.269, places=3)
r = ds.get_area(a, 0, 2031436800, "votemper", "nearest", 25000, 10)
self.assertAlmostEqual(r[5, 5], 301.28986, places=5)
r = ds.get_area(a, 0, 2031436800, "votemper", "inverse", 25000, 10)
self.assertAlmostEqual(r[5, 5], 301.2795, places=4)
@unittest.skip("IndexError: index 0 is out of bounds for axis 0 with size 0")
def test_get_path_profile(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
p, d, r, dep = ds.get_path_profile(
[[13, -149], [14, -140], [15, -130]], "votemper", 2031436800, 10
)
self.assertEqual(r.shape[0], 50)
self.assertGreater(r.shape[1], 10)
self.assertEqual(r.shape[1], p.shape[1])
self.assertEqual(r.shape[1], len(d))
self.assertEqual(d[0], 0)
def test_get_timeseries_point(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
r = ds.get_timeseries_point(
13.0, -149.0, 0, 2031436800, 2034072000, "votemper"
)
self.assertAlmostEqual(r[0], 299.17, places=2)
self.assertAlmostEqual(r[1], 299.72, places=2)
def test_get_timeseries_profile(self):
nc_data = NetCDFData("tests/testdata/nemo_test.nc")
with Nemo(nc_data) as ds:
r, d = ds.get_timeseries_profile(
13.0, -149.0, 2031436800, 2034072000, "votemper"
)
self.assertAlmostEqual(r[0, 0], 299.17, places=2)
self.assertAlmostEqual(r[0, 10], 299.15, places=2)
self.assertAlmostEqual(r[0, 20], 296.466766, places=6)
self.assertTrue(np.ma.is_masked(r[0, 49]))
self.assertNotEqual(r[0, 0], r[1, 0])
self.assertTrue(np.ma.is_masked(r[1, 49]))
def test_get_profile_raises_when_surface_variable_requested(self):
nc_data = NetCDFData("tests/testdata/salishseacast_ssh_test.nc")
with Nemo(nc_data) as ds:
with self.assertRaises(APIError):
ds.get_profile(None, None, "ssh", None, None)
|
DFO-Ocean-Navigator/Ocean-Data-Map-Project
|
tests/test_nemo.py
|
Python
|
gpl-3.0
| 8,292
|
[
"Gaussian"
] |
1180a69ab9065ad380ec8567da033a367cdf3780d95ea32c6298c1b0a86b2e4a
|
import types
import unittest
import math
import copy
import Numeric
import MLab
import pygsl.testing.rng as rngmodule
import sys
sys.stdout = sys.stderr
rng_types = rngmodule.types_setup()
class _rng_type:
_type = None
for i in rng_types:
tmp = "class %s(_rng_type): _type = rngmodule.%s" % ((i,) * 2)
exec(tmp)
class _rng_basics(unittest.TestCase):
"""
here are things tested like allocation, destruction, initialisation
"""
def test_alloc(self):
"""
allocate the default rng
"""
rng=rngmodule.rng(self._type)
self.failIf(rng.name()=="","name of rng was \"\"")
self.failIf(rng.name() is None,"name of rng was None")
rng=None
def test_uniform(self):
"""
get one value from rng
"""
rng=rngmodule.rng(self._type)
value=rng.uniform()
rng=None
self.failIf(value<0 or value>=1.0,
"value of %f not expected from uniform distribution"%value)
def test_uniform_pos(self):
"""
get one value from rng
"""
rng=rngmodule.rng(self._type)
value=rng.uniform_pos()
rng=None
self.failIf(value<0 or value>1.0,
"value of %f not expected from uniform distribution"%value)
def test_rng_reproduce(self):
rng=rngmodule.rng(self._type)
rng.set(1)
value1=rng.get()
rng.set(1)
value2=rng.get()
        self.failIf(value1!=value2,"values from rng not reproducible")
class _rng_distributions(unittest.TestCase):
"""
test different distributions
"""
def setUp(self):
#print "Testing Class ", self.__class__.__name__
sys.stdout.flush()
sys.stderr.flush()
self.rng=rngmodule.rng(self._type)
def tearDown(self):
self.rng=None
def testMin(self):
min = self.rng.min()
assert(type(min) == types.LongType)
def testMax(self):
max = self.rng.max()
assert(type(max) == types.LongType)
def testMinMax(self):
min = self.rng.min()
max = self.rng.max()
assert(min<max)
def testcopy(self):
rng = copy.copy(self.rng)
for i in range(10):
assert(rng.get() == self.rng.get())
def test_uniform_int(self):
for i in range(10):
tmp = self.rng.uniform_int(2)
assert(tmp>=0)
assert(tmp<=2)
def _test_generic_return_generic(self, method, pdf_method, mytype, arraytype, *args):
test = 0
try:
d = method(*args)
assert(type(d) == mytype)
if pdf_method:
p = apply(pdf_method, (d,) + args)
assert(type(p) == types.FloatType)
da = apply(method, args + (10,))
assert(type(da) == Numeric.ArrayType)
assert(len(da.shape) == 1)
assert(da.typecode() == arraytype)
assert(da.shape[0] == 10)
if pdf_method:
pa = apply(pdf_method, (da,) + args)
assert(type(pa) == Numeric.ArrayType)
assert(len(pa.shape) == 1)
assert(pa.typecode() == Numeric.Float)
assert(pa.shape[0] == 10)
test = 1
finally:
if test == 0:
print "I was testing %s and pdf function %s " %(method, pdf_method)
def _test_ui_return_one(self, method, pdf_method, *args):
self._test_generic_return_generic(method, pdf_method, types.LongType,
Numeric.Int, *args)
def _test_double_return_one(self, method, pdf_method, *args):
self._test_generic_return_generic(method, pdf_method, types.FloatType,
Numeric.Float, *args)
def _test_nd_return_one(self, method, pdf_method, n, *args):
test = 0
try:
d = method(*args)
assert(len(d) == n)
for i in d:
assert(type(i) == types.FloatType)
if pdf_method:
p = apply(pdf_method, tuple(d) + args)
assert(type(p) == types.FloatType)
da = apply(method, args + (10,))
assert(type(da) == Numeric.ArrayType)
assert(da.typecode() == Numeric.Float)
assert(len(da.shape) == 2)
assert(da.shape[0] == 10)
assert(da.shape[1] == n)
test = 1
if pdf_method:
pa = apply(pdf_method, (da[:,0], da[:,1]) + args)
assert(type(pa) == Numeric.ArrayType)
assert(pa.typecode() == Numeric.Float)
assert(len(pa.shape) == 1)
assert(pa.shape[0] == 10)
assert(type(p) == types.FloatType)
finally:
if test == 0:
print "I was testing ", method
def _test_ui_return(self, methods, *args):
for i in methods:
tmp = getattr(self.rng, i)
            pdf = None
            try:
                pdf = getattr(rngmodule, i + '_pdf')
            except AttributeError:
                pass
self._test_ui_return_one(tmp, pdf, *args)
def _test_double_return(self, methods, *args):
for i in methods:
tmp = getattr(self.rng, i)
            pdf = None
            try:
                pdf = getattr(rngmodule, i + '_pdf')
            except AttributeError:
                pass
self._test_double_return_one(tmp, pdf, *args)
def _test_nd_return(self, methods, *args):
for i in methods:
tmp = getattr(self.rng, i)
pdf = None
try:
pdf = getattr(rngmodule, i + '_pdf')
except AttributeError:
pass
self._test_nd_return_one(tmp, pdf, *args)
def test_ui_to_double(self):
self._test_double_return_one(self.rng.gamma_int, None, 1000L)
def test_to_double(self):
t = ('ugaussian',
'ugaussian_ratio_method',
'landau')
self._test_double_return(t)
def test_d_to_double(self):
t = ('gaussian',
'gaussian_ratio_method',
'ugaussian_tail',
'exponential',
'laplace',
'cauchy',
'rayleigh',
'chisq',
'tdist',
'logistic')
self._test_double_return(t, 1.0)
def test_dd_to_double(self):
t = ('gaussian_tail',
'exppow',
'rayleigh_tail',
'levy',
'gamma',
'flat',
'lognormal',
'fdist',
'beta',
'pareto',
'weibull',
'gumbel1',
'gumbel2',
'erlang')
self._test_double_return(t, 2.0, 3.0)
def test_ddd_to_double(self):
self._test_double_return_one(self.rng.levy_skew, None, 0, 1.0, 2.0)
def test_d_to_ui(self):
t = ('poisson',
'bernoulli',
'geometric',
'logarithmic')
self._test_ui_return(t, 2.3)
def test_dd_to_ui(self):
t = ('binomial',
'pascal',
'negative_binomial')
self._test_ui_return(t, 2.0, 4.5)
def test_uiuiui_to_ui(self):
self._test_ui_return_one(self.rng.hypergeometric, rngmodule.hypergeometric_pdf,
4L, 2L, 56L)
def test_ddd_to_dd(self):
self._test_nd_return_one(self.rng.bivariate_gaussian, rngmodule.bivariate_gaussian_pdf,
2, 0, 1, 2)
def test_dir(self):
self._test_nd_return_one(self.rng.dir_2d, None, 2)
self._test_nd_return_one(self.rng.dir_2d_trig_method, None, 2)
self._test_nd_return_one(self.rng.dir_3d, None, 3)
self._test_nd_return_one(self.rng.dir_nd, None, 1, 1)
self._test_nd_return_one(self.rng.dir_nd, None, 2, 2)
self._test_nd_return_one(self.rng.dir_nd, None, 3, 3)
self._test_nd_return_one(self.rng.dir_nd, None, 4, 4)
self._test_nd_return_one(self.rng.dir_nd, None, 5, 5)
self._test_nd_return_one(self.rng.dir_nd, None, 6, 6)
def test_dirichlet(self):
a = Numeric.arange(10) * .1 + .1
d = self.rng.dirichlet(a)
assert(type(d) == Numeric.ArrayType)
assert(d.typecode() == Numeric.Float)
assert(len(d.shape) == 1)
assert(d.shape[0] == a.shape[0])
ra = Numeric.reshape(a, (a.shape[0], -1))
ra = Numeric.transpose(ra)
p = rngmodule.dirichlet_pdf(d,ra)
d = self.rng.dirichlet(a,100)
assert(type(d) == Numeric.ArrayType)
assert(d.typecode() == Numeric.Float)
assert(len(d.shape) == 2)
assert(d.shape[0] == 100)
assert(d.shape[1] == a.shape[0])
def test_multinomial(self):
pass
def test_gaussian(self):
sum=0
count=0
num=10000
accepted_deviation=math.sqrt(num)*5.0
sum = Numeric.add.reduce(self.rng.gaussian(1.0, num))
self.failIf(abs(sum)>accepted_deviation,"the sum of %d gaussian values is %g"%(num,sum))
def test_gaussian_tail(self):
self.rng.gaussian_tail(1.0, 0.5, 1000)
#print "Last rng = ", rng_types[-1]
for i in rng_types[:3]:
tmp = "class %s_rng_basics(%s, _rng_basics): pass" % ((i,) *2)
exec(tmp)
tmp = "class %s_rng_distributions(%s, _rng_distributions): pass" % ((i,) *2)
exec(tmp)
del _rng_basics
del _rng_distributions
if __name__ == "__main__":
unittest.main()
|
juhnowski/FishingRod
|
production/pygsl-0.9.5/testing/tests/rng_test.py
|
Python
|
mit
| 9,957
|
[
"Gaussian"
] |
88d3d0bad6ec903c0cef9be602252487dfea53e70e620bc1ae72401a149e2af4
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
This module is to calculate the ghosecrippen descriptor. If you
have any question please contact me via email.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com and oriental-cds@163.com
"""
# Core Library modules
import os
import string
# Third party modules
from rdkit import Chem
Version = 1.0
###########################################################################
def _ReadPatts(fileName):
"""
#################################################################
*Internal Use Only*
parses the pattern list from the data file
#################################################################
"""
patts = {}
order = []
with open(fileName, "r") as f:
lines = f.readlines()
for line in lines:
if line[0] != "#":
splitLine = line.split("\t")
if len(splitLine) >= 4 and splitLine[0] != "":
sma = splitLine[1]
if sma != "SMARTS":
                        sma = sma.replace('"', "")
p = Chem.MolFromSmarts(sma)
if p:
cha = splitLine[0].strip()
if cha not in order:
order.append(cha)
l = patts.get(cha, [])
l.append((sma, p))
patts[cha] = l
else:
print("Problems parsing smarts: %s" % (sma))
return order, patts
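# Illustrative only: each non-comment data line in Crippen.txt is assumed to
# be tab-separated, with the atom-type label in column 0 and its SMARTS
# pattern in column 1, e.g. (hypothetical values):
#
#   "C1\t[CH4]\t0.1441\t2.503"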
###########################################################################
def GhoseCrippenFingerprint(mol, count=False):
"""
#################################################################
Ghose-Crippen substructures or counts based on the definitions of
SMARTS from Ghose-Crippen's paper. (110 dimension)
The result is a dict format.
#################################################################
"""
order, patts = _ReadPatts(
os.path.dirname(os.path.abspath(__file__)) + "/Crippen.txt"
)
GCres = dict()
for sma in patts:
match = mol.GetSubstructMatches(patts[sma][0][1], False, False)
temp = len([i[0] for i in match])
GCres.update({sma: temp})
res = {}
    if not count:
for i in GCres:
if GCres[i] > 0:
res.update({i: 1})
else:
res.update({i: 0})
else:
res = GCres
return res
###############################################################################
if __name__ == "__main__":
smif = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-].[Na+]"]
AllDes = []
for i in smif:
mol = Chem.MolFromSmiles(i)
order, patts = _ReadPatts(
os.path.dirname(os.path.abspath(__file__)) + "/Crippen.txt"
)
temp = GhoseCrippenFingerprint(mol, count=True)
AllDes.append(temp)
print(AllDes)
|
gadsbyfly/PyBioMed
|
PyBioMed/PyMolecule/ghosecrippen.py
|
Python
|
bsd-3-clause
| 3,220
|
[
"RDKit"
] |
e4e3aeb416eeb1623863eb7af359e5449c9590d42725dec561792154dfef3674
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type resolution.
This analyzer uses known live values to further infer object types. This
may include for instance constructed objects and object member functions.
In addition, the analyzer also handles user annotations made in the code (for
example, the autograph.set_element_type function).
Requires annotations generated by LiveValuesResolver.
"""
# TODO(mdan): This would be more robust with a CFG.
# Situations with multiple reaching modifications (e.g. modified inside and
# outside a control flow statement) should be more robustly detected and
# analyzed.
# TODO(mdan): Look into using Python AST's type annotation fields instead.
# It would be desirable to use that mechanism if we can.
# Some caveats to consider: We may need to annotate other nodes like
# Attribute. It may also not be feasible for us to faithfully replicate
# PY3's type annotations where they aren't available. It would also require us
# to design rigorous type definitions that can accommodate Python types
# as well as TensorFlow dtypes and shapes.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.util import tf_inspect
# TODO(mdan): Remove the duplication between this and activity.py.
# In particular, the symbol definitions we track here could as well be tracked
# there because they follow the same rules for visibility.
# TODO(mdan): Use a CFG based Defined analysis instead.
class Scope(object):
"""Tracks symbol value references.
Attributes:
values: A dict mapping string to gast.Node, containing the value that was
most recently assigned to the symbol.
"""
def __init__(self, parent):
"""Create a new scope.
Args:
parent: A Scope or None.
"""
self.parent = parent
self.values = {}
def __repr__(self):
return 'Scope[%s]' % self.values.keys()
def copy(self):
s = Scope(self.parent)
s.values = self.values.copy()
return s
def setval(self, name, value):
self.values[name] = value
def hasval(self, name):
return (name in self.values or
(self.parent is not None and self.parent.hasval(name)))
def getval(self, name):
if name in self.values:
return self.values[name]
if self.parent is not None:
return self.parent.getval(name)
raise KeyError(name)
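# Example (hedged): lookups chain through parent scopes. With plain string
# keys for illustration (real keys are qualified names):
#   outer = Scope(None); outer.setval('x', some_node)
#   inner = Scope(outer)
#   inner.hasval('x')  # True, resolved via the parent
#   inner.getval('x')  # some_node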
class TypeInfoResolver(transformer.Base):
"""Annotates symbols with type information where possible.
Nodes currently annotated:
* Call (helps detect class constructors)
* Attribute (helps resolve object methods)
"""
def __init__(self, context):
super(TypeInfoResolver, self).__init__(context)
self.scope = Scope(None)
def visit_FunctionDef(self, node):
self.scope = Scope(self.scope)
node = self.generic_visit(node)
self.scope = self.scope.parent
return node
def _visit_block(self, block):
self.scope = Scope(self.scope)
block = self.visit_block(block)
self.scope = self.scope.parent
return block
def visit_For(self, node):
self.generic_visit(node.target)
self.generic_visit(node.iter)
node.body = self._visit_block(node.body)
node.orelse = self._visit_block(node.orelse)
return node
def visit_While(self, node):
self.generic_visit(node.test)
node.body = self._visit_block(node.body)
node.orelse = self._visit_block(node.orelse)
return node
def visit_If(self, node):
self.generic_visit(node.test)
node.body = self._visit_block(node.body)
node.orelse = self._visit_block(node.orelse)
return node
def _process_function_arg(self, arg_node):
qn = anno.getanno(arg_node, anno.Basic.QN)
arg_name = str(qn)
self.scope.setval(qn, arg_node)
if (len(self.enclosing_entities) == 1 and
arg_name in self.entity_info.arg_types):
# Forge a node to hold the type information, so that method calls on
# it can resolve the type.
type_string, type_obj = self.entity_info.arg_types[arg_name]
anno.setanno(arg_node, 'type', type_obj)
anno.setanno(arg_node, 'type_fqn', tuple(type_string.split('.')))
def visit_arg(self, node):
self._process_function_arg(node.arg)
return node
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Param):
self._process_function_arg(node)
elif isinstance(node.ctx, gast.Load):
qn = anno.getanno(node, anno.Basic.QN)
if self.scope.hasval(qn):
# E.g. if we had
# a = b
# then for future references to `a` we should have definition = `b`
definition = self.scope.getval(qn)
anno.copyanno(definition, node, 'type')
anno.copyanno(definition, node, 'type_fqn')
# TODO(mdan): Remove this when the directives module is in.
anno.copyanno(definition, node, 'element_type')
anno.copyanno(definition, node, 'element_shape')
return node
def _process_variable_assignment(self, target, value):
# Constructors
if isinstance(value, gast.Call):
func = value.func
if anno.hasanno(func, 'live_val'):
func_obj = anno.getanno(func, 'live_val')
if tf_inspect.isclass(func_obj):
anno.setanno(value, 'is_constructor', True)
anno.setanno(value, 'type', func_obj)
anno.setanno(value, 'type_fqn', anno.getanno(func, 'fqn'))
# TODO(mdan): Raise an error if constructor has side effects.
# We can have a whitelist of no-side-effects constructors.
# We can also step inside the constructor and further analyze.
if isinstance(target, (gast.Name, gast.Attribute)):
target_symbol = anno.getanno(target, anno.Basic.QN)
self.scope.setval(target_symbol, value)
elif isinstance(target, gast.Subscript):
pass
else:
raise ValueError('assignment target has unknown type: %s' % target)
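  # Example (hedged): for code like `opt = MyClass()`, when LiveValuesResolver
  # has annotated `MyClass` with a live value that tf_inspect.isclass accepts,
  # the Call node is marked is_constructor and `opt` later resolves to type
  # MyClass (with type_fqn taken from the function's 'fqn' annotation).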
def visit_With(self, node):
for item in node.items:
if item.optional_vars is not None:
ast_util.apply_to_single_assignments((item.optional_vars,),
item.context_expr,
self._process_variable_assignment)
self.generic_visit(node)
return node
def visit_Assign(self, node):
self.generic_visit(node)
ast_util.apply_to_single_assignments(node.targets, node.value,
self._process_variable_assignment)
return node
def resolve(node, context):
return TypeInfoResolver(context).visit(node)
|
dongjoon-hyun/tensorflow
|
tensorflow/python/autograph/pyct/static_analysis/type_info.py
|
Python
|
apache-2.0
| 7,393
|
[
"VisIt"
] |
5c6660bd2b66fcf71a7d975e95caeda9f1a45e1ec3661a63e9d378493618f13e
|
"""
INFO
-------------------------------------------------------------------------------------------
Ingest model script uploads the respective model of each estimation method.
INPUT:
-i, --input : Absolute path of model_template zip
-m, --method : Estimation method (e.g. shallow_ae_km2)
-ht, --html : String that presents the estimation method to the end user
              (e.g. Shallow autoencoder clustering (km2))
Example run:
python ingest_model.py -i /pilot_data/<model_template.zip> -m
"<clustering/classification method>" -ht "<html_repr>"
-------------------------------------------------------------------------------------------
"""
import json
import requests
from operator import attrgetter
from argparse import ArgumentParser
import os
import psycopg2
import datetime
import getpass
from pywebhdfs.webhdfs import PyWebHdfsClient
import gzip,cPickle
if __name__ == '__main__':
    parser = ArgumentParser(description='Ingest an estimation-method model into HDFS and the models table')
parser.add_argument('-i', '--input', required=True, type=str,
help='model path')
parser.add_argument('-m', '--method', required=True, type=str,
help='clustering method')
parser.add_argument('-ht', '--html', required=True, type=str,
help='html description')
opts = parser.parse_args()
getter = attrgetter('input','method','html')
inp,method,html = getter(opts)
# Get list of uploaded models
req = requests.get('http://namenode:50070/webhdfs/v1/sc5/models?op=LISTSTATUS')
resp = req.json()
fl = resp['FileStatuses']['FileStatus']
hdfs_list = []
for file in fl:
hdfs_list.append(file['pathSuffix'])
# Load db info
with open('db_info.json','r') as data_file:
dbpar = json.load(data_file)
# Request db password
dpass = getpass.getpass()
# Connect to db
    conn = psycopg2.connect("dbname='" + dbpar['dbname'] + "' user='" + dbpar['user'] +
        "' host='" + dbpar['host'] + "' port='" + dbpar['port'] + "' password='" + dpass + "'")
cur = conn.cursor()
# Init webhdfs client
hdfs = PyWebHdfsClient(host='namenode', port='50070')
# If model is not already uploaded, then upload
if inp not in hdfs_list:
print inp
hpath = inp.split('/')
hpath = hpath[len(hpath)-1]
# Upload to hdfs
hdfs.create_file('/sc5/models/'+hpath, open(inp,'rb'))
path = "http://namenode:50070/webhdfs/v1/sc5/models/"+hpath+"?op=OPEN"
# Insert to db
cur.execute("INSERT INTO models(origin,filename,hdfs_path,html) VALUES(\'"+method+"\',\'"+hpath+"\',\'"+path+"\',\'"+html+"\')")
# Commit changes
conn.commit()
cur.close()
conn.close()
|
iaklampanos/bde-pilot-2
|
data_ingest/ingest_model.py
|
Python
|
apache-2.0
| 2,793
|
[
"NetCDF"
] |
0bfe5529ca5a20f4620e81729ac17e4f72104072fcd8e73fc0a689b3ad574578
|
import numpy as np
from .shared import StaticContainerStore, StaticContainer
import mdtraj
from openpathsampling.netcdfplus import WeakLRUCache
variables = ['statics']
lazy = ['statics']
storables = ['statics']
dimensions = ['n_atoms', 'n_spatial']
def netcdfplus_init(store):
static_store = StaticContainerStore()
static_store.set_caching(WeakLRUCache(10000))
name = store.prefix + 'statics'
static_store.set_dimension_prefix_store(store)
store.storage.create_store(name, static_store, False)
store.create_variable(
'statics',
'lazyobj.' + name,
description="the snapshot index (0..n_configuration-1) of "
"snapshot '{idx}'.")
@property
def coordinates(snapshot):
"""
Returns
-------
coordinates: numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
        the atomic coordinates of the configuration. The coordinates are
        wrapped in a `simtk.unit.Quantity`.
"""
if snapshot.statics is not None:
return snapshot.statics.coordinates
return None
@coordinates.setter
def coordinates(self, value):
if value is not None:
sc = StaticContainer(coordinates=value, box_vectors=self.box_vectors)
else:
sc = None
self.statics = sc
@property
def box_vectors(snapshot):
"""
Returns
-------
box_vectors: numpy.ndarray, shape=(3, 3), dtype=numpy.float32
        the box_vectors of the configuration. The box vectors are wrapped in a
        `simtk.unit.Quantity`.
"""
if snapshot.statics is not None:
return snapshot.statics.box_vectors
return None
@box_vectors.setter
def box_vectors(self, value):
if value is not None:
sc = StaticContainer(box_vectors=value, coordinates=self.coordinates)
else:
sc = None
self.statics = sc
@property
def md(snapshot):
"""
Returns
-------
md : mdtraj.Trajectory
the actual trajectory object. Can be used with all functions from mdtraj
Notes
-----
Rather slow since the topology has to be made each time. Try to avoid it
"""
if snapshot.statics is not None:
n_atoms = snapshot.coordinates.shape[0]
output = np.zeros([1, n_atoms, 3], np.float32)
output[0, :, :] = snapshot.coordinates
return mdtraj.Trajectory(output, snapshot.topology.mdtraj)
@property
def xyz(snapshot):
"""
Returns
-------
xyz : numpy.ndarray, shape=(atoms, 3), dtype=numpy.float32
atomic coordinates without dimensions. Be careful.
"""
import simtk.unit as u
coord = snapshot.coordinates
if type(coord) is u.Quantity:
return coord._value
else:
return coord
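# Usage sketch (hedged): these module-level properties get composed onto
# snapshot classes by the engine's feature machinery, so on a composed snapshot:
#   snapshot.xyz   # bare ndarray; units stripped when coordinates are a Quantity
#   snapshot.md    # one-frame mdtraj.Trajectory (rebuilds the topology; slow)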
|
jhprinz/openpathsampling
|
openpathsampling/engines/features/statics.py
|
Python
|
lgpl-2.1
| 2,708
|
[
"MDTraj"
] |
6700509fecee92372273f537a0132c34d6db38681d29c21fc7fa204613898305
|
from ase.atoms import Atoms
from multiasecalc.lammps.reaxff import ReaxFF
from multiasecalc.lammps.compass import COMPASS
from multiasecalc.lammps.dynamics import LAMMPSOptimizer, LAMMPS_NVT
from multiasecalc.utils import get_datafile
from ase.data import s22
from ase import units
import numpy as np
import ase.io
from ase.io.trajectory import PickleTrajectory
atoms = s22.create_s22_system('Methane_dimer')
atoms.center(vacuum=10.0)
print atoms.positions
atoms.calc = COMPASS(ff_file_path=get_datafile('compass.frc'), debug=True)
optimizer = LAMMPSOptimizer(atoms)
optimizer.run()
print atoms.positions
atoms.calc = ReaxFF(ff_file_path=get_datafile('ffield.reax'), debug=True)
dyn = LAMMPS_NVT(atoms, 1*units.fs, 100, trajectory='test.traj', traj_interval = 2)
dyn.run(5)
atoms.calc = COMPASS(ff_file_path=get_datafile('compass.frc'), debug=True)
dyn.run(10)
trj = PickleTrajectory('test.traj', 'r')
for t in trj: print t.positions
|
csmm/multiase
|
tests/lammps/test_dynamics.py
|
Python
|
gpl-2.0
| 940
|
[
"ASE",
"LAMMPS"
] |
0b5f7f8ee488af7605a1040205dcd6a4ed575460b883179cfbb9036effd62219
|
'''
Created on Apr 2, 2014
Utilities and methods to control the Raspberry Pi's GPIO pins
@author: theoklitos
'''
import sys
import time
using_real_pi = True
try:
import RPi.GPIO as GPIO # @UnusedImport
except (RuntimeError, ImportError) as e:
print 'Dependency error: ' + str(e)
    print 'Cannot access GPIO pins. You either don\'t have RPi installed or are not using a raspberry-pi.\nIf you are using a raspberry-pi, visit https://pypi.python.org/pypi/RPi.GPIO\nFor now, dummy hardware mode will be enabled.'
using_real_pi = False
import util.dummy_GPIO as GPIO # @Reimport
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
def output_pin(pin_number, state):
""" will set the (boolean) state of the given pin_number # in GPIO.BOARD mode. Throws ValueError """
pin_number_int = int(pin_number)
GPIO.setup(pin_number_int, GPIO.OUT)
GPIO.output(pin_number_int, state)
#print '[Setting GPIO.output pin #' + str(pin_number_int) + " with state: " + str(state) + ']'
def output_pin_for_time(pin_number, state, seconds):
""" will set the (boolean) state of the given pin_number # in GPIO.BOARD mode.
After given seconds have passed, will flip the state around. Throws ValueError. """
output_pin(pin_number, state)
time.sleep(seconds)
GPIO.output(int(pin_number), not state)
def cleanup():
""" will call the RPi.GPIO library to cleanup all the hardware pin_number states """
GPIO.cleanup()
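# Example (hedged): pulse pin 7 high for half a second, then release resources:
#   output_pin_for_time(7, True, 0.5)
#   cleanup()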
# this module can also be used for some quick GPIO pin_number testing
if __name__ == "__main__":
number_of_arguments = len(sys.argv)
if number_of_arguments == 1: # no args, check all pins
for pin_number in range(1, 27):
output_pin_for_time(pin_number, True, 2)
    elif 3 <= number_of_arguments <= 4:  # correct args number: [pin] [state] plus optional [time]
        pin_number = sys.argv[1]
        if sys.argv[2] in ['true', 'True', 'yes', 'Yes', 'on', 'On']:
            state = True
        elif sys.argv[2] in ['false', 'False', 'no', 'No', 'off', 'Off']:
            state = False
        else:
            sys.exit('Boolean pin_number state ' + sys.argv[2] + ' not understood.')
        if number_of_arguments == 4:
            seconds = float(sys.argv[3])  # time.sleep needs a number, not a string
            output_pin_for_time(pin_number, state, seconds)
        else:
            output_pin(pin_number, state)
        cleanup()
else:
sys.exit('Wrong number of parameters, format should be [pin_number] [state] followed by an optional [time]')
|
Theoklitos/chestfreezer
|
chestfreezer-backend/hardware/chestfreezer_gpio.py
|
Python
|
mit
| 2,608
|
[
"VisIt"
] |
439ed1a77bd6e28409b31962efded729c100ea35ab3d31282e01cd1755ac9645
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division
from __future__ import print_function
from past.builtins import cmp
from builtins import map
from builtins import str
from builtins import range
from past.utils import old_div
import argparse
from prettytable import PrettyTable
import os
from mako.template import Template
from mako.lookup import TemplateLookup
import datetime
import orgparse
import hashlib
import re
import codecs
import json
import tempfile
import time
import subprocess
import shlex
from dulwich.repo import Repo as DRepo
import gc
from couchdb import *
from pg import get_new_idx,get_tags
from functools import cmp_to_key  # the cmp-style sort comparators below need this on Python 3
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
dhtmlgantt_dfmt = '%d-%m-%Y %H:%M'
iso_dfmt='%Y-%m-%dT%H:%M:%S'
P = init_conn()
def gantt_info_row(grow,excl=('status','created_at','summary','parent_id','tid','assignee')):
gantt = dict([(k,v) for k,v in list(grow.items()) if k not in excl])
l = [x for x in [gantt['t_f'],gantt['c_f']] if x is not None]
u = [x for x in [gantt['t_l'],gantt['c_l']] if x is not None]
task_activity_frame=[len(l) and min(l) or None,
len(u) and max(u) or None]
if grow.get('sdo'):
task_activity_frame[0]=datetime.datetime.strptime(grow.get('sdo'),iso_dfmt).date()
if grow.get('edo'):
task_activity_frame[1]=datetime.datetime.strptime(grow.get('edo'),iso_dfmt).date()
    # (as a fraction of the work estimate:)
thrs = gantt['t'] and (gantt['t'].days + old_div(float(gantt['t'].seconds),86400)) or 0
wehrs = gantt['we'] and (gantt['we'].days + old_div(float(gantt['we'].seconds),86400)) or None
if wehrs:
complete_estimate = old_div(thrs, wehrs)
else:
complete_estimate = None
gantt['ce'] = complete_estimate
gantt['taf']=task_activity_frame
# this is where we guess delivery date
#if task_activity_frame[0] and not task_activity_frame[1]: raise Exception(gantt)
# if task is done, then duration is the de-facto frame
if grow['status'] in ('DONE','CANCELLED',):
if len([x for x in task_activity_frame if x is not None])>1:
today = datetime.datetime.now().date()
if task_activity_frame[1]>today:
duration = (today-task_activity_frame[0]).days
dt='Db'
else:
duration = (task_activity_frame[1]-task_activity_frame[0]).days
dt='D'
else:
duration = None
dt='DN'
# otherwise, duration is an estimation of when it's going to be complete based on its progress
else:
if complete_estimate:
dursofar = datetime.datetime.now().date() - task_activity_frame[0]
estimate = old_div(float(dursofar.days), complete_estimate)
duration = int(estimate)
dt='E'
#raise Exception('going to estimate ',r['tid'],' based on ',gr,dursofar,estimate)
        # if we have no work estimate we cannot make a completion estimation
else:
duration = None
dt='EN'
gantt['dt']=dt
gantt['dur'] = duration
gantt['s']=True
return gantt
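# Worked example (hedged): with 2 days tracked ('t') against a 4-day work
# estimate ('we'), ce == 2/4 == 0.5; an open task whose activity started 10
# days ago then gets duration == int(10 / 0.5) == 20 with dt == 'E'.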
def gantt_info(C,tid):
gantt_labels = {'we':'work estimate',
's':'show in gantt',
'dt':'duration estimate type',
'dur':'duration estimate',
't_l':'tracked last',
't_f':'tracked first',
'c_f':'commited first',
'c_l':'committed last',
't':'tracked',
'c':'lines added',
'finish_date':'finish date estimate',
'ce':'completion estimate',
'taf':'task activity frame',
'sdo':'activity start override',
'edo':'activity end override'}
C.execute("select * from gantt where tid=%s",(tid,))
grow = C.fetchone()
if grow:
return gantt_labels,gantt_info_row(grow)
else:
return gantt_labels,{'s':False}
def org_render(ins):
proc = subprocess.Popen([os.path.join(os.path.dirname(__file__),'orgmode-render.php')],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
inss = ins.encode('utf-8')
ops = proc.communicate(input=inss)[0]
return ops.decode('utf-8')
def gso(cmd,close_fds=True,shell=False,executable=None):
if type(cmd)==list:
spl = cmd
else:
spl = shlex.split(cmd) #; spl[0]+='sadf'
p = subprocess.Popen(spl,
stdout=subprocess.PIPE,
close_fds=close_fds,
shell=shell,
executable=executable)
out, err = p.communicate()
return p.returncode,out
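# Example (hedged): gso('git status') returns (exit_code, captured_stdout);
# passing a list such as ['ls', '-l'] skips the shlex splitting step.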
def load_templates():
if not os.path.exists(cfg.MAKO_DIR): os.mkdir(cfg.MAKO_DIR)
_prefix = os.path.dirname(__file__)
if cfg.TPLDIR:
tpldir = cfg.TPLDIR
else:
tpldir = os.path.join(_prefix,'templates')
lk = TemplateLookup(directories=['.'])
rt = {}
taskfn = os.path.join(tpldir,'task.org')
assert os.path.exists(taskfn),"%s does not exist ; am in %s"%(taskfn,os.getcwd())
rt['task'] = Template(filename=(taskfn),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['iterations'] = Template(filename=os.path.join(tpldir,'iterations.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['tasks'] = Template(filename=os.path.join(tpldir,'tasks.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['taskindex'] = Template(filename=os.path.join(tpldir,'taskindex.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['iteration'] = Template(filename=os.path.join(tpldir,'iteration.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['new_story_notify'] = Template(filename=os.path.join(tpldir,'new_story_notify.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['change_notify'] = Template(filename=os.path.join(tpldir,'change_notify.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['changes'] = Template(filename=os.path.join(tpldir,'changes.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
rt['demo'] = Template(filename=os.path.join(tpldir,'demo.org'),lookup = lk,module_directory=cfg.MAKO_DIR)
return rt
ckre = re.compile('^'+re.escape('<!-- checksum:')+'([\d\w]{32})'+re.escape(' -->'))
def md5(fn):
st,op = gso('md5sum %s'%fn); assert st==0
op = op.split(' ')
return op[0]
def loadcommits():
global commits
if not len(commits):
if not os.path.exists(commitsfn):
commits={}
else:
commits = json.load(open(commitsfn,'r'))
return commits
tpls={}
def render(tplname,params,outfile=None,mode='w'):
"""helper to renders one of the mako templates defined above"""
global tpls
if not len(tpls):
tpls = load_templates()
t = tpls[tplname]
for par,val in list(params.items()):
try:
if type(val)==str:
val = str(val.decode('utf-8'))
params[par]=val
except:
print(val)
raise
r= t.render(**params)
if outfile:
#print 'working %s'%outfile; print params
fp = codecs.open(outfile,mode,encoding='utf-8') ; fp.write(r) ; fp.close()
#print 'written %s %s'%(tplname,pfn(outfile))
return True
return r
def purge_task(task,force=False):
t = get_task(task)
dn = os.path.dirname(t['path'])
assert os.path.isdir(dn)
ch = get_children(task)
if len(ch) and not force:
raise Exception('will not purge task with children unless --force is used.')
st,op = gso('rm -rf %s'%dn) ; assert st==0
return True
def pfn(fn):
if cfg.CONSOLE_FRIENDLY_FILES:
return 'file://%s'%os.path.abspath(fn)
else:
return fn
linkre = re.compile(re.escape('[[')+'([^\]]+)'+re.escape('][')+'([^\]]+)'+re.escape(']]'))
tokre = re.compile('^\- ([^\:]+)')
stokre = re.compile('^ \- (.+)')
date_formats = ['%Y-%m-%d %a','%Y-%m-%d','%Y-%m-%d %a %H:%M']
def parse_attrs(node,pth,no_tokagg=False):
try:
rt= dict([a[2:].split(' :: ') for a in node.split('\n') if a.startswith('- ') and ' :: ' in a])
tokagg={}
intok=False
for ln in node.split('\n'):
tokres = tokre.search(ln)
if not tokres:
if intok:
stok = stokre.search(ln)
if stok:
stok = stok.group(1)
res = linkre.search(stok)
if res:
url,anchor = res.groups()
tokagg[tok].append({'url':url,'anchor':anchor})
else:
tokagg[tok].append(stok)
else:
raise Exception('wtf is %s (under %s) parsing %s'%(ln,tok,pth))
else:
tok = tokres.group(1)
tokagg[tok]=[]
if tok in ['informed','links','repobranch']:
intok=True
except:
print(node.split('\n'))
raise
for k,v in list(rt.items()):
if k.endswith('date'):
for frm in date_formats:
try:
rt[k]=datetime.datetime.strptime(v.strip('<>[]'),frm)
break
except ValueError:
pass
if k in ['created_at']:
dt = v.strip('<>[]').split('.')
rt[k]=datetime.datetime.strptime(dt[0],'%Y-%m-%d %H:%M:%S')
if len(dt)>1:
rt[k]+=datetime.timedelta(microseconds=int(dt[1]))
if not no_tokagg:
for ta,tv in list(tokagg.items()):
rt[ta]=tv
return rt
taskfiles_cache={}
def flush_taskfiles_cache():
global taskfiles_cache
taskfiles_cache={}
def filterby(fieldname,value,rtl):
raise Exception('filterby',fieldname,value,rtl)
if fieldname in ['tagged']:
values = value.split(',')
else:
values = [value]
while len(values):
value = values.pop()
adir = os.path.join(cfg.DATADIR,fieldname,value)
fcmd = 'find %s -type l -exec basename {} \;'%adir
print(fcmd)
st,op = gso(fcmd) ; assert st==0,fcmd
atids = [atid.replace('.','/') for atid in op.split('\n')]
afiles = [os.path.join(cfg.DATADIR,atid,'task.org') for atid in atids]
rf = set(rtl).intersection(set(afiles))
rtl = list(rf)
return rtl
def get_latest(C,tags='email',newer_than=None,limit=300):
    nt = datetime.datetime.strptime(newer_than, iso_dfmt)
args = []
qry = """select je.*
from
journal_entries je,
task_tags t
where
1=1"""
if tags:
qry+=" and t.tag in %s"
args.append(tuple(tags))
qry+=""" and je.tid=t.id and
je.created_at>=%s
order by je.created_at desc
limit %s"""
args+=[nt,limit]
C.execute(qry,args)
tv = C.fetchall()
trets = [(t['created_at'],
get_task(C,t['tid']),
[], #TODO: tags
t['assignee'],
t['attrs'],
0 #TODO: num of journal updates
)
for t in tv]
trets.sort(key=lambda x:x[0],reverse=True)
return trets
def intersect(*d):
sets = iter(map(set, d))
result = next(sets)
for s in sets:
result = result.intersection(s)
return result
def get_fns(C,assignee=None,created=None,handled_by=None,informed=None,status=None,tag=None,recurse=True,query=None,newer_than=None,tids=None,recent=False):
"""return task filenames according to provided criteria"""
#raise Exception(assignee,created,informed)
qry = "select * from tasks t,tasks_pri_comb_lean p where t.id=p.id"
conds=[]
trets=[]
cnd=""
if assignee:
cnd+=" and contents->>'assignee'=%s"
conds.append(assignee)
if informed:
cnd+=" and contents->>'informed'=%s"
conds.append(informed)
if handled_by:
cnd+=" and contents->>'handled_by'=%s"
conds.append(handled_by)
if created:
cnd+=" and contents->>'creator'=%s"
conds.append(created)
if status:
cnd+=" and contents->>'status'=%s"
conds.append(status)
    if tag:
        cnd+=" and contents->'tags' @> %s::jsonb"
        conds.append(json.dumps(tag))
if tids:
cnd+=" and t.id in %s"
conds.append(tuple(tids))
if query:
qitems = [q.strip().lower() for q in query.split(' ') if len(q.strip())]
cndps = ['contents::text ilike %s' for q in qitems]
cndvs = ['%'+q+'%' for q in qitems]
cnd+=" and (%s)"%" or ".join(cndps)
for q in cndvs:
conds.append(q)
if recent:
newer_than=14
if newer_than:
cnd+=" and (contents->>'created_at')::timestamp>=now()-interval '%s day'"
conds.append(newer_than)
if not recurse:
cnd+= ' and parent_id is null'
print('QUERYING',qry+cnd,conds)
C.execute(qry+cnd,conds)
its = C.fetchall()
return its
# if len(trets)==1:
# its = dict([(t._id,t) for t in trets[0]])
# elif len(its):
# print(trets)
# raise NotImplementedError('need intersection between all results here')
# # ids = tuple(its.keys())
# # if len(ids):
# # priqry = "select id,comb_pri from tasks_pri_comb where id in %s"
# # C.execute(priqry,(ids,))
# # pris = C.fetchall()
# # for pri in pris:
# # its[pri['id']].pri = pri['comb_pri']
# # for k,v in list(its.items()):
# # if not hasattr(v,'pri'):
# # its[k].pri = 0
# return list(its.values())
def get_parent(tid,tl=False):
spl = tid.split('/')
if len(spl)>1:
if tl:
return spl[0]
else:
return spl[-2]
else:
return spl[0]
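# Example (hedged): get_parent('12/3/7') -> '3' (immediate parent), while
# get_parent('12/3/7', tl=True) -> '12' (top level); a top-level id like '12'
# returns itself.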
def status_srt(s1,s2):
cst = dict([(cfg.STATUSES[i],i) for i in range(len(cfg.STATUSES))])
return cmp(cst[s1[1]['status']],cst[s2[1]['status']])
def taskid_srt(s1,s2):
cst = dict([(cfg.STATUSES[i],i) for i in range(len(cfg.STATUSES))])
s1i = int(s1[1]['story'].split(cfg.STORY_SEPARATOR)[0])
s2i = int(s2[1]['story'].split(cfg.STORY_SEPARATOR)[0])
return cmp(s1i,s2i)
def hours_srt(s1,s2):
s1v = s1[1].get('total_hours',0)
s2v = s2[1].get('total_hours',0)
if not s1v and not s2v:
s1v = int(s1[1]['story'].split(cfg.STORY_SEPARATOR)[0])
s2v = int(s2[1]['story'].split(cfg.STORY_SEPARATOR)[0])
return cmp(s1v,s2v)
def hours_srt_2(h1,h2):
return cmp(h1[1]['last_tracked'],h2[1]['last_tracked'])*-1
def parse_iteration(pth):
iteration_name = os.path.basename(os.path.dirname(pth))
rt={'path':pth,'name':os.path.basename(os.path.dirname(pth))}
root = orgparse.load(pth)
for node in root[1:]:
head = node.get_heading()
if node.get_heading()=='Attributes':
attrs = parse_attrs(str(node),pth)
for k,v in list(attrs.items()): rt[k]=v
return rt
def get_table_contents(fn,force=False):
assert force==True,"get_table_contents is deprecated. everything is off to pg (%s)."%fn
ffn = os.path.join(fn)
fp = open(ffn,'r') ; gothead=False
def parseline(ln):
return [f.strip() for f in ln.split('|')][1:-1]
rt=[]
while True:
ln = fp.readline()
if not ln: break
if '|' in ln and not gothead:
headers = parseline(ln)
gothead=True
continue
if ln.startswith('|-'): continue
row = parseline(ln)
row = dict([(headers[i],row[i]) for i in range(len(row))])
rt.append(row)
#only active ones:
return rt
def get_participants(DATADIR,disabled=False,sort=False,force=False):
tconts = get_table_contents(os.path.join(DATADIR,'participants.org'),force=force)
rt={}
for row in tconts:
if disabled or row['Active']=='Y':
rt[row['Username']]=row
if sort:
rt = list(rt.items())
        rt.sort(key=lambda r: r[0])  # list.sort no longer accepts a bare cmp function
return rt
def get_story_trans():
tconts = get_table_contents(os.path.join(cfg.DATADIR,'taskmap.org'))
rt = {}
for t in tconts:
rt[t['Task']]=t['Target']
return rt
#raise Exception(tconts)
def add_notification(whom,about,what):
send_notification(whom,about,what,how=None,justverify=True)
t = get_task(about,read=True)
if os.path.exists(t['metadata']):
meta = loadmeta(t['metadata'])
else:
meta={}
if 'notifications' not in meta: meta['notifications']=[]
meta['notifications'].append({'whom':whom,'about':about,'what':what,'added':datetime.datetime.now().isoformat()})
savemeta(t['metadata'],meta)
def parse_change(t,body,descr=True):
ch = body.get('change',[])
if u'--- /dev/null' in ch:
verb='created'
else:
verb='changed'
if descr:
app = u' - %s'%t['summary']
else:
app = u''
stchangere=re.compile('^(\-|\+)\* (%s)'%'|'.join(cfg.STATUSES))
stch = [r for r in ch if stchangere.search(r)]
canlines=0
if len(stch)==2:
sw = stchangere.search(stch[0]).group(2)
sn = stchangere.search(stch[1]).group(2)
scdigest=('%s -> %s'%(sw,sn))
app+='; %s'%scdigest
canlines+=1
asgnchangere=re.compile('^(\-|\+)'+re.escape('- assigned to :: ')+'(.+)')
asch = [r for r in ch if asgnchangere.search(r)]
if len(asch)==2:
aw = asgnchangere.search(asch[0]).group(2)
an = asgnchangere.search(asch[1]).group(2)
asdigest=('reassigned %s -> %s'%(aw,an))
app+='; %s'%asdigest
canlines+=1
laddre = re.compile('^(\+)')
laddres = [r for r in ch[4:] if not r.startswith('+++') and laddre.search(r) or False] #skipping diff header
lremre = re.compile('^(\-)')
lremres = [r for r in ch[4:] if not r.startswith('+++') and lremre.search(r) or False] #skipping diff header
if len(laddres)==len(lremres):
if canlines!=len(laddres):
app+='; %sl'%(len(laddres))
elif verb=='changed':
app+=';'
if len(laddres): app+=' +%s'%len(laddres)
if len(lremres): app+='/-%s'%len(lremres)
    subject = '%s ch. by %s'%(t['story'],body.get('author_username','Unknown'))+app
return subject
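# Example (hedged): a diff that flips '* TODO' to '* DONE' and rewrites the
# 'assigned to' line yields a subject roughly like
#   '1/2 ch. by alice - Fix login; TODO -> DONE; reassigned alice -> bob'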
def send_notification(whom,about,what,how=None,justverify=False,body={},nonotify=False):
assert cfg.RENDER_URL,"no RENDER_URL specified in config."
assert cfg.SENDER,"no sender specified in config."
p = get_participants(cfg.DATADIR)
try:
email = p[whom]['email']
except KeyError:
#print '%s not in %s'%(whom,p.keys())
return False
t= get_task(about,read=True)
tpl = what+'_notify'
tf = tempfile.NamedTemporaryFile(delete=False,suffix='.org')
#try to figure out what changed
subject = parse_change(t,body)
#construct the rendered mail template informing of the change
if what=='change':
assert cfg.GITWEB_URL
assert cfg.DOCS_REPONAME
rdt = {'t':t,'url':cfg.RENDER_URL,'recipient':p[whom],'commit':how,'gitweb':cfg.GITWEB_URL,'docsrepo':cfg.DOCS_REPONAME,'body':body}
elif what in ['new_story']:
return False
else:
raise Exception('unknown topic %s for %s'%(what,about))
notify = render(tpl,rdt,tf.name)
#print open(tf.name,'r').read() ; raise Exception('bye')
if justverify:
return False
cmd = 'emacs -batch --visit="%s" --funcall org-export-as-html-batch'%(tf.name)
st,op = gso(cmd) ; assert st==0,cmd
expname = tf.name.replace('.org','.html')
#print 'written %s'%expname
assert os.path.exists(expname)
if body and body.get('authormail'):
sender = body.get('authormail')
else:
sender = cfg.SENDER
subject_utf8 = subject.encode('utf-8')
message = MIMEMultipart('alternative')
message['subject'] = subject_utf8
message['From'] = sender
message['To'] = '%s <%s>'%(p[whom]['Name'],email)
    # read back the emacs-exported HTML and attach it as the message body
    body_html = open(expname, 'r').read()
    part = MIMEText(body_html,'html')
    message.attach(part)
if not cfg.NONOTIFY and not nonotify:
s = smtplib.SMTP(cfg.SMTP_HOST)
s.sendmail(sender,email,message.as_string())
s.quit()
return True
def add_iteration(name,start_date=None,end_date=None):
raise Exception('TODO')
itdir = os.path.join(cfg.DATADIR,name)
itfn = os.path.join(itdir,'iteration.org')
assert not os.path.exists(itdir),"%s exists."%itdir
os.mkdir(itdir)
render('iteration',{'start_date':start_date,'end_date':end_date},itfn)
def add_task(P,C,parent=None,params={},force_id=None,tags=[],user=None,fetch_stamp=None):
print('in add_task')
if parent:
if force_id:
newidx = force_id
else:
newidx = get_new_idx(C,parent)
else:
print('is a top level task')
if force_id:
#make sure we don't have it already
newidx = str(force_id)
else:
print('getting a new index')
newidx = get_new_idx(C)
fullid = newidx
if type(params)==dict:
pars = dict(params)
else:
pars = params.__dict__
if 'created_at' not in pars:
pars['created_at'] = datetime.datetime.now()
if 'creator' not in pars:
pars['creator'] = cfg.CREATOR
if 'status' not in pars:
pars['status'] = cfg.DEFAULT_STATUS
for k in ['summary','assignee','points','detail']:
if k not in pars: pars[k]=None
if pars['summary'] and type(pars['summary'])==list:
pars['summary']=' '.join(pars['summary'])
    if not pars.get('informed'):
        pars['informed'] = []
    for ai in cfg.ALWAYS_INFORMED:
        if ai not in pars['informed']: pars['informed'].append(ai)
pars['tags']=tags
#print 'rendering'
t = Task()
t._id = fullid
t.path = fullid.split('/')
t.journal=[]
for k in pars:
setattr(t,k,pars[k])
t.save(P,C,user=user,fetch_stamp=fetch_stamp)
return t
def makehtml(notasks=False,files=[]):
pth = cfg.DATADIR
findcmd = 'find %s ! -wholename "*orgparse*" ! -wholename "*templates*" ! -wholename "*.git*" -iname "*.org" -type f'%(pth)
st,op = gso(findcmd) ; assert st==0
if len(files):
orgfiles = files
else:
orgfiles = [fn for fn in op.split('\n') if fn!='']
cnt=0
for orgf in orgfiles:
cnt+=1
if notasks and (os.path.basename(orgf)==cfg.TASKFN or os.path.exists(os.path.join(os.path.dirname(orgf),cfg.TASKFN))):
continue
outfile = os.path.join(os.path.dirname(orgf),os.path.basename(orgf).replace('.org','.html'))
needrun=False
if os.path.exists(outfile): #emacs is darn slow.
#invalidate by checksum
st,op = gso('tail -1 %s'%outfile) ; assert st==0
res = ckre.search(op)
if res and os.path.exists(orgf):
ck = res.group(1)
md = md5(orgf)
if ck!=md:
needrun=True
else:
needrun=True
else:
needrun=True
#print('needrun %s on %s'%(needrun,outfile))
if needrun:
cmd = 'emacs -batch --visit="%s" --funcall org-export-as-html-batch'%(orgf)
st,op = gso(cmd) ; assert st==0,"%s returned %s"%(cmd,op)
print('written %s'%pfn(outfile))
if os.path.exists(orgf):
md = md5(orgf)
apnd = '\n<!-- checksum:%s -->'%(md)
fp = open(outfile,'a') ; fp.write(apnd) ; fp.close()
assert os.path.exists(outfile)
print('processed %s orgfiles.'%cnt)
def by_status(stories):
rt = {}
for s in stories:
st = s[1]['status']
if st not in rt: rt[st]=[]
rt[st].append(s)
for st in rt:
        rt[st].sort(key=cmp_to_key(hours_srt),reverse=True)
return rt
def get_current_iteration(iterations):
raise Exception('TODO')
nw = datetime.datetime.now() ; current_iteration=None
for itp,it in iterations:
if ('start date' in it and 'end date' in it):
if (it['start date'].date()<=nw.date() and it['end date'].date()>=nw.date()):
current_iteration = (itp,it)
assert current_iteration,"no current iteration"
return current_iteration
def makeindex(C):
recent = [(tf,parse_fn(tf,read=True,gethours=True)) for tf in get_fns(C,recent=True)]
    recent.sort(key=cmp_to_key(hours_srt),reverse=True)
assignees={}
#create the dir for shortcuts
if not os.path.exists(cfg.SDIR): os.mkdir(cfg.SDIR)
#and render its index in the shortcuts folder
idxstories = [(fn,parse_fn(fn,read=True,gethours=True)) for fn in get_fns(C,recurse=True)]
vardict = {'term':'Index','value':'','stories':by_status(idxstories),'relpath':True,'statuses':cfg.STATUSES,'statusagg':{}}
routfile= os.path.join(cfg.SDIR,'index.org')
#print 'rendering %s'%routfile
render('tasks',vardict,routfile)
#print 'walking iteration %s'%it[0]
taskfiles = get_fns(C,recurse=True)
stories = [(fn,parse_fn(fn,read=True,gethours=True)) for fn in taskfiles]
stories_by_id = dict([(st[1]['id'],st[1]) for st in stories])
    stories.sort(key=cmp_to_key(taskid_srt),reverse=True)
#let's create symlinks for all those stories to the root folder.
for tl in stories:
tpath = tl[0]
taskid = '-'.join(tl[1]['story'].split(cfg.STORY_SEPARATOR))
spath = os.path.join(cfg.SDIR,taskid)
dpath = '/'+tl[1]['story']
ldest = os.path.join('..',os.path.dirname(tpath))
cmd = 'ln -s %s %s'%(ldest,spath)
needrun=False
if os.path.islink(spath):
ls = os.readlink(spath)
#print 'comparing %s <=> %s'%(ls,ldest)
if ls!=ldest:
os.unlink(spath)
needrun=True
#print 'needrun because neq'
else:
needrun=True
#print 'needrunq because nex %s'%(spath)
if needrun:
st,op = gso(cmd) ; assert st==0,"%s returned %s"%(cmd,st)
shallowstories = [st for st in stories if len(st[1]['story'].split(cfg.STORY_SEPARATOR))==1]
#aggregate subtask statuses
statusagg = {}
for st in stories:
        #calculate children
chids = ([sst[1]['id'] for sst in stories if sst[1]['id'].startswith(st[1]['id']) and len(sst[1]['id'])>len(st[1]['id'])])
if len(chids):
statuses = {}
for chid in chids:
sti = stories_by_id[chid]
if sti['status'] not in statuses: statuses[sti['status']]=0
statuses[sti['status']]+=1
statusagg[st[1]['id']]=statuses
vardict = {'term':'Iteration','value':it[1]['name'],'stories':by_status(shallowstories),'relpath':True,'statuses':cfg.STATUSES,'iteration':False,'statusagg':statusagg} #the index is generated only for the immediate 1-level down stories.
itidxfn = os.path.join(cfg.DATADIR,it[0],'index.org')
fp = open(itidxfn,'w') ; fp.write(open(os.path.join(cfg.DATADIR,it[0],'iteration.org')).read()) ; fp.close()
stlist = render('tasks',vardict,itidxfn,'a')
#we show an iteration index of the immediate 1 level down tasks
for st in stories:
#aggregate assignees
if st[1]['assigned to']:
asgn = st[1]['assigned to']
if asgn not in assignees: assignees[asgn]=0
assignees[asgn]+=1
#storycont = open( st[0],'r').read()
storyidxfn = os.path.join(os.path.dirname(st[0]),'index.org')
#print storyidxfn
ch = get_children(st[1]['story'])
for c in ch:
c['relpath']=os.path.dirname(c['path'].replace(os.path.dirname(st[1]['path'])+'/',''))
#print 'written story idx %s'%pfn(storyidxfn)
pars = {'children':ch,'story':st[1],'TASKFN':cfg.TASKFN,'GITWEB_URL':cfg.GITWEB_URL,'pgd':parsegitdate,'RENDER_URL':cfg.RENDER_URL}
if os.path.exists(pars['story']['metadata']):
pars['meta']=loadmeta(pars['story']['metadata'])
else:
pars['meta']=None
render('taskindex',pars,storyidxfn,'w')
fp = open(storyidxfn,'a') ; fp.write(open(st[1]['path']).read()) ; fp.close()
#print idxcont
participants = get_participants(cfg.DATADIR)
assigned_files={} ; excl=[]
for asfn in ['alltime','current']:
for assignee,storycnt in list(assignees.items()):
if assignee!=None and assignee not in participants:
if assignee not in excl:
#print 'excluding %s'%assignee
excl.append(assignee)
continue
afn = 'assigned-'+assignee+'-'+asfn+'.org'
ofn = os.path.join(cfg.DATADIR,afn)
if assignee not in assigned_files: assigned_files[assignee]={}
assigned_files[assignee][asfn]=afn
tf = get_fns(C,assignee=assignee,recurse=True)
stories = [(fn,parse_fn(fn,read=True,gethours=True,hoursonlyfor=assignee)) for fn in tf]
            stories.sort(key=cmp_to_key(status_srt))
vardict = {'term':'Assignee','value':'%s (%s)'%(assignee,storycnt),'stories':by_status(stories),'relpath':False,'statuses':cfg.STATUSES,'statusagg':{}}
cont = render('tasks',vardict,ofn)
vardict = {
'stories':stories,
'assigned_files':assigned_files,
'assignees':assignees,
'recent_tasks':recent,
'statusagg':{}
}
idxfn = os.path.join(cfg.DATADIR,'index.org')
itlist = render('iterations',vardict,idxfn)
cfn = os.path.join(cfg.DATADIR,'changes.org')
render('changes',{'GITWEB_URL':cfg.GITWEB_URL,'DOCS_REPONAME':cfg.DOCS_REPONAME,'pfn':parse_fn},cfn)
def list_stories(C,iteration=None,assignee=None,status=None,tag=None,recent=False):
files = get_fns(C,assignee=assignee,status=status,tag=tag,recent=recent)
pt = PrettyTable(['id','summary','assigned to','status','tags'])
pt.align['summary']='l'
cnt=0
for fn in files:
sd = parse_fn(fn,read=True)
if iteration and iteration.startswith('not ') and sd['iteration']==iteration.replace('not ',''):
continue
elif iteration and not iteration.startswith('not ') and sd['iteration']!=str(iteration): continue
if len(sd['summary'])>60: summary=sd['summary'][0:60]+'..'
else: summary = sd['summary']
pt.add_row([sd['story'],summary,sd['assigned to'],sd['status'],','.join(sd.get('tags',''))])
cnt+=1
pt.sortby = 'status'
print(pt)
print('%s stories.'%cnt)
def tokenize(n):
return '%s-%s'%(n['whom'],n.get('how'))
def imp_commits(args):
print('importing commits.')
if not os.path.exists(cfg.REPO_DIR): os.mkdir(cfg.REPO_DIR)
excommits = loadcommits()
for repo in cfg.REPOSITORIES:
print('running repo %s'%repo)
repon = os.path.basename(repo).replace('.git','')
repodir = os.path.join(cfg.REPO_DIR,os.path.basename(repo))
if not os.path.exists(repodir):
print('cloning.')
cmd = 'git clone -b staging %s %s'%(repo,repodir)
st,op = gso(cmd) ; assert st==0,"%s returned %s\n%s"%(cmd,st,op)
prevdir = os.getcwd()
os.chdir(repodir)
#refresh the repo
if not args.nofetch:
print('fetching at %s.'%os.getcwd())
st,op = gso('git fetch -a') ; assert st==0,"git fetch -a returned %s\n%s"%(st,op)
print('running show-branch')
cmd = 'git show-branch -r'
st,op = gso(cmd) ; assert st==0,"%s returned %s\n%s"%(cmd,st,op)
commits=[] ; ignoredbranches=[]
for ln in op.split('\n'):
if ln=='': continue
if ln.startswith('warning:'):
if 'ignoring' not in ln:
print(ln)
else:
ign = re.compile('origin/([^;]+)').search(ln).group(1)
ignoredbranches.append(ign)
continue
if ln.startswith('------'): continue
res = commitre.search(ln)
if res:
exact = res.group(1) ; branch = exact
#strip git niceness to get branch name
for sym in ['~','^']:branch = branch.split(sym)[0]
commits.append([exact,branch,False])
else:
if not re.compile('^(\-+)$').search(ln):
print('cannot extract',ln)
#now go over the ignored branches
if len(ignoredbranches):
for ign in set(ignoredbranches):
st,op = gso('git checkout origin/%s'%(ign)); assert st==0,"checkout origin/%s inside %s returned %s\n%s"%(ign,repodir,st,op)
st,op = gso('git log --pretty=oneline --since=%s'%(datetime.datetime.now()-datetime.timedelta(days=30)).strftime('%Y-%m-%d')) ; assert st==0
for lln in op.split('\n'):
if lln=='': continue
lcid = lln.split(' ')[0]
commits.append([lcid,ign,True])
#print 'added ign %s / %s'%(lcid,ign)
cnt=0 ; branches=[]
print('going over %s commits.'%len(commits))
for relid,branch,isexact in commits:
if isexact:
cmd = 'git show %s | head'%relid
else:
cmd = 'git show origin/%s | head'%relid
st,op = gso(cmd) ; assert st==0,"%s returned %s\n%s"%(cmd,st,op)
if op.startswith('fatal'):
raise Exception('%s returned %s'%(cmd,op))
cres = cre.search(op)
dres = dre.search(op)
if not dres: raise Exception(op)
dt = dres.groups()[0]
cid = cres.group(1)
author,email = are.search(op).groups()
un = cfg.COMMITERMAP(email,author)
storyres = sre.search(op)
if storyres:
task = storyres.group(1)
else:
task = None
cinfo = {'s':dt,'br':[branch],'u':un,'t':task} #'repo':repon, 'cid':cid <-- these are out to save space
if branch not in branches: branches.append(branch)
key = '%s/%s'%(repon,cid)
if key not in excommits:
excommits[key]=cinfo
else:
if branch not in excommits[key]['br']:
excommits[key]['br'].append(branch)
cnt+=1
#print '%s: %s/%s %s by %s on task %s'%(dt,repon,branch,cid,un,task)
print('found out about %s commits, branches %s'%(cnt ,branches))
os.chdir(prevdir)
fp = open(commitsfn,'w')
json.dump(excommits,fp,indent=True,sort_keys=True) ; fp.close()
def loadmeta(fn):
if os.path.exists(fn):
return json.load(open(fn))
else:
return {}
def savemeta(fn,dt):
fp = open(fn,'w')
json.dump(dt,fp,sort_keys=True,indent=True)
fp.close()
numonths = 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec'.split('|')
redt = re.compile('^(Sun|Mon|Tue|Wed|Thu|Fri|Sat) ('+'|'.join(numonths)+') ([0-9]{1,2}) ([0-9]{2})\:([0-9]{2})\:([0-9]{2}) ([0-9]{4}) (\-|\+)([0-9]{4})')
def parsegitdate(s):
dtres = redt.search(s)
wd,mon,dy,hh,mm,ss,yy,tzsign,tzh = dtres.groups()
dt = datetime.datetime(year=int(yy),
month=int(numonths.index(mon)+1),
day=int(dy),
hour=int(hh),
minute=int(mm),
second=int(ss))
return dt
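# Example (hedged): parsegitdate('Mon Apr 7 14:03:22 2014 +0200') returns
# datetime.datetime(2014, 4, 7, 14, 3, 22); the timezone offset is matched by
# the regex but deliberately discarded.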
def assign_commits(C):
exc = json.load(open(commitsfn,'r'))
metas={}
print('going over commits.')
for ck,ci in list(exc.items()):
#HEAD actually means staging in our book.
branches = [cibr.replace('HEAD','staging') for cibr in ci['br']]
#check if commit is on staging and exclude from other branches if so
if 'staging' in branches: branches=['staging']
if not ci['t']: continue
repo,cid = ck.split('/')
t = get_task(C,ci['t'],exc=False)
if not t:
strans = get_story_trans()
if ci['t'] in strans:
print('translating %s => %s'%(ci['t'],strans[ci['t']]))
if strans[ci['t']]=='None':
continue
t = get_task(C,strans[ci['t']])
else:
print('could not find task %s, which was referenced in %s: %s'%(ci['t'],ck,ci))
continue
#metadata cache
if t['metadata'] not in metas:
m = loadmeta(t['metadata'])
metas[t['metadata']]=m
m['commits_qty']=0 #we zero it once upon load to be incremented subsequently
else: m = metas[t['metadata']]
dt = parsegitdate(ci['s'])
m['commits_qty']+=1
if not m.get('last_commit') or dt>=parsegitdate(m['last_commit']): m['last_commit']=ci['s']
repocommiter = '-'.join([repo,ci['u']])
if 'commiters' not in m: m['commiters']=[]
if repocommiter not in m['commiters']: m['commiters'].append(repocommiter)
for cibr in branches:
repobranch = '/'.join([repo,cibr])
if 'branches' not in m: m['branches']=[]
if repobranch not in m['branches']: m['branches'].append(repobranch)
lastdatekey = '%s-%s'%(repo,ci['u'])
if 'lastcommits' not in m: m['lastcommits']={}
if lastdatekey not in m['lastcommits'] or parsegitdate(m['lastcommits'][lastdatekey])<dt:
m['lastcommits'][lastdatekey]=ci['s']
for cibr in branches:
lastbranchkey = '%s/%s'%(repo,cibr)
if 'branchlastcommits' not in m: m['branchlastcommits']={}
if lastbranchkey not in m['branchlastcommits'] or parsegitdate(m['branchlastcommits'][lastbranchkey])<dt:
m['branchlastcommits'][lastbranchkey]=ci['s']
print('saving.')
for fn,m in list(metas.items()):
savemeta(fn,m)
print('%s metas touched.'%(len(metas)))
def tasks_validate(C,tasks=None,catch=True,amend=False,checkhours=True,checkreponames=True):
cnt=0 ; failed=0
tasks = [t for t in tasks if t!=None]
p = get_participants(cfg.DATADIR,disabled=True)
firstbad=None
if tasks:
tfs = [get_task(C,taskid)['path'] for taskid in tasks]
else:
tfs = get_fns(C)
for tf in tfs:
try:
t = parse_fn(tf)
if checkreponames and t.get('meta') and t['meta'].get('branchlastcommits'):
for blc in t['meta'].get('branchlastcommits'):
try:
assert '/' in blc,"%s has no /"%(blc)
assert len(blc.split('/'))<=2,"%s has too many /"%(blc)
assert 'HEAD' not in blc,"%s has HEAD"%(blc)
except Exception as e:
if amend:
print('amending %s'%e)
for fn in ['lastcommits','commits_qty','branchlastcommits','commiters','last_commit','branches']:
if t['meta'].get(fn):
del t['meta'][fn]
savemeta(t['metadata'],t['meta'])
else:
raise
if t.get('meta') and t['meta'].get('commiters'):
for blc in t['meta'].get('commiters'):
br,person = blc.split('-')
assert '.' not in person,"bad commiter - %s"%person
if checkhours and t.get('person_hours'):
for person,hrs in t.get('person_hours'):
try:
assert '@' not in person,"hours in person: %s is bad"%(person)
except Exception as e:
if amend:
print('amending %s'%e)
hrsfn = t['metadata'].replace('meta.json','hours.json')
hrsm = loadmeta(hrsfn)
for hdt,items in list(hrsm.items()):
if person in items:
if not firstbad or hdt<firstbad: firstbad=hdt
assert os.path.exists(hrsfn)
savemeta(hrsfn,{})
else:
raise
assert t['summary']
assert t['assigned to']
assert t['created by']
assert t['status']
if t['assigned to'] and t['assigned to']!='None':
assert t['assigned to'] in p
if t['created by'] and t['created by']!='None':
assert t['created by'] in p
#checking for dupes
cmd = "find %s -type f -iregex '^([^\]+)/%s$'"%(cfg.DATADIR,'/'.join([t['id'],'task.org']))
st,op = gso(cmd) ; assert st==0
dfiles = op.split('\n')
assert len(dfiles)==1,"%s is not 1 for %s"%(dfiles,cmd)
#print '%s : %s , %s , %s, %s'%(t['id'],t['summary'] if len(t['summary'])<40 else t['summary'][0:40]+'..',t['assigned to'],t['created by'],t['status'])
cnt+=1
except Exception as e:
if not catch: raise
print('failed validation for %s - %s'%(tf,e))
failed+=1
print('%s tasks in all; %s failed; firstbad=%s'%(cnt,failed,firstbad))
return failed
def addlink(C,tsaves,tid,r):
if tid not in tsaves: tsaves[tid]=get_task(C,tid)
assert r>tid,"%s>%s ?"%(r,tid)
t = tsaves[tid]
if 'cross_links' not in t.__dict__:
t.cross_links=[]
if r not in t.cross_links:
tcheck = get_task(C,r)
print('task.%s -> +%s'%(t._id,r))
t.cross_links.append(r)
return True
return False
def rmlink(C,tsaves,tid,r):
if tid not in tsaves: tsaves[tid]=get_task(C,tid)
t = tsaves[tid]
if t['cross_links'] and r in t.cross_links:
print('task.%s -> -%s'%(t._id,r))
t.cross_links.remove(r)
return True
return False
def get_karma_receivers(C):
karma={}
C.execute("select id,dt,reciever,sum(points) points from karma group by id,dt,reciever order by dt desc")
return C.fetchall()
def get_karma(C,date,user):
C.execute("select * from karma where reciever=%s and dt=%s",(user,date))
return C.fetchall()
def deps_validate(C,tsaves,tid,deps):
print(('deps_validate',tid,deps))
t = tsaves[tid]
# avoid adding task itself as its own dependency
avoid=[tid]
# avoid circular dependencies
C.execute("select tid from tasks_deps_hierarchy where depid=%s",(tid,))
depids = [d['tid'] for d in C.fetchall()]
avoid+=depids
# make sure they indeed are valid tasks
for d in deps:
try:
Task.get(C,d)
except IndexError:
avoid.append(d)
# clean the disallowed deps
for av in avoid:
if str(av) in deps:
deps.remove(av)
t.dependencies.remove(av)
    # de-duplicate the remaining list
t.dependencies = list(set(t.dependencies))
deps = list(set(deps))
#print(tid,'just retained deps',t.dependencies)
return deps
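# Example (hedged): if task 5 already depends (transitively) on task 9, then
# deps_validate(C, tsaves, '9', deps) strips '5' from the proposed deps, along
# with '9' itself and any ids that don't resolve to real tasks.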
def rewrite(P,C,tid,o_params={},safe=True,user=None,fetch_stamp=None):
tsaves={} #this dict contains all couchdb task objects we're working with
tsaves[tid] = get_task(C,tid)
assert tid
#print 'working %s'%tid
clinks = list(set(o_params['cross_links']))
deps = list(set(o_params['dependencies']))
t = tsaves[tid]
#raise Exception('clr',o_params['cross_links_raw'])
e = [ce.split("-") for ce in o_params['cross_links_raw'].split(",") if ce!='']
#remove previous cross links
for cxa in e:
cxas = sorted(cxa)
assert len(cxas)==2,"%s wrong length"%cxas
rmlink(C,tsaves,cxas[0],cxas[1])
#because cross links are bidirectional, we want to always set them on the lower of the pair,
#the view Task/crosslinks allows us to see the crosslink from both its ends
clpairs = [sorted([tid,cli]) for cli in clinks]
for clk,cld in clpairs:
addlink(C,tsaves,clk,cld)
params = {
'status':t['status'],
'summary':t['summary'],
# 'created_at':t['created_at'], # DO NOT TOUCH CREATED AT
# 'creator':t['creator'], # DO NOT TOUCH CREATOR!
'tags':t['tags'],
'assignee':t['assignee'],
'dependencies':hasattr(t,'dependencies') and t.dependencies or [],
#'points':t.get('points','?'),
'informed':hasattr(t,'informed') and t.informed or [],
'links':t.links,
'unstructured':t.unstructured.strip(),
'branches':t.branches,
'external_id':hasattr(t,'external_id') and t.external_id or None,
'external_thread_id':hasattr(t,'external_thread_id') and t.external_thread_id or None,
'external_msg_id':hasattr(t,'external_msg_id') and t.external_msg_id or None,
}
for k,v in list(o_params.items()):
if k in ['cross_links','cross_links_raw','created_at','creator']: continue
if k not in ['karma','orig_subj','handled_by']: assert k in params,"%s not in %s"%(k,params)
params[k]=v
for k,v in list(params.items()):
if 'cross_links' in k: continue
if k not in ['informed','external_id','external_thread_id','external_msg_id','karma','orig_subj','handled_by','dependencies']:
assert hasattr(t,k),"task does not have %s"%k
if k=='dependencies': v=list(v)
#print('setattr(',t,k,v,')',type(v))
setattr(t,k,v)
deps = deps_validate(C,tsaves,tid,deps)
#print('deps of tsaves',tid,'is ',tsaves[tid].dependencies)
for tk,ts in list(tsaves.items()):
print(ts,'save(user=%s)'%user)
ts.save(P,C,user=user,fetch_stamp=tk==tid and fetch_stamp or None)
def make_demo(iteration,tree=False,orgmode=False):
from tree import Tree
tf = [parse_fn(tf) for tf in get_fns(C,iteration=iteration,recurse=True)]
def tf_srt(s1,s2):
rt=cmp(len(s1['id'].split(cfg.STORY_SEPARATOR)),len(s2['id'].split(cfg.STORY_SEPARATOR)))
if rt!=0: return rt
return 0
    tf.sort(key=cmp_to_key(tf_srt))
tr = {'children':{}}
tr2 = Tree('Iteration: '+iteration)
for s in tf:
spointer = tr
spointer2 = tr2
parts = s['id'].split(cfg.STORY_SEPARATOR)
#print 'walking parts %s'%parts
initparts = list(parts)
joinedparts=[]
while len(parts):
prt = parts.pop(0)
joinedparts.append(prt)
tsk = get_task(cfg.STORY_SEPARATOR.join(joinedparts))
tags = (tsk['assigned to'],)+tuple(tsk['tags'])
summary = (tsk['summary'] if len(tsk['summary'])<80 else tsk['summary'][0:80]+'..')
if 'priority' in tsk['tags']: summary='_%s_'%summary
tname = ('[[file:%s][%s]]'%(tsk['path'],prt) if orgmode else prt)+' '+tsk['status']+'\t'+summary+('\t\t:%s:'%(':'.join(tags)) if len(tags) else '')
tpt = Tree(tname)
if prt not in spointer['children']:
spointer['children'][prt]={'children':{}}
spointer2.children = spointer2.children+(tpt,)
spointer=spointer['children'][prt]
fnd=False
for ch in spointer2.children:
if ch.name==tname:
spointer2=ch
fnd=True
assert fnd,"could not find \"%s\" in %s, initparts are %s"%(tname,[ch.name for ch in spointer2.children],initparts)
spointer['item']={'summary':s['summary'],'assignee':s['assigned to'],'status':s['status'],'id':s['id']}
if tree:
print(str(tr2))
else:
render('demo',{'trs':tr,'iteration':iteration,'rurl':cfg.RENDER_URL},'demo-%s.org'%iteration)
def index_assigned(C,tid=None,dirname='assigned',idxfield='assigned to'):
asgndir = os.path.join(cfg.DATADIR,dirname)
if tid:
st,op = gso('find %s -type l -iname %s -exec rm {} \;'%(asgndir,tid.replace('/','.'))) ; assert st==0
tfs = [get_task(tid)['path']]
else:
tfs = get_fns(C)
st,op = gso('rm -rf %s/*'%(asgndir)) ; assert st==0
assert os.path.exists(asgndir),"%s does not exist"%asgndir
print('reindexing %s task files'%(len(tfs)))
acnt=0
for fn in tfs:
#print 'parsing %s'%fn
pfn = parse_fn(fn,read=False,getmeta=False)
#print 'parsed %s ; getting task'%pfn['id']
t = get_task(pfn['id'],read=True)
#print t['id'],t['assigned to']
if type(t[idxfield]) in [str,str]:
myidxs=[t[idxfield]]
else:
myidxs=t[idxfield]
for myidx in myidxs:
blpath = os.path.join(asgndir,myidx)
if not os.path.exists(blpath):
os.mkdir(blpath)
assert os.path.exists(blpath)
# st,op = gso('mkdir %s'%blpath) ; assert st==0
acnt+=1
tpath = os.path.join(blpath,t['id'].replace('/','.'))
lncmd = 'ln -s %s %s'%(fn,tpath)
#print lncmd
if not os.path.exists(tpath):
os.symlink(fn,tpath)
#st,op = gso(lncmd) ; assert st==0,lncmd
assert os.path.exists(tpath)
print('indexed under %s %s'%(acnt,idxfield))
def index_tasks(tid=None,reindex_attr=None):
dnf = {'creators':'created by',
'assigned':'assigned to',
'tagged':'tags'}
if reindex_attr: assert reindex_attr in list(dnf.keys())
for dn,attr_name in list(dnf.items()):
if reindex_attr and reindex_attr!=dn: continue
print('reindexing %s (tid %s)'%(dn,tid))
fdn = os.path.join(cfg.DATADIR,dn)
st,op = gso('rm -rf %s/*'%fdn); assert st==0
        index_assigned(C,tid,dn,attr_name)
def initvars(cfg_ref):
global commits,commitsfn,commitre,cre,are,sre,dre,cfg
cfg=cfg_ref
commits = {}
commitsfn = os.path.join(cfg.DATADIR,'commits.json')
commitre = re.compile('\[origin\/([^\]]+)\]')
cre = re.compile('commit ([0-9a-f]{40})')
are = re.compile('Author: ([^<]*) <([^>]+)>')
sre = re.compile('#([0-9'+re.escape(cfg.STORY_SEPARATOR)+']+)')
dre = re.compile('Date: (.*)')
if __name__=='__main__':
import config as cfg
initvars(cfg)
parser = argparse.ArgumentParser(description='Task Control',prog='tasks.py')
subparsers = parser.add_subparsers(dest='command')
idx = subparsers.add_parser('reindex')
idx.add_argument('--reindex-attr',dest='reindex_attr',action='store')
lst = subparsers.add_parser('list')
lst.add_argument('--assignee',dest='assignee')
lst.add_argument('--status',dest='status')
lst.add_argument('--tag',dest='tag')
lst.add_argument('--recent',dest='recent',action='store_true')
gen = subparsers.add_parser('index')
html = subparsers.add_parser('makehtml')
html.add_argument('--notasks',dest='notasks',action='store_true')
html.add_argument('files',nargs='*')
nw = subparsers.add_parser('new')
nw.add_argument('--parent',dest='parent')
nw.add_argument('--assignee',dest='assignee')
nw.add_argument('--id',dest='id')
nw.add_argument('--tag',dest='tags',action='append')
nw.add_argument('summary',nargs='+')
purge = subparsers.add_parser('purge')
purge.add_argument('tasks',nargs='+')
purge.add_argument('--force',dest='force',action='store_true')
show = subparsers.add_parser('show')
show.add_argument('tasks',nargs='+')
move = subparsers.add_parser('move')
move.add_argument('fromto',nargs='+')
ed = subparsers.add_parser('edit')
ed.add_argument('tasks',nargs='+')
pr = subparsers.add_parser('process_notifications')
pr.add_argument('--nocommit',dest='nocommit',action='store_true')
pr.add_argument('--nonotify',dest='nonotify',action='store_true')
pr.add_argument('--renotify',dest='renotify')
ch = subparsers.add_parser('changes')
ch.add_argument('--notifications',dest='notifications',action='store_true')
ch.add_argument('--feed',dest='feed',action='store_true')
git = subparsers.add_parser('fetch_commits')
git.add_argument('--nofetch',dest='nofetch',action='store_true')
git.add_argument('--import',dest='imp',action='store_true')
git.add_argument('--assign',dest='assign',action='store_true')
git = subparsers.add_parser('makedemo')
git.add_argument('--tree',dest='tree',action='store_true')
git.add_argument('--orgmode',dest='orgmode',action='store_true')
val = subparsers.add_parser('validate')
val.add_argument('--nocatch',action='store_true',default=False)
val.add_argument('--nocheckhours',action='store_true',default=False)
val.add_argument('--amend',action='store_true',default=False)
val.add_argument('tasks',nargs='?',action='append')
commit = subparsers.add_parser('commit')
commit.add_argument('--tasks',dest='tasks',action='store_true')
commit.add_argument('--metas',dest='metas',action='store_true')
commit.add_argument('--hours',dest='hours',action='store_true')
commit.add_argument('--nopush',dest='nopush',action='store_true')
tt = subparsers.add_parser('time_tracking')
tt.add_argument('--from',dest='from_date')
tt.add_argument('--to',dest='to_date')
rwr = subparsers.add_parser('rewrite')
rwr.add_argument('--safe',dest='safe',action='store_true')
rwr.add_argument('tasks',nargs='?',action='append')
args = parser.parse_args()
if args.command=='list':
list_stories(C,assignee=args.assignee,status=args.status,tag=args.tag,recent=args.recent)
if args.command=='reindex':
index_tasks(reindex_attr=args.reindex_attr)
if args.command=='index':
makeindex()
if args.command=='makehtml':
makehtml(notasks=args.notasks,files=args.files)
if args.command=='new':
task = add_task(P,C,parent=args.parent,params=args,force_id=args.id,tags=args.tags)
if args.command=='purge':
for task in args.tasks:
purge_task(task,bool(args.force))
if args.command=='show':
for task in args.tasks:
t = get_task(task)
print(t)
if args.command=='move':
tasks = args.fromto[0:-1]
dest = args.fromto[-1]
for task in tasks:
move_task(task,dest)
if args.command=='edit':
tfiles = [get_task(t)['path'] for t in args.tasks]
cmd = 'emacs '+' '.join(tfiles)
st,op=gso(cmd)
if args.command=='process_notifications':
process_notifications(args)
if args.command=='fetch_commits':
if args.imp:
imp_commits(args)
if args.assign:
assign_commits(C)
if args.command=='makedemo':
make_demo(tree=args.tree,orgmode=args.orgmode)
if args.command=='validate':
tasks_validate(C,args.tasks,catch=not args.nocatch,amend=args.amend,checkhours = not args.nocheckhours)
if args.command=='commit':
prevdir = os.getcwd()
os.chdir(cfg.DATADIR)
st,op = gso('git pull') ; assert st==0
commitm=[]
if args.tasks:
st,op = gso('git add *task.org') ; assert st==0
commitm.append('tasks commit')
if args.metas:
st,op = gso('git add *meta.json') ; assert st==0
commitm.append('metas commit')
if args.hours:
st,op = gso('git add *hours.json') ; assert st==0
commitm.append('hours commit')
st,op = gso('git status') ; assert st==0
print(op)
cmd = 'git commit -m "%s"'%("; ".join(commitm))
st,op = gso(cmd) ;
if 'no changes added to commit' in op and st==256:
print('nothing to commit')
else:
assert st==0,"%s returned %s\n%s"%(cmd,st,op)
if not args.nopush:
cmd = 'git push'
st,op = gso(cmd) ; assert st==0,"%s returned %s\n%s"%(cmd,st,op)
print('pushed to remote')
os.chdir(prevdir)
if args.command=='rewrite':
atasks = [at for at in args.tasks if at]
if not len(atasks):
tasks = [parse_fn(tf)['id'] for tf in get_fns(C)]
else:
tasks = atasks
for tid in tasks:
rewrite(P,C,tid,safe=args.safe)
if args.command=='time_tracking':
if args.from_date:from_date = datetime.datetime.strptime(args.from_date,'%Y-%m-%d').date()
else:from_date = (datetime.datetime.now()-datetime.timedelta(days=1)).date()
if args.to_date:to_date = datetime.datetime.strptime(args.to_date,'%Y-%m-%d').date()
else:to_date = (datetime.datetime.now()-datetime.timedelta(days=1)).date()
files = get_fns(C)
metafiles = [os.path.join(os.path.dirname(fn),'hours.json') for fn in files]
agg={} ; tagg={} ; sagg={} ; pagg={} ; tcache={}
maxparts=0
for mf in metafiles:
m = loadmeta(mf)
tf= parse_fn(mf)
sid = tf['story']
sparts = sid.split(cfg.STORY_SEPARATOR)
tlsid = sparts[0]
if len(sparts)>maxparts: maxparts=len(sparts)
for k in m:
mk = datetime.datetime.strptime(k,'%Y-%m-%d').date()
if mk>=from_date and mk<=to_date:
#print mk,m[k],sid
for person,hours in list(m[k].items()):
if sid not in agg:
agg[sid]={}
if tlsid not in tagg:
tagg[tlsid]={}
if tlsid not in sagg:
sagg[tlsid]={}
if person not in pagg:
pagg[person]=0
if person not in agg[sid]:
agg[sid][person]=0
if person not in tagg[tlsid]:
tagg[tlsid][person]=0
if '--' not in sagg[tlsid]:
sagg[tlsid]['--']=0
agg[sid][person]+=hours
tagg[tlsid][person]+=hours
sagg[tlsid]['--']+=hours
pagg[person]+=hours
        print('* per-Participant (time tracked) view')
ptp = PrettyTable(['Person','Hours'])
ptp.sortby='Hours'
htot=0
for person,hours in list(pagg.items()):
ptp.add_row([person,hours])
htot+=hours
ptp.add_row(['TOT',htot])
print(ptp)
for smode in ['detailed','tl','sagg']:
headers = ['Summary','Person','Hours']
if smode=='detailed':
tcols = ['Task %s'%i for i in range(maxparts)] + headers
mpadd=3
cyc = list(agg.items())
print('* Detailed view')
elif smode=='tl':
tcols = ['Task 0'] + headers
mpadd=1
cyc = list(tagg.items())
print('* Top Level Task view')
elif smode=='sagg':
tcols=['Task 0']+ ['Summary','Hours']
mpadd=0
cyc = list(sagg.items())
print('* per-Task view')
pt = PrettyTable(tcols)
pt.align['Summary']='l'
hrs=0
if smode=='sagg':
pt.sortby='Hours'
for sid,people in cyc:
for person,hours in list(people.items()):
if sid not in tcache:
tcache[sid] = get_task(sid)
td = tcache[sid]
summary = td['summary'] if len(td['summary'])<60 else td['summary'][0:60]+'..'
sparts = sid.split(cfg.STORY_SEPARATOR)
while len(sparts)<maxparts:
sparts.append('')
dt = [summary,person,"%4.2f"%hours]
if smode =='detailed':
dt=sparts+dt
elif smode=='tl':
dt=[sparts[0]]+dt
elif smode=='sagg':
dt=[sparts[0]]+[summary,hours]
hrs+=hours
pt.add_row(dt)
if smode!='sagg':
pt.add_row(['--' for i in range(maxparts+mpadd)])
pt.add_row(['TOT']+['--' for i in range(maxparts+mpadd-2)]+["%4.2f"%hrs])
print(pt)
def get_parent_descriptions(tid):
#print 'getting parent descriptions of %s'%tid
#obtain parent descriptions
parents = tid.split('/')
opar=[]
for i in range(len(parents)-1): opar.append('/'.join(parents[:i+1]))
parents = [(pid,get_task(pid)['summary']) for pid in opar]
return parents
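# Illustration (added): the prefix expansion above turns a hierarchical id
# into its ancestor ids; a minimal standalone equivalent:
def _ancestor_ids(tid):
    parts = tid.split('/')
    return ['/'.join(parts[:i+1]) for i in range(len(parts)-1)]
# _ancestor_ids('epic/story/task') == ['epic', 'epic/story']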
def read_current_metastates_worker(items,metainfo=False):
rt={} ;
for i in items:
if i.get('content'):
content={
#'value':org_render(i['content']),
'raw':i['content'],
'updated':i['created_at'],
'updated by':i['creator']}
for attr,attrv in list(i['attrs'].items()):
if metainfo:
rt[attr]={'value':attrv,
'updated':i['created_at'],
'updated by':i['creator']}
else:
rt[attr]=attrv
return rt
def read_current_metastates(t,metainfo=False):
content=None
items = t['journal']
return read_current_metastates_worker(items,metainfo),content
def read_journal(t,date_limit=None,state_limit=None):
    if date_limit: raise NotImplementedError('date_limit')
    if state_limit: raise NotImplementedError('state_limit')
    try:
        rt = t['journal']
    except Exception:
        raise Exception(t)
return rt
def get_all_journals(day=None):
return get_journals(day)
def render_journal_content(user,content,metastates):
now = datetime.datetime.now()
cnt = """\n** <%s> :%s:\n"""%(now.strftime(date_formats[2]),user)
if len(metastates):
cnt+="*** Attributes\n"
for ms,msv in list(metastates.items()):
cnt+="- %s :: %s\n"%(ms,msv)
if len(content):
cnt+="*** Content\n"
cnt+=content.replace('\r','')+'\n'
return cnt
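# Example (added; assumes date_formats[2] is a '%Y-%m-%d ...' timestamp
# format): render_journal_content('alice', 'looked into it',
# {'status': 'TODO'}) produces an org-mode entry shaped like
#   ** <2024-01-01 12:00> :alice:
#   *** Attributes
#   - status :: TODO
#   *** Content
#   looked into it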
def append_journal_entry(P,C,task,adm,content,metastates={},created_at=None):
try:
assert len(metastates) or len(content)
except TypeError as e:
print(('metastates',metastates,'content',content))
raise
for k,v in list(metastates.items()):
assert k in cfg.METASTATES_FLAT,"%s not in metastates"%k
if type(cfg.METASTATES_FLAT[k])==tuple:
assert v in cfg.METASTATES_FLAT[k],"%s not in %s"%(v,cfg.METASTATES_FLAT[k])
else:
inptp,inpstp = cfg.METASTATES_FLAT[k].split('(')
inpstp = inpstp.split(')')[0]
if inptp=='INPUT':
if inpstp=='number': assert re.compile('^([0-9\.]+)$').search(v)
elif inpstp.lower()=='date':
assert re.compile('^([0-9]{4})-([0-9]{2})-([0-9]{2})$').search(v)
else: raise Exception('unknown inpstp %s'%inpstp)
elif inptp=='ID': #allow unique identifiers in journal entries
pass
else:
raise Exception('unknown inptp %s'%inptp)
tid = task._id
    item = {'content':content,'created_at':created_at or datetime.datetime.now(),'attrs':metastates,'creator':adm}
task.journal.append(item)
task.save(P,C,user=adm)
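# Illustration (added; hypothetical config): cfg.METASTATES_FLAT maps each
# attribute either to a tuple of allowed values or to an input spec, e.g.
#   {'status': ('TODO', 'DONE'), 'hours': 'INPUT(number)', 'due': 'INPUT(Date)'}
# With that config, metastates={'hours': '2.5'} passes the number regex,
# while {'due': '2024-1-1'} fails the date regex (two digits required).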
def metastates_agg(C,nosuper=True,tags_limit=[]):
unqkeys={}
unqtypes={}
unqtypes_nosuper={}
qry = "select * from journal_digest_attrs"
args = []
if tags_limit:
qry+=" where tags && ARRAY[%s]"
args.append(tuple(tags_limit))
C.execute(qry,args)
ts = C.fetchall()
cnt=0 ; col=[]
for t in ts:
tags = set(t['tags'])
if len(tags_limit):
inter = tags.intersection(set(tags_limit))
if len(inter)<len(tags_limit): continue
try:
kv = '='.join([str(t['attr_key']).replace(' ','-'),
t['attr_value'].replace(' ','-')])
except TypeError:
print(t)
raise
if t['id'] not in unqkeys: unqkeys[t['id']]=[]
if kv not in unqkeys[t['id']]:
unqkeys[t['id']].append(kv)
if kv=='status=TODO':
cnt+=1
col.append(t['id'])
#print 'attr',kv.replace('=',' '),t['id']
#raise Exception(len([t for t in ts if get_task(t['id']).status=='TODO']))
import itertools
for tid,kvs in list(unqkeys.items()):
kvss = ','.join(sorted(kvs))
if kvss not in unqtypes: unqtypes[kvss]=[]
if tid not in unqtypes[kvss]:
unqtypes[kvss].append(tid)
else:
raise Exception('%s already in %s'%(tid,kvss))
if nosuper:
for kvs,tids in list(unqtypes.items()):
unqtypes_nosuper[kvs]=metastates_nosuper_qry(kvs,tids,unqkeys,unqtypes)
print('%s (%s) => (%s)'%(kvs,len(unqtypes[kvs]),len(unqtypes_nosuper[kvs])))
#raise Exception(len(unqtypes['status=TODO']),len(unqtypes_nosuper.get('status=TODO',[])))
return unqkeys,unqtypes,unqtypes_nosuper
def metastates_nosuper_qry(arg,its,unqkeys,unqtypes):
#print 'starting off with ',len(its)
nosuper_rt=its
for unqt,tids in list(unqtypes.items()):
if unqt==arg: continue
nosuper_rt=set(nosuper_rt)-set(tids)
#print 'after removal of',unqt,'we are left with',len(nosuper_rt)
return nosuper_rt
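# Sketch (added; hypothetical data): given
#   unqtypes = {'status=TODO': ['a', 'b'], 'prio=high,status=TODO': ['b']}
# metastates_nosuper_qry('status=TODO', ['a', 'b'], unqkeys, unqtypes)
# subtracts every other combination's ids and returns {'a'}: only ids whose
# sole attribute combination is the queried one survive.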
def metastates_qry(C,arg,nosuper=True,tags_limit=[]):
sets={}
try:
conds = dict([k.split('=') for k in arg.replace('-',' ').split(',')])
except ValueError:
print(arg,tags_limit)
raise
for cnd,cndv in list(conds.items()):
qry = "select * from journal_digest_attrs where attr_key=%s and attr_value=%s"
args=[cnd,cndv]
C.execute(qry,args)
ts = C.fetchall()
cndp = cnd.replace(' ','-')+' '+cndv.replace(' ','-')
tids=[]
for t in ts:
tags = set(t['tags'])
if len(tags_limit):
inter = tags.intersection(set(tags_limit))
if len(inter)<len(tags_limit): continue
tids.append(t['id'])
tids = set(tids)
sets[cndp]=tids
print(cndp,len(tids))
setvs = list(sets.values())
its = setvs[0].intersection(*setvs)
#print len(its),'intersection items.'
if nosuper:
unqkeys,unqtypes,_ = metastates_agg(C,nosuper=False)
nosuper_rt = metastates_nosuper_qry(arg,its,unqkeys,unqtypes)
else:
nosuper_rt = ()
return sets,its,nosuper_rt
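# Illustration (added): the query arg uses the dash-encoded form produced by
# metastates_agg, e.g. arg='status=TODO,assigned-to=alice' is parsed back to
# {'status': 'TODO', 'assigned to': 'alice'}; each pair is queried separately
# and the resulting id sets are intersected.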
| SandStormHoldings/ScratchDocs | docs.py | Python | mit | 67,531 | ["VisIt"] | e7ce62ed96bb934d98772a535b451839a87e24eb11bc26f58d30b8039d9bade2 |
#!/usr/bin/env python
import cStringIO
from itertools import ifilter
from functools import partial
import logging
import logging.config
import operator as op
import os
import sys
from lib.typecheck import *
import lib.const as C
import lib.visit as v
from .. import add_artifacts
from .. import util
from ..anno import parse_anno
from . import fields_reset, methods_reset, classes_reset, fields, methods, classes, class_lookup
import statement as st
from field import Field
from method import Method
from clazz import Clazz, parse_class, merge_layer, find_base
class Template(v.BaseNode):
def __init__(self, ast):
# reset ids and lists of meta-classes: Field, Method, and Clazz
fields_reset()
methods_reset()
classes_reset()
self._frozen = False
# class declarations in this template
self._classes = [] # [ Clazz... ]
# event involved in this template
self._events = {} # [ event_kind0 : 0, ... ]
self._evt_annotated = False
# aux types for observer patterns
self._obs_auxs = {} # { Aux...1 : [ C1, D1 ], ... }
# aux types for accessor patterns
self._acc_auxs = [] # [ Aux...1, ... ]
# aux types for singleton pattern
self._sng_auxs = [] # [ Aux...1, ... ]
# primitive classes
cls_obj = Clazz(pkg=u"java.lang", name=C.J.OBJ)
cls_obj.sup = None # Object is the root
self._classes.append(cls_obj)
find_obs = lambda anno: anno.by_name(C.A.OBS)
annos = []
pkg = None
mods = []
events = []
for _ast in ast.getChildren():
tag = _ast.getText()
if tag in [C.T.CLS, C.T.ITF, C.T.ENUM]:
clazz = parse_class(_ast)
clazz.annos = annos
if pkg:
for cls in util.flatten_classes([clazz], "inners"): cls.pkg = pkg
clazz.mods = mods
self._classes.append(clazz)
# collect event kinds
for cls in util.flatten_classes([clazz], "inners"):
# 1) class itself is sort of event
if cls.is_event:
events.append(repr(cls))
# 2) might be annotated with explicit event sorts
elif util.exists(find_obs, annos):
_anno = util.find(find_obs, annos)
events.extend(_anno.events)
self._evt_annotated = True
annos = []
pkg = None
mods = []
elif tag == C.T.ANNO:
annos.append(parse_anno(_ast))
elif tag == C.T.PKG:
p_node = util.mk_v_node_w_children(_ast.getChildren())
pkg = util.implode_id(p_node)
else: # modifiers
mods.append(tag)
## parsing done
## post manipulations go below
logging.debug("# class(es): {}".format(len(classes())))
logging.debug("# method(s): {}".format(len(methods())))
logging.debug("# field(s): {}".format(len(fields())))
self.consist()
# remove duplicates in events
events = util.rm_dup(events)
if events:
logging.debug("event(s) in the template: {}: {}".format(len(events), events))
# numbering the event kinds
for i, event in enumerate(events):
self._events[event] = i
# if there exists java.util.EventObject (i.e., cmd == "gui")
# no additional class is required to represent events
evt_obj = class_lookup(C.GUI.EVT)
if evt_obj:
# artificial field to record subtype events' kinds
fld = Field(clazz=evt_obj, mods=[C.mod.PB], typ=C.J.i, name=u"kind")
evt_obj.flds.append(fld)
else:
# if there exists android.os.Message (i.e., cmd == "android")
# no additional class is required, too
msg = class_lookup(C.ADR.MSG)
if msg: pass
# o.w. introduce artificial class Event that implodes all event kinds
# class Event { int kind; E_kind$n$ evt$n$; }
elif events:
cls_e = merge_layer(u"Event", map(class_lookup, events))
cls_e.add_default_init()
self._classes.append(cls_e)
add_artifacts([u"Event"])
# keep snapshots of instances of meta-classes
def freeze(self):
self._flds = fields()
self._mtds = methods()
self._clss = classes()
# restore snapshots of instances of meta-classes
def unfreeze(self):
fields_reset(self._flds)
methods_reset(self._mtds)
classes_reset(self._clss)
@property
def classes(self):
return self._classes
@classes.setter
def classes(self, v):
self._classes = v
def add_classes(self, v):
self._classes.extend(v)
@property
def events(self):
return self._events
@events.setter
def events(self, v):
self._events = v
@property
def is_event_annotated(self):
return self._evt_annotated
# retrieve event's kind index
def get_event_id(self, cname):
# if name appears explicitly, access to its kind index directly
if cname in self._events: return self._events[cname]
# o.w. consider subtype (e.g., implementing an interface)
cls = class_lookup(cname)
c_evts = util.ffilter(map(class_lookup, self._events))
try:
c_evt = util.find(lambda c_evt: cls <= c_evt, c_evts)
return self._events[c_evt.name]
except Exception: return None
# check whether the given type is event sort
def is_event(self, cname):
return self.get_event_id(cname) != None
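  # Added sketch (hypothetical event classes): with self._events == {"C": 0,
  # "D": 1}, get_event_id("C") returns 0 via the direct lookup, while for an
  # unlisted subtype E <= C the class_lookup path resolves E to C's index;
  # is_event is then just "has some kind index".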
@property
def obs_auxs(self):
return self._obs_auxs
@obs_auxs.setter
def obs_auxs(self, v):
self._obs_auxs = v
@property
def acc_auxs(self):
return self._acc_auxs
@acc_auxs.setter
def acc_auxs(self, v):
self._acc_auxs = v
@property
def sng_auxs(self):
return self._sng_auxs
@sng_auxs.setter
def sng_auxs(self, v):
self._sng_auxs = v
def __str__(self):
return '\n'.join(map(str, self._classes))
def accept(self, visitor):
visitor.visit(self)
clss = util.flatten_classes(self._classes, "inners")
map(op.methodcaller("accept", visitor), clss)
def jsonify(self):
return [ cls.jsonify() for cls in self._classes ]
# to make the template type-consistent
# collect all the types in the template
# build class hierarchy
# discard interfaces without implementers
# discard methods that refer to undefined types
def consist(self):
clss = util.flatten_classes(self._classes, "inners")
# collect *all* types in the template
# including inners as well as what appear at field/method declarations
# (since we don't care about accessability, just flatten inner classes)
# for easier(?) membership test
# { cls!r: Clazz(cname, ...), ... }
decls = { repr(cls): cls for cls in clss }
def is_defined(tname):
_tname = util.sanitize_ty(tname)
for cls_r in decls.keys():
if decls[cls_r].is_inner:
if _tname == cls_r: return True
if _tname in cls_r.split('_'): return True
else:
if tname == cls_r: return True
return False
def add_decl(tname):
if is_defined(tname): return
logging.debug("adding virtual declaration {}".format(tname))
cls = Clazz(name=tname)
# to avoid weird subtyping, e.g., int < Object
if tname in C.primitives: cls.sup = None
decls[repr(cls)] = cls
# add declarations in nested generics or arrays
if util.is_collection(tname):
map(add_decl, util.of_collection(tname)[1:])
elif util.is_array(tname):
add_decl(util.componentType(tname))
# finding types that occur at field/method declarations
for cls in clss:
for fld in cls.flds:
if not is_defined(fld.typ): add_decl(fld.typ)
for mtd in cls.mtds:
for (ty, nm) in mtd.params:
if not is_defined(ty): add_decl(ty)
# build class hierarchy: fill Clazz.subs
for cls in clss:
if not cls.sup and not cls.itfs: continue
sups = map(util.sanitize_ty, cls.itfs)
if cls.sup: sups.append(util.sanitize_ty(cls.sup))
if not sups: continue
for sup in clss:
if sup == cls: continue
if sup.name in sups or repr(sup) in sups:
if cls not in sup.subs: sup.subs.append(cls)
## discard interfaces that have no implementers, without constants
#for itf in ifilter(op.attrgetter("is_itf"), clss):
# if not itf.subs and not itf.flds:
# logging.debug("discarding {} with no implementers".format(repr(itf)))
# if itf in self._classes:
# self._classes.remove(itf)
# elif itf.outer: # inner interface
# itf.outer.inners.remove(itf)
# del decls[repr(itf)]
## some interfaces might have been discarded, hence retrieve classes again
#clss = util.flatten_classes(self._classes, "inners")
## discard methods that refer to undefined types
#for cls in clss:
# for mtd in cls.mtds[:]: # to remove items, should use a shallow copy
# def undefined_type( (ty, _) ): return not is_defined(ty)
# if util.exists(undefined_type, mtd.params):
# ty, _ = util.find(undefined_type, mtd.params)
# logging.debug("discarding {} due to lack of {}".format(mtd.name, ty))
# cls.mtds.remove(mtd)
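  # Added illustration (hypothetical types): if a field is declared as, say,
  # Map<String,Foo> but Foo is never declared in the template, add_decl above
  # registers a virtual declaration for Foo (and recurses into generic
  # parameters / array component types), so later is_defined checks succeed.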
# invoke Clazz.mtds_w_anno for all classes
@takes("Template", callable)
@returns(list_of(Method))
def mtds_w_anno(self, cmp_anno):
mtdss = map(lambda cls: cls.mtds_w_anno(cmp_anno), self._classes)
return util.ffilter(util.flatten(mtdss))
# invoke Clazz.mtds_w_mod for all classes
@takes("Template", unicode)
@returns(list_of(Method))
def mtds_w_mod(self, mod):
mtdss = map(lambda cls: cls.mtds_w_mod(mod), self._classes)
return util.ffilter(util.flatten(mtdss))
# find methods with @Harness
  # if called with a specific name, will return the exact method
@takes("Template", optional(str))
@returns( (list_of(Method), Method) )
def harness(self, name=None):
if name:
h_finder = lambda anno: anno.by_attr({"name": C.A.HARNESS, "f": name})
mtds = self.mtds_w_anno(h_finder)
if mtds and len(mtds) == 1: return mtds[0]
_mtds = self.mtds_w_mod(C.mod.HN)
mtds = filter(lambda mtd: mtd.name == name, _mtds)
if mtds and len(mtds) == 1: return mtds[0]
raise Exception("can't find @Harness or harness", name)
else:
h_finder = lambda anno: anno.by_name(C.A.HARNESS)
mtds = self.mtds_w_anno(h_finder) + self.mtds_w_mod(C.mod.HN)
return util.rm_dup(mtds)
# find main()
@property
def main(self):
mtds = []
# assume *main* is not defined in inner classes
for cls in self._classes:
for mtd in cls.mtds:
if C.mod.ST in mtd.mods and mtd.name == C.J.MAIN:
mtds.append(mtd)
n = len(mtds)
if n > 1:
raise Exception("multiple main()s", mtds)
elif 1 == n: return mtds[0]
else: return None
# find the class to which main() belongs
@property
def main_cls(self):
main = self.main
harness = self.harness()
if main: return main.clazz
# assume @Harness methods are defined at the same class
elif harness: return harness[0].clazz
else: raise Exception("None of main() and @Harness is found")
# add main() that invokes @Harness methods
@takes("Template")
@returns(nothing)
def add_main(self):
main_cls = self.main_cls
if any(main_cls.mtd_by_name(u"main")): return
params = [ (u"String[]", u"args") ]
main = Method(clazz=main_cls, mods=C.PBST, name=u"main", params=params)
def to_call(mtd): return mtd.name + "();"
body = '\n'.join(map(to_call, self.harness()))
main.body = st.to_statements(main, body)
main_cls.mtds.append(main)
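  # Illustration (added): with two harness methods t1 and t2, the generated
  # body is "t1();\nt2();", i.e. main() simply invokes each @Harness method
  # in turn.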
# find class of certain kind, e.g., Activity
@takes("Template", unicode)
@returns(list_of(Clazz))
def find_cls_kind(self, kind):
cls_kind = class_lookup(kind)
if cls_kind: pred = lambda cls: cls < cls_kind
else: pred = lambda cls: kind in cls.name
return filter(pred, self._classes)
"""
To import lib.*, run as follows:
pasket $ python -m pasket.meta.template
"""
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: python -m pasket.meta.template (template.java | template_folder)+ [opt]"
parser = OptionParser(usage=usage)
parser.add_option("--json",
action="store_true", dest="json", default=False,
help="print AST in a json format")
parser.add_option("--hierarchy",
action="store_true", dest="hierarchy", default=False,
help="print inheritance hierarchy")
parser.add_option("--method",
action="store_true", dest="method", default=False,
help="print declared methods in the template")
parser.add_option("-e", "--event",
action="store_true", dest="event", default=False,
help="print event sorts in the template(s)")
(opt, argv) = parser.parse_args()
if len(argv) < 1:
parser.error("incorrect number of arguments")
pwd = os.path.dirname(__file__)
src_dir = os.path.join(pwd, "..")
root_dir = os.path.join(src_dir, "..")
sys.path.append(root_dir)
## logging configuration
logging.config.fileConfig(os.path.join(src_dir, "logging.conf"))
logging.getLogger().setLevel(logging.DEBUG)
tmpl_files = []
for arg in argv:
tmpl_files.extend(util.get_files_from_path(arg, "java"))
ast = util.toAST(tmpl_files)
tmpl = Template(ast)
if opt.json:
import json
print json.dumps(tmpl.jsonify(), indent=2)
if opt.hierarchy:
def toStringTree(cls, depth=0):
buf = cStringIO.StringIO()
buf.write("%*s" % (depth, ""))
if cls.is_class: buf.write("[c] ")
elif cls.is_itf: buf.write("[i] ")
elif cls.is_enum: buf.write("[e] ")
buf.write(repr(cls))
if cls.itfs: buf.write(" : " + ", ".join(map(str, cls.itfs)))
buf.write('\n')
for sub in cls.subs:
buf.write(toStringTree(sub, depth+4))
for inner in cls.inners:
buf.write(toStringTree(inner, depth+2))
return buf.getvalue()
tmpl.consist()
bases = util.rm_dup(map(find_base, classes()))
for cls in bases:
print toStringTree(cls, 0)
if opt.method:
for mtd in methods():
print mtd.signature
if opt.event:
for evt in tmpl.events:
print evt
if not sum([opt.json, opt.hierarchy, opt.method, opt.event]):
print str(tmpl)
| plum-umd/pasket | pasket/meta/template.py | Python | mit | 14,075 | ["VisIt"] | a5391e4e49d9ee4da47e96214bbda90046b3a9c9df2123f18dbc5211e88d8cf3 |
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
import math
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
# for fft, the best implementation is given for N = 2^p
size = 16
# collection for test
collection = NumericalComplexCollection(size)
# Fill the data with artificial values
# Create a complex gaussian sample
for index in range(size):
realPart = 0.1 * (index + 1.0) / size
imagPart = 0.3 * (index + 1.0) / size
collection[index] = realPart + 1j * imagPart
    # Instantiation of FFT class
myFFT = KissFFT()
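    # (Added note) transform() computes the discrete Fourier transform
    #   X[k] = sum_{n=0}^{N-1} x[n] * exp(-2*pi*1j*k*n/N)
    # and inverseTransform() applies the conjugate kernel (with a 1/N
    # normalization, assuming the usual convention), so the "FFT back"
    # output below should match `collection` up to rounding error.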
print("myFFT = ", myFFT)
# Initial transformation
print("collection = ", collection)
# FFT transform
transformedCollection = NumericalComplexCollection(
myFFT.transform(collection))
print("FFT result = ", transformedCollection)
# Inverse transformation
inverseTransformedCollection = NumericalComplexCollection(
myFFT.inverseTransform(transformedCollection))
print("FFT back=", inverseTransformedCollection)
# 2D case now
N = 8
distribution = Normal(N)
sample = distribution.getSample(2 * N)
# cleaning parameter
threshold = 1e-14
# FFT transform
transformedSample = myFFT.transform2D(sample)
print("2D FFT result = ", repr(transformedSample.clean(threshold)))
# Inverse transformation
inverseTransformedSample = myFFT.inverseTransform2D(transformedSample)
print("2D FFT back=", repr(inverseTransformedSample.clean(threshold)))
# 3D case
elements = [RandomGenerator.Generate() for i in range(N * N * N)]
tensor = ComplexTensor(N, N, N, elements)
# FFT transform
transformedTensor = myFFT.transform3D(tensor)
print("3D FFT result = ", repr(transformedTensor.clean(threshold)))
# Inverse transformation
inverseTransformedTensor = myFFT.inverseTransform3D(transformedTensor)
print("3D FFT back=", repr(inverseTransformedTensor.clean(threshold)))
except:
import sys
print("t_KissFFT_std.py", sys.exc_info()[0], sys.exc_info()[1])
| aurelieladier/openturns | python/test/t_KissFFT_std.py | Python | lgpl-3.0 | 2,094 | ["Gaussian"] | 86212575f9e0f189801742005af96fa310845601b883b1e553130bcb0b4f68ae |
'''
Usage:
pipeline.py <r1> <r2> --config <config> [-o <outdir>] [--log <log>]
Options:
-c <config>, --config <config>
--log <log>
'''
import sh
from itertools import imap, izip, tee, chain, groupby, ifilter, starmap
from toolz.dicttoolz import keymap,valfilter,keyfilter,merge
from toolz.itertoolz import mapcat
from docopt import docopt
from path import Path
import types
import yaml
import sys
from functools import partial
import shutil
import os
from Bio import SeqIO
from operator import itemgetter as get
from collections import Counter
import csv
from ete2 import NCBITaxa
import plumbum
#import numpy as np
#import matplotlib.pyplot as plt
# TODO: Log commands as doing them
# TODO: BLAST Contig results with mapped reads to get abundance:
# - Does abyss record number of reads into contig? # no, it's just length and "kmer coverage"
# - could duplicate contig blast entry for each read that maps to it and pass to krona
def lzw(sequence):
# https://github.com/betegonm/gen/blob/64aef21cfeefbf27b1e2bd6587c555d4df4f6913/gen.py#L294
output = []
    table = dict((chr(i), i) for i in range(256))
s = ''
for ch in sequence:
it = s + ch
if it in table:
s = it
else:
output.append(table[s])
table[it] = len(table)
s = ch
output.append(table[s])
return len(output)
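# Illustration (added): lzw() returns the number of LZW codes emitted, a
# cheap proxy for sequence complexity; repetitive input needs fewer codes:
#   lzw('AAAAAAAA') == 4,  lzw('ACGTACGT') == 6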
#############
# Utilities #
#############
class Config:
def __init__(self, entries):
self.__dict__.update(entries)
for k,v in self.__dict__.items():
if type(v) == types.DictType:
setattr(self, k, Config(v))
class Sh_(object):
def __getattr__(self, attr):
#cmd = getattr(sh, attr)
def command(*args, **kwargs):
#fixedargs = keymap("-{}".format, kwargs)
bools = valfilter(lambda x: type(x) is bool, kwargs)
vargs = keymap("-{}".format, keyfilter(lambda x: x not in ['_err', '_out'], valfilter(lambda x: not type(x) is bool, kwargs)))
#bools.update(vargs)
fixedargs = chain(vargs.items())
getattr(sh, attr)(*(list(args) + list(fixedargs)), **bools)
return command
sh_ = Sh_()
############
# Parts #
############
def star(log, cfg, in1, in2):
sh.STAR('--readFilesIn', in1, in2, #readFilesIn=unlist(in1, in2),
genomeDir=cfg.star.starDB,
outSAMtype="SAM",
outReadsUnmapped="Fastx",
_out=log, _err=log)
def pricefilter(log, cfg, in1, in2, o1, o2):
cfg = cfg.pricefilter
sh_.PriceSeqFilter('-fp', in1, in2,
'-op', o1, o2,
'-rqf', cfg.highQualPercent, cfg.highQualMin,
rnf=cfg.calledPercent)
def cdhitdup(log, cfg, r1, r2, o1, o2):
sh_.cd_hit_dup(i=r1, i2=r2, o=o1, o2=o2, e=cfg.cdhitdup.minDifference, _err=log, _out=log)
# LZW!
def bowtie_sensitive(log, cfg, r1, r2, o1):
args = {'1' : r1, '2' : r2,
'very_sensitive_local' : True,
'un_conc' : Path(o1).splitext()[0],
'x' : cfg.bowtie2.bowtieDB,
'_err' : log, '_out' : log}
sh.bowtie2(**args)
def rapsearch(log, cfg, fq, out):
out = out.splitext()[0] # rapsearch adds an m8 extension
sh.rapsearch(o=out, d=cfg.rapsearch.rapsearchDB, q=fq, _err=log, _out=log)
def blastn(log, cfg, fq, out):
print "attempting blast with %s %s" % (fq, out)
#sh_.blastn(outfmt=6, db=cfg.ncbi.ntDB, query=fq, _err=log, _out=out)
sh.blastn('-max_target_seqs', '1', outfmt=6, db=cfg.ncbi.ntDB, query=fq, _err=log, _out=out, _long_prefix='-')
def krona(log, cfg, blast, out):
sh.ktImportBLAST(blast, o=out, _err=log, _out=log) # probably need config for kronadb!
def blastx(log, cfg, fq, out):
sh.blastx('-max_target_seqs', '1', outfmt=6, db=cfg.ncbi.nrDB, query=fq, _err=log, _out=out, _long_prefix='-')
def abyss(log, cfg, r1, r2, out):
dir = out.dirname()
f1 = dir.relpathto(r1)
f2 = dir.relpathto(r2)
prefix=out.basename().split('-')[0]
print f1, f2, 'name=%s' % prefix, 'k=%s' % 25
sh.run_abyss(f1, f2, 'name=%s' % prefix, 'k=%s' % 25, C=dir, _err=log, _out=log)
#def lzw_filter(log, cfg, r1, r2, out1, out2):
def lzw_filter_single(min_complexity, x):
un_comp_len = len(str(x.seq))
comp_len = sum(imap(len, sh.gzip(f=True, _in=str(x.seq))))
complexity = comp_len / float(un_comp_len)
return complexity >= min_complexity
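# Illustration (added): the score is len(gzip(seq)) / len(seq); a
# low-complexity read like 'A'*100 gzips to a few dozen bytes and scores
# well below a diverse 100-mer, so repetitive pairs are filtered out when
# either mate falls under min_complexity.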
def unzip(seq):
t1, t2 = tee(seq)
return imap(get(0), t1), imap(get(1), t2)
def filter_pair(func, r1, r2, o1, o2, format):
fwd = SeqIO.parse(r1, format)
rev = SeqIO.parse(r2, format)
filtered = ((x, y) for (x, y) in izip(fwd, rev)
if func(x) and func(y))
res1, res2 = unzip(filtered)
with open(o1, 'w') as f1:
with open(o2, 'w') as f2:
SeqIO.write(res1, f1, format)
SeqIO.write(res2, f2, format)
def lzw_filter_fastq(log, cfg, r1, r2, out1, out2):
lzw_func = partial(lzw_filter_single, cfg.lzwfilter.maxCompressionScore)
filter_pair(lzw_func, r1, r2, out1, out2, 'fastq')
def sum_sam_by_ref(log, cfg, sam):
res = sh.samtools.view(sam, F=260)
refs = imap(lambda x: x.split('\t')[2], res)
return Counter(refs)
def dup_blast(log, sam, blst, out):
counter = sum_sam_by_ref(None, None, sam)
log.write("Skipped Contigs:\n======\n")
with open(out, 'w') as f:
with open(blst, 'r') as blast:
for k,v in groupby(blast, lambda x: x.split('\t')[0]):
if k not in counter:
contig_length = next(v).split('\t')[3]
log.write("Contig %s of length %s had no mapped reads.\n" % (k, contig_length))
else:
f.writelines(list(v) * counter[k])
log.write("======\n")
# # because SeqIO is slow for writing single records
# for (x, y) in izip(fwd, rev):
# if func(x) and func(y):
# '@
# f1.write(x.format(form))
# f2.write(y.format(form))
#comp_len = sh.wc(sh.gzip(f=True, _in=str(x.seq)), c=True)
#sh.run_abyss(r1, r2, 'name=%s' % prefix, 'k=%s' % 25, _err=log, _out=log)
# args = ["in=%s %s" % (r1, r2), 'name=%s' % prefix, 'k=%s' % 25]
# subprocess.call('abyss-pe ' + ' '.join(args))
#subprocess.call('abyss-pe', ' '.join(args))
# print ['abyss-pe'] + args
#subprocess.call(['abyss-pe'] + args) #, stdout=log, stderr=log, shell=True)
# ex = "abyss-pe in=%s %s" % (r1, r2) + ' name=%s ' % prefix + ' k=%s' % 25
# subprocess.call(ex, stdout=log, stderr=log, shell=True)
# sh.abyss_pe('in=\'%s %s\'' % (r1, r2), 'name=%s' % prefix, 'k=%s' % 25, # '-n' dryrun
# _err=log, _out=log)
# sh.abyss_pe("in='%s %s'" % (r1, r2), name=prefix, k=25, # '-n' dryrun
# _err=log, _out=log, _long_prefix='', _short_prefix='')
# abyss-pe k=25 name=test in='test-data/reads1.fastq test-data/reads2.fastq'
# taxid = 1056490
def blastdbcmd(**opts):
cmd_opts = keymap('-{}'.format, opts).items()
process = plumbum.local['blastdbcmd'][cmd_opts]
print process
for line in process.popen().iter_lines(retcode=None):
yield line[0]
def get_taxid(db, seqids): # (Path, str) -> dict[str,str]
#res = sh.blastdbcmd(db=db, outfmt="'%g %T'", entry="'%s'" % seqid, _long_prefix='-')
    max_ids = 1000/80  # keep each blastdbcmd -entry list comfortably short
    if len(seqids) > max_ids:
        xs = [seqids[i*max_ids:(i+1)*max_ids] \
             for i in range(len(seqids) / max_ids)]
        # append the remainder as its own chunk; extend() spliced in bare
        # strings, which ','.join() below would split into single characters
        rest = seqids[sum(map(len, xs)):]
        if rest: xs.append(rest)
    else: xs = [seqids]
# xs = chain(*xs)
# print map(len, xs)
print seqids
print xs
res = mapcat(lambda x: blastdbcmd(db=db, outfmt="'%g %T'", entry="'%s'" % ','.join(x)), xs)
#res = blastdbcmd(db=db, outfmt="'%g %T'", entry="'%s'" % ','.join(xs))
#print res
#print res
res = ifilter(bool, res)
res = imap(lambda s: s.strip("'"), res)
return dict(imap(unicode.split, res))
def taxonomy(ncbi, taxid):
lineage = ncbi.get_lineage(taxid)
ranks = ncbi.get_rank(lineage)
names = ncbi.get_taxid_translator(lineage)
def make_d(lineage, ranks, names):
for lin in lineage:
if ranks[lin] == 'no rank':
continue
yield (ranks[lin], names[lin])
return dict(make_d(lineage, ranks, names))
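# Illustration (added; exact values depend on the local NCBI taxonomy dump):
# for a bacterial species taxid, taxonomy() returns roughly
#   {'superkingdom': 'Bacteria', ..., 'genus': 'Escherichia',
#    'species': 'Escherichia coli'}
# with 'no rank' lineage nodes dropped.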
def dictmap(f, d): return starmap(f, d.items())
def blast2summary_dict(db, blastpath): # (Path, Path) -> list[dict]
"""Reading in a blast output file, lookup all seqids to get taxids with a single blastdbcmd.
Then, lookup the taxonomy using ETE2 via the taxid, and add that info to the blast info."""
rows = csv.DictReader(open(blastpath), delimiter='\t',fieldnames=['qseqid', 'sseqid','pid', 'alnlen','gapopen','qstart','qend','sstart','send','evalue','bitscore'])
rows = list(rows)
seqids = map(get('sseqid'), rows)
taxids = get_taxid(db, seqids)
gis = (s.split('|')[1] for s in seqids)
matches = dict((taxids[gi], row) for gi, row in zip(gis,rows) if gi in taxids)
ncbi = NCBITaxa() # downloads database and creates SQLite database if needed
return dictmap(lambda tid,row: merge(row, taxonomy(ncbi, tid)), matches)
def blast2summary(db, blastpath, outpath): # (Path,Path,Path) -> None
with_taxonomies = list(blast2summary_dict(db, blastpath))
head = with_taxonomies[0]
print with_taxonomies
with open(outpath, 'w') as out:
writer = csv.DictWriter(out, head.keys(), delimiter='\t')
#writer = csv.DictWriter(out, fieldnames=head.keys(), delimiter='\t')
writer.writeheader()
for row in with_taxonomies:
writer.writerow(row)
############
# Pipeline #
############
def run(cfg, input1, input2, log=None):
p = partial(os.path.join, cfg.outdir)
_star1 = p("Unmapped.out.mate1")
_star2 = p("Unmapped.out.mate2")
star1 = p("star.r1.fq")
star2 = p("star.r2.fq")
psf1 = p( "psf.r1.fq" )
psf2 = p( "psf.r2.fq" )
cd1 = p( "cd.r1.fq" )
cd2 = p( "cd.r2.fq" )
lzw1 = "lzw.r1"
lzw2 = "lzw.r2"
_bowtie1 = p( "bowtie.1.r1" )
_bowtie2 = p( "bowtie.2.r1" )
contigs = p("abyss-contigs.fa")
contigs_sam = 'contigs.sam'
contig_nr = p('contigs.nr.blast')
contig_nt = p('contigs.nt.blast')
dup_nt = p('contigs.nt.blast.dup')
    dup_nr = p('contigs.nr.blast.dup')
contig_kronaNT = p('contigs.nt.html')
contig_kronaNR = p('contigs.nr.html')
contig_nt_tsv = p("contigs.nt.tsv")
    contig_nr_tsv = p("contigs.nr.tsv")
nt_tsv = p('nt.tsv')
nr_tsv = p('nr.tsv')
# bowtie1 = p( "bowtie.r1.fa" )
# bowtie2 = p( "bowtie.r2.fa" )
# nr1 = p( "rapsearch.r1.blast.m8" ) # rapsearch automatically adds .m8 extension
# nr2 = p( "rapsearch.r2.blast.m8" ) # rapsearch automatically adds .m8 extension
#
# nt1 = p( "r1.blast" )
# nt2 = p( "r2.blast" )
# kronaNT1 = p( "r1.NT.html" )
# kronaNT2 = p( "r2.NT.html" )
# kronaNR1 = p( "r1.NR.html" )
# kronaNR2 = p( "r2.NR.html" )
if not log:
log = sys.stdout
need = lambda p: not os.path.exists(p)
if need(_star1):
star(log, cfg, input1, input2)
if need(star1):
shutil.copy(_star1, star1)
shutil.copy(_star2, star2)
if need(psf1):
pricefilter(log, cfg, star1, star2, psf1, psf2)
if need(cd1):
cdhitdup(log, cfg, psf1, psf2, cd1, cd2)
if need(lzw1):
lzw_filter_fastq(log, cfg, cd1, cd2, lzw1, lzw2)
if need(_bowtie1):
bowtie_sensitive(log, cfg, lzw1, lzw2, _bowtie1)
if need(contigs):
abyss(log, cfg, _bowtie1, _bowtie2, contigs)
contigs_index = 'contigs-b2'
sh.bowtie2_build(contigs, contigs_index)
sh.bowtie2(**{'1' : _bowtie1, '2' : _bowtie2, 'x' : contigs_index,
'_err' : log, '_out' : contigs_sam})
if need(contig_nt):
blastn(log, cfg, contigs, contig_nt)
# TODO: fix below
dup_blast(log, contigs_sam, contig_nt, dup_nt)
if need(contig_nr):
blastx(log, cfg, contigs, contig_nr)
dup_blast(log, contigs_sam, contig_nr, dup_nr)
if need(contig_kronaNT):
krona(log, cfg, contig_nt, contig_kronaNT)
if need(contig_kronaNR):
krona(log, cfg, contig_nr, contig_kronaNR)
if need(contig_nt_tsv):
blast2summary(cfg.ncbi.ntDB, contig_nt, contig_nt_tsv)
# if need(bowtie1):
# SeqIO.convert(_bowtie1, 'fastq', bowtie1, 'fasta')
# SeqIO.convert(_bowtie2, 'fastq', bowtie2, 'fasta')
#
# if need(nt1):
# blastn(log, cfg, bowtie1, nt1)
# blastn(log, cfg, bowtie2, nt2)
#
# if need(nr1):
# #rapsearch(log, cfg, bowtie1, nr1)
# #rapsearch(log, cfg, bowtie2, nr2)
# blastx(log, cfg, bowtie1, nr1)
# blastx(log, cfg, bowtie2, nr2)
#
# if need(kronaNT1):
# krona(log, cfg, nt1, kronaNT1)
# krona(log, cfg, nt2, kronaNT2)
# krona(log, cfg, nr1, kronaNR1)
# krona(log, cfg, nr2, kronaNR2)
def main():
args = docopt(__doc__, version='Version 1.0')
cfg = args['--config']
cfg = yaml.load(open(cfg))
cfg = Config(cfg)
cfg.outdir = args['-o'] or "."
if args['--log']:
_log = Path(args['--log'])
if _log.exists():
print "Removing old log file %s" % _log
_log.remove()
log = open(args['--log'], 'a')
else:
log = sys.stdout
run(cfg, args['<r1>'], args['<r2>'], log)
# try:
# run(cfg, args['<r1>'], args['<r2>'], log)
# except Exception as e:
# log.write(str(e))
if args['--log']: log.close()
sys.exit(0)
if __name__ == '__main__': main()
| averagehat/Pathogen.hs | pipeline.py | Python | gpl-2.0 | 13,378 | ["BLAST", "Bowtie"] | c46a01fb1afa51dcc813710e769a391b62c42573ef8e800205f11960e79fd75d |
#!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v=None):
el = []
pp = []
names = ['VFM', 'VFM_HFM', 'HFM', 'HFM_Watchpoint', 'Watchpoint', 'Watchpoint_Mask', 'Mask', 'Watchpoint2']
for el_name in names:
if el_name == 'VFM':
# VFM: ellipsoidMirror 50.0m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_VFM_p,
_q=v.op_VFM_q,
_ang_graz=v.op_VFM_ang,
_size_tang=v.op_VFM_size_tang,
_size_sag=v.op_VFM_size_sag,
_nvx=v.op_VFM_nvx,
_nvy=v.op_VFM_nvy,
_nvz=v.op_VFM_nvz,
_tvx=v.op_VFM_tvx,
_tvy=v.op_VFM_tvy,
_x=v.op_VFM_x,
_y=v.op_VFM_y,
))
pp.append(v.op_VFM_pp)
elif el_name == 'VFM_HFM':
# VFM_HFM: drift 50.0m
el.append(srwlib.SRWLOptD(
_L=v.op_VFM_HFM_L,
))
pp.append(v.op_VFM_HFM_pp)
elif el_name == 'HFM':
# HFM: ellipsoidMirror 50.2m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_HFM_p,
_q=v.op_HFM_q,
_ang_graz=v.op_HFM_ang,
_size_tang=v.op_HFM_size_tang,
_size_sag=v.op_HFM_size_sag,
_nvx=v.op_HFM_nvx,
_nvy=v.op_HFM_nvy,
_nvz=v.op_HFM_nvz,
_tvx=v.op_HFM_tvx,
_tvy=v.op_HFM_tvy,
_x=v.op_HFM_x,
_y=v.op_HFM_y,
))
pp.append(v.op_HFM_pp)
elif el_name == 'HFM_Watchpoint':
# HFM_Watchpoint: drift 50.2m
el.append(srwlib.SRWLOptD(
_L=v.op_HFM_Watchpoint_L,
))
pp.append(v.op_HFM_Watchpoint_pp)
elif el_name == 'Watchpoint':
# Watchpoint: watch 50.4m
pass
elif el_name == 'Watchpoint_Mask':
# Watchpoint_Mask: drift 50.4m
el.append(srwlib.SRWLOptD(
_L=v.op_Watchpoint_Mask_L,
))
pp.append(v.op_Watchpoint_Mask_pp)
elif el_name == 'Mask':
# Mask: mask 50.6m
el.append(srwlib.srwl_opt_setup_mask(
_delta=v.op_Mask_delta,
_atten_len=v.op_Mask_atten_len,
_thick=v.op_Mask_thick,
_grid_sh=v.op_Mask_grid_sh,
_grid_dx=v.op_Mask_grid_dx,
_grid_dy=v.op_Mask_grid_dy,
_pitch_x=v.op_Mask_pitch_x,
_pitch_y=v.op_Mask_pitch_y,
_grid_nx=v.op_Mask_grid_nx,
_grid_ny=v.op_Mask_grid_ny,
_mask_Nx=v.op_Mask_mask_Nx,
_mask_Ny=v.op_Mask_mask_Ny,
_grid_angle=v.op_Mask_gridTiltAngle,
_hx=v.op_Mask_hx,
_hy=v.op_Mask_hy,
_mask_x0=v.op_Mask_mask_x0,
_mask_y0=v.op_Mask_mask_y0,
))
pp.append(v.op_Mask_pp)
elif el_name == 'Watchpoint2':
# Watchpoint2: watch 50.6m
pass
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
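# (Added note) SRWLOptC pairs el[i] with pp[i]: each optical element gets the
# propagation-parameter list appended alongside it in set_optics(), and the
# trailing v.op_fin_pp entry supplies the final post-propagation (resize)
# parameters.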
varParam = [
['name', 's', 'Mask example', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
['gbm_x', 'f', 0.0, 'average horizontal coordinates of waist [m]'],
['gbm_y', 'f', 0.0, 'average vertical coordinates of waist [m]'],
['gbm_z', 'f', 0.0, 'average longitudinal coordinate of waist [m]'],
['gbm_xp', 'f', 0.0, 'average horizontal angle at waist [rad]'],
    ['gbm_yp', 'f', 0.0, 'average vertical angle at waist [rad]'],
['gbm_ave', 'f', 9000.0, 'average photon energy [eV]'],
['gbm_pen', 'f', 0.001, 'energy per pulse [J]'],
['gbm_rep', 'f', 1, 'rep. rate [Hz]'],
['gbm_pol', 'f', 1, 'polarization 1- lin. hor., 2- lin. vert., 3- lin. 45 deg., 4- lin.135 deg., 5- circ. right, 6- circ. left'],
['gbm_sx', 'f', 3e-06, 'rms beam size vs horizontal position [m] at waist (for intensity)'],
['gbm_sy', 'f', 3e-06, 'rms beam size vs vertical position [m] at waist (for intensity)'],
['gbm_st', 'f', 1e-13, 'rms pulse duration [s] (for intensity)'],
['gbm_mx', 'f', 0, 'transverse Gauss-Hermite mode order in horizontal direction'],
['gbm_my', 'f', 0, 'transverse Gauss-Hermite mode order in vertical direction'],
['gbm_ca', 's', 'c', 'treat _sigX, _sigY as sizes in [m] in coordinate representation (_presCA="c") or as angular divergences in [rad] in angular representation (_presCA="a")'],
['gbm_ft', 's', 't', 'treat _sigT as pulse duration in [s] in time domain/representation (_presFT="t") or as bandwidth in [eV] in frequency domain/representation (_presFT="f")'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
    ['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
    ['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.002, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 2048, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.002, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 2048, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 2, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'g', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# VFM: ellipsoidMirror
['op_VFM_hfn', 's', 'None', 'heightProfileFile'],
['op_VFM_dim', 's', 'x', 'orientation'],
['op_VFM_p', 'f', 50.0, 'firstFocusLength'],
['op_VFM_q', 'f', 0.4, 'focalLength'],
['op_VFM_ang', 'f', 0.003, 'grazingAngle'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.999995500003375, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', -0.002999995500002025, 'tangentialVectorY'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_HFM: drift
['op_VFM_HFM_L', 'f', 0.20000000000000284, 'length'],
# HFM: ellipsoidMirror
['op_HFM_hfn', 's', 'None', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_p', 'f', 50.0, 'firstFocusLength'],
['op_HFM_q', 'f', 0.2, 'focalLength'],
['op_HFM_ang', 'f', 0.003, 'grazingAngle'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_size_tang', 'f', 0.2, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_HFM_nvx', 'f', 0.999995500003375, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.002999995500002025, 'normalVectorZ'],
['op_HFM_tvx', 'f', -0.002999995500002025, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_Watchpoint: drift
['op_HFM_Watchpoint_L', 'f', 0.19999999999999574, 'length'],
# Watchpoint_Mask: drift
['op_Watchpoint_Mask_L', 'f', 0.20000000000000284, 'length'],
# Mask: mask
['op_Mask_delta', 'f', 1.0, 'refractiveIndex'],
['op_Mask_atten_len', 'f', 1.0, 'attenuationLength'],
['op_Mask_thick', 'f', 1.0, 'maskThickness'],
['op_Mask_grid_sh', 'f', 0, 'gridShape'],
['op_Mask_grid_dx', 'f', 5e-06, 'horizontalGridDimension'],
['op_Mask_grid_dy', 'f', 5e-06, 'verticalGridDimension'],
['op_Mask_pitch_x', 'f', 2e-05, 'horizontalGridPitch'],
['op_Mask_pitch_y', 'f', 2e-05, 'verticalGridPitch'],
['op_Mask_gridTiltAngle', 'f', 0.4363323129985824, 'gridTiltAngle'],
['op_Mask_hx', 'f', 7.319999999999999e-07, 'horizontalSamplingInterval'],
['op_Mask_hy', 'f', 7.319999999999999e-07, 'verticalSamplingInterval'],
['op_Mask_mask_x0', 'f', 0.0, 'horizontalMaskCoordinate'],
['op_Mask_mask_y0', 'f', 0.0, 'verticalMaskCoordinate'],
['op_Mask_mask_Nx', 'i', 1024, 'horizontalPixelsNumber'],
['op_Mask_mask_Ny', 'i', 1024, 'verticalPixelsNumber'],
['op_Mask_grid_nx', 'i', 21, 'horizontalGridsNumber'],
['op_Mask_grid_ny', 'i', 21, 'verticalGridsNumber'],
#---Propagation parameters
['op_VFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_Watchpoint'],
['op_Watchpoint_Mask_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Watchpoint_Mask'],
['op_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
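#Example (illustrative, not part of the generated file): to double the
#horizontal range and double the horizontal resolution at the final resize
#step, elements [5] and [6] of 'op_fin_pp' above would be set to 2.0:
# ['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 2.0, 2.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],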
]
def main():
v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
op = set_optics(v)
v.si = True
v.si_pl = 'xy'
v.ws = True
v.ws_pl = 'xy'
mag = None
if v.rs_type == 'm':
mag = srwlib.SRWLMagFldC()
mag.arXc.append(0)
mag.arYc.append(0)
mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
mag.arZc.append(v.mp_zc)
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
main()
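# Usage note (an assumption about srwl_bl conventions, not stated in this
# file): since the options are registered via srwl_uti_parse_options(...,
# use_sys_argv=True), each varParam entry can typically be overridden on the
# command line by name, e.g.:
#   python mask-example.py --op_r=25.0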
|
mkeilman/sirepo
|
tests/template/srw_generate_data/mask-example.py
|
Python
|
apache-2.0
| 25,057
|
[
"Gaussian"
] |
661f428ee68da258dcca73107d1edba26d9b1da6297879583b239246b23b66e5
|
#!/usr/bin/env python3
# This file contains usage examples for the scipy.optimize module
from __future__ import division
import numpy as np
from numpy.testing import assert_allclose
from scipy import optimize
# -- root --
# `root` is a function that solves a vector equation f(x) = 0. It provides an
# interface to various root-finding methods. All of them perform numerical
# iteration, so `root` needs an initial guess for the solution.
# - 1-D example -
def parabola(x):
"""Equation with a solution of +-sqrt(2)"""
return x**2 - 2
result = optimize.root(parabola, 1) # 1 is an initial guess
assert isinstance(result, optimize.OptimizeResult)
assert result.success
assert_allclose(np.sqrt(2), result.x[0]) # .x is an array even in 1-D case
# In some cases the optimization will not succeed:
# Using the default method (at least for scipy v. 1.0):
result = optimize.root(parabola, 0, method='hybr')
assert not result.success
# The optimization can be sped up by providing the Jacobian of the function:
def parabola_jac(x):
"""Jacobian of parabola()"""
return 2*x
result_w_jac = optimize.root(parabola, 1e-5, jac=parabola_jac, tol=1e-9)
assert result_w_jac.success
result_wo_jac = optimize.root(parabola, 1e-5, tol=1e-9)
assert result_wo_jac.success
assert_allclose(result_wo_jac.x[0], result_w_jac.x[0])
# Compare number of function calls:
assert result_w_jac.nfev < result_wo_jac.nfev
# But calls to the Jacobian also take some time!
assert result_w_jac.nfev + result_w_jac.njev < result_wo_jac.nfev
# - 2-D example -
# Let's find the solutions of the equation $x^2 + bx + c = 0$. Of course, the
# right way to do this is to use a polynomial root-finding algorithm, but
# treat it as a simple example.
# Vieta's formulas give $x_0 + x_1 = -b$ and $x_0 x_1 = c$:
def vieta(x, b, c):
return [x[0] + x[1] + b,
x[0] * x[1] - c]
def vieta_jac(x, b, c):
return [
[1, 1],
[x[1], x[0]]
]
b, c = -1, -1
result = optimize.root(vieta, [0, 1], args=(b, c), jac=vieta_jac)
assert result.success
# `numpy.roots(p)` provides roots of polynomial
# `p[0] * x**(len(p)-1) + p[1] * x**(len(p) - 2) ... + p[-1]`:
roots = np.roots((1, b, c))
assert_allclose(np.sort(roots), np.sort(result.x))
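# - Bracketing methods (an illustrative addition) -
# A minimal sketch, reusing parabola() from above: when a scalar function
# changes sign on an interval, `optimize.brentq` finds the root from the
# bracket alone, with no initial guess required.
bracket_root = optimize.brentq(parabola, 0, 2)  # parabola(0) < 0 < parabola(2)
assert_allclose(np.sqrt(2), bracket_root)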
# -- minimize --
# `minimize` provides a common interface to a collection of minimization
# methods. See `lmfit` package that provides prettier interface to augmented
# collection of methods: https://lmfit.github.io/lmfit-py/
# - Multivariate function -
def func(x, a, b):
"""Negative Gaussian function with argument a*x+b and sigma=10"""
y = np.dot(a, x) + b
return -np.exp(-np.sum(np.square(y))/200)
a = np.array([[1, 2, 3], [1, 1, 1], [0, 0, 1]])
b = np.array([3, 2, 1])
result = optimize.minimize(func, np.ones_like(b), args=(a, b))
assert result.success
x = np.linalg.solve(a, -b)
assert_allclose(x, result.x, rtol=1e-3)
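# An analytic gradient helps `minimize` too (a minimal sketch; func_grad is a
# hand-derived gradient of func() added here for illustration):
def func_grad(x, a, b):
    """Gradient of func() with respect to x"""
    y = np.dot(a, x) + b
    # d/dx[-exp(-sum(y**2)/200)] = exp(-sum(y**2)/200) * (a.T @ y) / 100
    return -func(x, a, b) * np.dot(a.T, y) / 100
result = optimize.minimize(func, np.ones_like(b), args=(a, b),
                           method='BFGS', jac=func_grad)
assert result.success
assert_allclose(x, result.x, rtol=1e-3)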
# - Various optimization problems -
# A lot of optimization problems can be solved using machine learning methods.
# These days there are a lot of machine learning libraries and frameworks, e.g.
# see `scikit-learn` and `TensorFlow`:
# http://scikit-learn.org
# https://www.tensorflow.org
# # Here we solve a simple problem of cluster analysis using the simplest
# # approach. Let's consider that we have two comparable sets of objects in
# # N-dimensional space and we search for the best separating hyperplane
# # between them.
# from scipy.stats import multivariate_normal as m_normal
# mean1 = np.array([1, 2,3])
# mean2 = np.array([-3,-3,5])
# cov1 = np.diag([1.5, 2.0, 2.5])
# cov2 = np.array([
# [ 1.0, -0.3, 0.3],
# [-0.3, 2.5, 0.3],
# [ 0.3, -0.3, 3.0],
# ])
# size = 100
# random_state = np.random.RandomState(13) # Lucky number
# y1 = m_normal.rvs(mean=mean1, cov=cov1, random_state=random_state)
# y2 = m_normal.rvs(mean=mean2, cov=cov2, random_state=random_state)
|
hombit/scientific_python
|
scientific_python/d_scipy/optimize.py
|
Python
|
mit
| 3,894
|
[
"Gaussian"
] |
65d3c944078fc6495e5b709ccb6dabd40406499445d0df3143b08ca8203406d0
|
""" The CS! (Configuration Service)
The following options can be set for the Configuration Service.
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN Server
:end-before: ##END
:dedent: 2
:caption: Service options
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
from DIRAC.ConfigurationSystem.private.ServiceInterface import ServiceInterface
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Security.Properties import CS_ADMINISTRATOR
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
gServiceInterface = None
gPilotSynchronizer = None
def initializeConfigurationHandler(serviceInfo):
global gServiceInterface
gServiceInterface = ServiceInterface(serviceInfo['URL'])
return S_OK()
class ConfigurationHandler(RequestHandler):
""" The CS handler
"""
types_getVersion = []
@classmethod
def export_getVersion(cls):
return S_OK(gServiceInterface.getVersion())
types_getCompressedData = []
@classmethod
def export_getCompressedData(cls):
sData = gServiceInterface.getCompressedConfigurationData()
return S_OK(sData)
types_getCompressedDataIfNewer = [six.string_types]
@classmethod
def export_getCompressedDataIfNewer(cls, sClientVersion):
sVersion = gServiceInterface.getVersion()
retDict = {'newestVersion': sVersion}
if sClientVersion < sVersion:
retDict['data'] = gServiceInterface.getCompressedConfigurationData()
return S_OK(retDict)
types_publishSlaveServer = [six.string_types]
@classmethod
def export_publishSlaveServer(cls, sURL):
gServiceInterface.publishSlaveServer(sURL)
return S_OK()
types_commitNewData = [six.string_types]
def export_commitNewData(self, sData):
global gPilotSynchronizer
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
return gServiceInterface.updateConfiguration(sData, credDict['username'])
types_forceGlobalConfigurationUpdate = []
auth_forceGlobalConfigurationUpdate = [CS_ADMINISTRATOR]
def export_forceGlobalConfigurationUpdate(self):
"""
Attempt to request all the configured services to update their configuration
:return: S_OK
"""
return gServiceInterface.forceGlobalUpdate()
types_writeEnabled = []
@classmethod
def export_writeEnabled(cls):
return S_OK(gServiceInterface.isMaster())
types_getCommitHistory = []
@classmethod
def export_getCommitHistory(cls, limit=100):
if limit > 100:
limit = 100
history = gServiceInterface.getCommitHistory()
if limit:
history = history[:limit]
return S_OK(history)
types_getVersionContents = [list]
@classmethod
def export_getVersionContents(cls, versionList):
contentsList = []
for version in versionList:
retVal = gServiceInterface.getVersionContents(version)
if retVal['OK']:
contentsList.append(retVal['Value'])
else:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
return S_OK(contentsList)
types_rollbackToVersion = [six.string_types]
def export_rollbackToVersion(self, version):
retVal = gServiceInterface.getVersionContents(version)
if not retVal['OK']:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
return gServiceInterface.updateConfiguration(retVal['Value'],
credDict['username'],
updateVersionOption=True)
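# Example (illustrative only, not part of the service): clients normally reach
# the exported methods above through a DISET RPC client, e.g.:
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   rpc = RPCClient('Configuration/Server')
#   res = rpc.getVersion()
#   if res['OK']:
#       print(res['Value'])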
|
yujikato/DIRAC
|
src/DIRAC/ConfigurationSystem/Service/ConfigurationHandler.py
|
Python
|
gpl-3.0
| 3,844
|
[
"DIRAC"
] |
55b5d169ff4531205afd35fef57f7fc746541f9b00bc1acf8eabdfc6a4383335
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (Lesser GPL)
#
# Copyright (C) 2009-2012 Rosen Diankov <rosen.diankov@gmail.com>
#
# ikfast is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ikfast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. _ikfast_compiler:
IKFast: The Robot Kinematics Compiler
-------------------------------------
.. image:: ../../images/ikfast_robots.jpg
:width: 640
IKFast analytically solves robot inverse kinematics equations and generates optimized C++ files.
The inverse kinematics equations arise from attempting to place the robot end effector coordinate
system in the world while maintaining joint and user-specified constraints. User-specified constraints make up many different `IK Types`_, each of them having advantages depending on the task.
IKFast will work with any number of joints arranged in a chain; this is defined by the `Robot.Manipulator`. For chains containing more degrees of freedom (DOF) than the IK type requires, the user can set arbitrary values of a subset of the joints until the number of unknown joints matches the degrees of freedom of the IK type.
It is not trivial to create hand-optimized inverse kinematics solutions for arms that capture all degenerate cases. Because closed-form IK speeds up many tasks, including planning algorithms, it really is a must for most robotics researchers.
Closed-form solutions are necessary for motion planning for two reasons:
- Numerical inverse kinematics solvers will always be much slower than closed form solutions. Planners require being able to process thousands of configurations per second. The closed-form code generated by ikfast can produce solutions on the order of **~4 microseconds**! As a comparison, most numerical solutions are on the order of 10 milliseconds (assuming good convergence).
- The null space of the solution set can be explored because all solutions are computed.
Features
========
- Can handle robots with arbitrary joint complexity like non-intersecting axes.
- All possible discrete solutions calculated (can be up to 16).
- Generated C++ code **independent** of OpenRAVE or any other library.
- Automatically detects degenerate cases where 2 or more axes align and cause infinite solutions.
- Invalid solutions are detected by checking if square roots are given negative values or arc sines and arc cosines are given inputs exceeding the [-1,1] range.
- All divide by zero conditions are automatically checked and handled.
.. _ikfast_types:
IK Types
--------
The following inverse kinematics types are supported:
* **Transform6D** - end effector reaches desired 6D transformation
* **Rotation3D** - end effector reaches desired 3D rotation
* **Translation3D** - end effector origin reaches desired 3D translation
* **Direction3D** - direction on end effector coordinate system reaches desired direction
* **Ray4D** - ray on end effector coordinate system reaches desired global ray
* **Lookat3D** - direction on end effector coordinate system points to desired 3D position
* **TranslationDirection5D** - end effector origin and direction reaches desired 3D translation and direction. Can be thought of as Ray IK where the origin of the ray must coincide.
* **TranslationXY2D** - end effector origin reaches desired XY translation position, Z is ignored. The coordinate system is relative to the base link.
* **TranslationLocalGlobal6D** - local point on end effector origin reaches desired 3D global point. Because both local point and global point can be specified, there are 6 values.
* **TranslationXAxisAngle4D**, **TranslationYAxisAngle4D**, **TranslationZAxisAngle4D** - end effector origin reaches desired 3D translation, manipulator direction makes a specific angle with x/y/z-axis (defined in the manipulator base link's coordinate system)
* **TranslationXAxisAngleZNorm4D**, **TranslationYAxisAngleXNorm4D**, **TranslationZAxisAngleYNorm4D** - end effector origin reaches desired 3D translation, manipulator direction needs to be orthogonal to z, x, or y axis and be rotated at a certain angle starting from the x, y, or z axis (defined in the manipulator base link's coordinate system)
The possible solve methods are defined by `ikfast.IKFastSolver.GetSolvers()`
Usage
-----
The main file ikfast.py can be used both as a library and as an executable program. For advanced users, it is also possible to run ikfast.py as a stand-alone program, which makes it mostly independent of the OpenRAVE run-time.
**However, the recommended way of using IKFast** is through the OpenRAVE :mod:`.databases.inversekinematics` database generator which directly loads the IK into OpenRAVE as an interface.
Stand-alone Executable
======================
To get help and a description of the ikfast arguments type
.. code-block:: bash
python `openrave-config --python-dir`/openravepy/_openravepy_/ikfast.py --help
A simple example to generate IK for setting the 3rd joint free of the Barrett WAM is
.. code-block:: bash
python `openrave-config --python-dir`/openravepy/_openravepy_/ikfast.py --robot=robots/barrettwam.robot.xml --baselink=0 --eelink=7 --savefile=ik.cpp --freeindex=2
Through Python
==============
IKFast can also be used as a library in python. Generating 6D IK for the Barrett WAM while setting the 3rd joint free can be achieved with:
.. code-block:: python
env = Environment()
kinbody = env.ReadRobotXMLFile('robots/barrettwam.robot.xml')
env.Add(kinbody)
solver = ikfast.IKFastSolver(kinbody=kinbody)
chaintree = solver.generateIkSolver(baselink=0,eelink=7,freeindices=[2],solvefn=ikfast.IKFastSolver.solveFullIK_6D)
code = solver.writeIkSolver(chaintree)
open('ik.cpp','w').write(code)
.. _ikfast_generatedcpp:
Using Generated IK Files
========================
The common usage is to generate a C++ file that can be compiled into a stand-alone shared object/DLL, an executable program, or linked in statically to a bigger project. For more complex kinematics, LAPACK_ is needed. Here is the header file, which can be found in `share/openrave-X.Y/python/ikfast.h <../../coreapihtml/ikfast_8h.html>`_.
Compiling with GCC
~~~~~~~~~~~~~~~~~~
The most basic command is:
.. code-block:: bash
gcc -lstdc++ -o ik ik.cpp
This will generate a small program that outputs all solutions given the end effector with respect to the robot base.
Using gcc, "-llapack" has to be added whenever LAPACK_ is needed. For MSVC++, users will have to compile lapack and link it themselves.
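For example, the basic command above becomes (assuming LAPACK is installed system-wide):
.. code-block:: bash
gcc -lstdc++ -llapack -o ik ik.cpp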
Compiling with MSVC
~~~~~~~~~~~~~~~~~~~
`LAPACK For Windows`_ should be installed in order to get complex kinematics linking correctly.
Details
-------
Terminology:
- **solve joints** - the joints to solve for using inverse kinematics
- **free joints** - the joints that are specified before the IK is run, these values are known at runtime, but not known at IK generation time.
The top level class is `ikfast.IKFastSolver` and generates an Abstract Syntax Tree (AST) using definitions from `ikfast.AST`. The AST is then passed to the language-specific generators defined in `ikfast.CodeGenerators`.
Internal symbolic math uses sympy_. Infinite precision fractions are used in order to keep track of linearly independent equations and when they evaluate to 0. The infinite precision fractions are converted to decimals in the generators.
.. _LAPACK: http://www.netlib.org/lapack/
.. _`LAPACK For Windows`: http://icl.cs.utk.edu/lapack-for-windows/
.. _sympy: http://code.google.com/p/sympy/
Open Issues
-----------
1. Currently ikfast does not handle big decimal numbers well. For example, defining the axes or anchors as 1.032513241 will produce very big fractions and make things slow.
2. There are cases when axes align and there are infinite solutions. Although ikfast can detect such cases, a lot more work is needed in this area.
3. For 6D IK, there are still mechanisms it cannot solve; please send the kinematics model if such a situation is encountered.
4. There are 10 different types of IK; currently the Ray4D IK needs a lot of work.
FAQ
---
Q. **ikfast has been running for more than an hour, will it ever finish?**
A. Most likely not; a solver usually finishes within 10 minutes.
----
"""
from __future__ import with_statement # for python 2.5
from sympy import __version__ as sympy_version
if sympy_version < '0.7.0':
raise ImportError('ikfast needs sympy 0.7.x or greater')
sympy_smaller_073 = sympy_version < '0.7.3'
__author__ = 'Rosen Diankov'
__copyright__ = 'Copyright (C) 2009-2012 Rosen Diankov <rosen.diankov@gmail.com>'
__license__ = 'Lesser GPL, Version 3'
__version__ = '70' # also in ikfast.h
import sys, copy, time, math, datetime
import __builtin__
from optparse import OptionParser
try:
from openravepy.metaclass import AutoReloader
from openravepy import axisAngleFromRotationMatrix
except:
axisAngleFromRotationMatrix = None
class AutoReloader:
pass
import numpy # required for fast eigenvalue computation
from sympy import *
try:
import mpmath # on some distributions, sympy does not have mpmath in its scope
except ImportError:
pass
try:
import re # for latex cleanup
except ImportError:
pass
try:
from math import isinf, isnan
except ImportError:
# python 2.5
from numpy import isinf as _isinf
from numpy import isnan as _isnan
def isinf(x): return _isinf(float(x))
def isnan(x): return _isnan(float(x))
from operator import itemgetter
from itertools import izip, chain, product
try:
from itertools import combinations, permutations
except ImportError:
def combinations(items,n):
if n == 0: yield[]
else:
_internal_items=list(items)
for i in xrange(len(_internal_items)):
for cc in combinations(_internal_items[i+1:],n-1):
yield [_internal_items[i]]+cc
def permutations(iterable, r=None):
# permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
# permutations(range(3)) --> 012 021 102 120 201 210
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
import logging
log = logging.getLogger('openravepy.ikfast')
try:
# not necessary, just used for testing
import swiginac
using_swiginac = True
except ImportError:
using_swiginac = False
CodeGenerators = {}
# try:
# import ikfast_generator_vb
# CodeGenerators['vb'] = ikfast_generator_vb.CodeGenerator
# CodeGenerators['vb6'] = ikfast_generator_vb.CodeGeneratorVB6
# CodeGenerators['vb6special'] = ikfast_generator_vb.CodeGeneratorVB6Special
# except ImportError:
# pass
try:
import ikfast_generator_cpp
CodeGenerators['cpp'] = ikfast_generator_cpp.CodeGenerator
IkType = ikfast_generator_cpp.IkType
except ImportError:
pass
# changes to sympy (the monkeypatch below is installed only for sympy < 0.7.3):
# core/power.py Pow
def Pow_eval_subs(self, old, new):
if self == old:
return new
if old.func is self.func and self.base == old.base:
coeff1, terms1 = self.exp.as_coeff_mul()
coeff2, terms2 = old.exp.as_coeff_mul()
if terms1==terms2:
# pow = coeff1/coeff2
# if pow.is_Integer or self.base.is_commutative:
# return Pow(new, pow) # (x**(2*y)).subs(x**(3*y),z) -> z**(2/3)
# only divide if coeff2 is a divisor of coeff1
if coeff1.is_integer and coeff2.is_integer and (coeff1/coeff2).is_integer:
return new ** (coeff1/coeff2) # (x**(2*y)).subs(x**(3*y),z) -> z**(2/3*y)
if old.func is C.exp:
coeff1, terms1 = old.args[0].as_coeff_mul()
coeff2, terms2 = (self.exp*C.log(self.base)).as_coeff_mul()
if terms1==terms2:
# only divide if coeff2 is a divisor of coeff1
if coeff1.is_integer and coeff2.is_integer and (coeff1/coeff2).is_integer:
return new ** (coeff1/coeff2) # (x**(2*y)).subs(exp(3*y*log(x)),z) -> z**(2/3*y)
return Pow(self.base._eval_subs(old, new), self.exp._eval_subs(old, new))
if sympy_smaller_073:
power.Pow._eval_subs = Pow_eval_subs
# simplify/simplify.py
# def custom_trigsimp_nonrecursive(expr, deep=False):
# """
# A nonrecursive trig simplifier, used from trigsimp.
#
# == Usage ==
# trigsimp_nonrecursive(expr) -> reduces expression by using known trig
# identities
#
# == Notes ==
#
# deep ........ apply trigsimp inside functions
#
# == Examples ==
# >>> from sympy import cos, sin, log
# >>> from sympy.simplify.simplify import trigsimp, trigsimp_nonrecursive
# >>> from sympy.abc import x, y
# >>> e = 2*sin(x)**2 + 2*cos(x)**2
# >>> trigsimp(e)
# 2
# >>> trigsimp_nonrecursive(log(e))
# log(2*cos(x)**2 + 2*sin(x)**2)
# >>> trigsimp_nonrecursive(log(e), deep=True)
# log(2)
#
# """
# from sympy.core.basic import S
# sin, cos, tan, cot = C.sin, C.cos, C.tan, C.cot
#
# if expr.is_Function:
# if deep:
# return expr.func(trigsimp_nonrecursive(expr.args[0], deep))
# elif expr.is_Mul:
# ret = S.One
# for x in expr.args:
# ret *= trigsimp_nonrecursive(x, deep)
#
# return ret
# elif expr.is_Pow:
# return Pow(trigsimp_nonrecursive(expr.base, deep),
# trigsimp_nonrecursive(expr.exp, deep))
# elif expr.is_Add:
# # TODO this needs to be faster
#
# # The types of trig functions we are looking for
# a,b,c = map(Wild, 'abc')
# matchers = (
# (a*sin(b)**2, a - a*cos(b)**2),
# (a*tan(b)**2, a*(1/cos(b))**2 - a),
# (a*cot(b)**2, a*(1/sin(b))**2 - a)
# )
#
# # Scan for the terms we need
# ret = S.Zero
# for term in expr.args:
# term = trigsimp_nonrecursive(term, deep)
# res = None
# for pattern, result in matchers:
# res = term.match(pattern)
# if res is not None:
# ret += result.subs(res)
# break
# if res is None:
# ret += term
#
# # Reduce any lingering artifacts, such as sin(x)**2 changing
# # to 1-cos(x)**2 when sin(x)**2 was "simpler"
# artifacts = (
# (a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
# (a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
# (a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin)
# )
#
# expr = ret
# for pattern, result, ex in artifacts:
# # Substitute a new wild that excludes some function(s)
# # to help influence a better match. This is because
# # sometimes, for example, 'a' would match sec(x)**2
# a_t = Wild('a', exclude=[ex])
# pattern = pattern.subs(a, a_t)
# result = result.subs(a, a_t)
# if expr.is_number:
# continue
# try:
# m = expr.match(pattern)
# except (TypeError):
# break
#
# while m is not None:
# if m[a_t] == 0 or -m[a_t] in m[c].args or m[a_t] + m[c] == 0:
# break
# expr = result.subs(m)
# if expr.is_number:
# continue
# try:
# m = expr.match(pattern)
# except (TypeError):
# break
#
#
# return expr
# return expr
#
# simplify.simplify.trigsimp_nonrecursive = custom_trigsimp_nonrecursive
class AST:
"""Abstarct Syntax Tree class definitions specific for evaluating complex math equations.
"""
class SolverBase(object):
def GetChildrenOfType(self, classinstance):
return []
def GetZeroThreshold(self):
"""returns the threshold to use to check for zeros
"""
return None
class SolverSolution(SolverBase):
"""Contains equations for evaluating one unknown variable. The variable can have multiple solutions, and the solution is only valid if every equation in checkforzeros is non-zero
"""
jointname = None
jointeval = None
jointevalcos = None
jointevalsin = None
AddPiIfNegativeEq = None
isHinge = True
checkforzeros = None
thresh = None
AddHalfTanValue = False
dictequations = None
presetcheckforzeros = None
equationsused = None
"""Meaning of FeasibleIsZeros:
If set to false, then the solution is feasible only if all of these equations evaluate to non-zero.
If set to true, the solution is feasible only if all of these equations evaluate to zero.
"""
FeasibleIsZeros = False
score = None
def __init__(self, jointname, jointeval=None,jointevalcos=None,jointevalsin=None,AddPiIfNegativeEq=None,isHinge=True,thresh=0.000001):
self.jointname = jointname
self.jointeval = jointeval
self.jointevalcos = jointevalcos
self.jointevalsin = jointevalsin
self.AddPiIfNegativeEq = AddPiIfNegativeEq
self.isHinge=isHinge
self.thresh = thresh
self.presetcheckforzeros = []
self.dictequations = []
self.equationsused = []
assert(self.checkValidSolution())
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.jointevalcos is not None:
self.jointevalcos = [e.subs(solsubs) for e in self.jointevalcos]
if self.jointevalsin is not None:
self.jointevalsin = [e.subs(solsubs) for e in self.jointevalsin]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.presetcheckforzeros = [e.subs(solsubs) for e in self.presetcheckforzeros]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
if not self.checkValidSolution():
raise IKFastSolver.CannotSolveError('substitution produced invalid results')
return self
def generate(self, generator):
assert(self.checkValidSolution())
return generator.generateSolution(self)
def end(self, generator):
return generator.endSolution(self)
def numsolutions(self):
n=0
if self.jointeval is not None:
n += len(self.jointeval)
if self.jointevalcos is not None:
n += 2*len(self.jointevalcos)
if self.jointevalsin is not None:
n += 2*len(self.jointevalsin)
return n
def checkValidSolution(self):
valid=True
if self.jointeval is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
if self.jointevalsin is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalsin])
if self.jointevalcos is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalcos])
return valid
def getPresetCheckForZeros(self):
return self.presetcheckforzeros
def getEquationsUsed(self):
return self.equationsused
def GetZeroThreshold(self):
return self.thresh
class SolverPolynomialRoots(SolverBase):
"""find all roots of the polynomial and plug it into jointeval. poly should be Poly
"""
jointname = None
poly = None
jointeval = None
jointevalcos = None # not used
jointevalsin = None # not used
checkforzeros = None
postcheckforzeros = None # fail if any zero
postcheckfornonzeros = None # fail if any nonzero
postcheckforNumDenom = None # list of (A,B) pairs where Ax=B was used. Fail if A==0&&B!=0
postcheckforrange = None # checks that value is within [-1,1]
dictequations = None
thresh = 1e-8
isHinge = True
FeasibleIsZeros = False
AddHalfTanValue = False
score = None
equationsused = None
def __init__(self, jointname, poly=None, jointeval=None,isHinge=True):
self.poly = poly
assert(self.poly.degree(0)>0)
self.jointname=jointname
self.jointeval = jointeval
self.isHinge = isHinge
self.dictequations = []
self.equationsused = []
def numsolutions(self):
return self.poly.degree(0)
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
if self.postcheckforzeros is not None:
self.postcheckforzeros = [e.subs(solsubs) for e in self.postcheckforzeros]
if self.postcheckfornonzeros is not None:
self.postcheckfornonzeros = [e.subs(solsubs) for e in self.postcheckfornonzeros]
if self.postcheckforrange is not None:
self.postcheckforrange = [e.subs(solsubs) for e in self.postcheckforrange]
if self.postcheckforNumDenom is not None:
self.postcheckforNumDenom = [e.subs(solsubs) for e in self.postcheckforNumDenom]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
if self.poly is not None:
self.poly = Poly(self.poly.subs(solsubs),*self.poly.gens)
assert(self.checkValidSolution())
return self
def generate(self, generator):
return generator.generatePolynomialRoots(self)
def end(self, generator):
return generator.endPolynomialRoots(self)
def checkValidSolution(self):
if self.poly is not None:
valid = IKFastSolver.isValidSolution(self.poly.as_expr())
if self.jointeval is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
return valid
def getPresetCheckForZeros(self):
# make sure that all the coefficients containing higher-order variables are not 0
zeroeq = S.Zero
for monom, coeff in self.poly.terms():
if monom[0] > 0:
zeroeq += abs(coeff.subs(self.dictequations))
return [zeroeq]#self.poly.LC()]
def getEquationsUsed(self):
return self.equationsused
def GetZeroThreshold(self):
return self.thresh
class SolverCoeffFunction(SolverBase):
"""Evaluate a set of coefficients and pass them to a custom function which will then return all possible values of the specified variables in jointnames.
"""
jointnames = None
jointeval = None
isHinges = True
exportvar = None
exportcoeffeqs = None
rootmaxdim = None
exportfnname = None
jointevalcos = None # used for half angles
jointevalsin = None # used for half angles
checkforzeros = None
FeasibleIsZeros = False
score = None
presetcheckforzeros = None
dictequations = None
equationsused = None
def __init__(self, jointnames, jointeval=None, exportvar=None, exportcoeffeqs=None,exportfnname=None,isHinges=None,rootmaxdim=16,jointevalcos=None,jointevalsin=None):
self.jointnames=jointnames
self.jointeval = jointeval
self.isHinges = isHinges
self.exportvar=exportvar
self.exportcoeffeqs=exportcoeffeqs
self.exportfnname=exportfnname
self.rootmaxdim=rootmaxdim
self.jointevalsin=jointevalsin
self.jointevalcos=jointevalcos
self.presetcheckforzeros = []
self.dictequations = []
self.equationsused = []
def numsolutions(self):
return self.rootmaxdim
def subs(self,solsubs):
if self.jointeval is not None:
self.jointeval = [e.subs(solsubs) for e in self.jointeval]
if self.jointevalcos is not None:
self.jointevalcos = [e.subs(solsubs) for e in self.jointevalcos]
if self.jointevalsin is not None:
self.jointevalsin = [e.subs(solsubs) for e in self.jointevalsin]
if self.checkforzeros is not None:
self.checkforzeros = [e.subs(solsubs) for e in self.checkforzeros]
self.dictequations = [(s,v.subs(solsubs)) for s,v in self.dictequations]
self.presetcheckforzeros = [e.subs(solsubs) for e in self.presetcheckforzeros]
self.equationsused = [e.subs(solsubs) for e in self.equationsused]
#if self.poly is not None:
# self.poly = Poly(self.poly.subs(solsubs)...)
assert(self.checkValidSolution())
return self
def generate(self, generator):
return generator.generateCoeffFunction(self)
def end(self, generator):
return generator.endCoeffFunction(self)
def checkValidSolution(self):
valid = True
#if self.poly is not None:
# valid = IKFastSolver.isValidSolution(self.poly.as_expr())
if self.jointeval is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointeval])
if self.jointevalcos is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalcos])
if self.jointevalsin is not None:
valid &= all([IKFastSolver.isValidSolution(e) for e in self.jointevalsin])
return valid
def getPresetCheckForZeros(self):
return self.presetcheckforzeros
def getEquationsUsed(self):
return self.equationsused
class SolverMatrixInverse(SolverBase):
"""Take the inverse of a large matirx and set the coefficients of the inverse to the symbols in Asymbols.
"""
A = None
Asymbols = None # has to be same size as B
checkforzeros = None
def __init__(self, A, Asymbols):
self.A = A
self.Asymbols = Asymbols
def subs(self,solsubs):
return self
def generate(self, generator):
return generator.generateMatrixInverse(self)
def end(self, generator):
return generator.endMatrixInverse(self)
def checkValidSolution(self):
return True
def getsubs(self,psubs):
Anew = self.A.subs(psubs).inv()
subs = []
for i in range(self.A.shape[0]):
for j in range(self.A.shape[1]):
if self.Asymbols[i][j] is not None:
subs.append((self.Asymbols[i][j],Anew[i,j]))
return subs
class SolverConditionedSolution(SolverBase):
"""set solutions based on evaluating equations
"""
dictequations = None
solversolutions = None # a list of solutions. If a solution's checkforzeros evaluates to all zeros, then that solution is used
thresh=0.000001
def __init__(self, solversolutions):
self.solversolutions = solversolutions
self.dictequations = []
def subs(self,solsubs):
for s in self.solversolutions:
s.subs(solsubs)
return self
def generate(self, generator):
return generator.generateConditionedSolution(self)
def end(self, generator):
return generator.endConditionedSolution(self)
def GetChildrenOfType(self, classinstance):
nodes = []
for childnode in self.solversolutions:
if isinstance(childnode, classinstance):
nodes.append(childnode)
nodes += childnode.GetChildrenOfType(classinstance)
return nodes
def GetZeroThreshold(self):
return self.thresh
class SolverBranchConds(SolverBase):
"""
take certain branches depending if a set of equations evaluate to zero.
Each branch can also have dictequations
"""
jointbranches = None # list of (checkzeroequations, branch, dictequations)
thresh = 0.000001
def __init__(self, jointbranches):
self.jointbranches = jointbranches
def generate(self, generator):
return generator.generateBranchConds(self)
def end(self, generator):
return generator.endBranchConds(self)
def GetChildrenOfType(self, classinstance):
nodes = []
for checkzeroequations, branch, extradictequations in self.jointbranches:
for childnode in branch:
if isinstance(childnode, classinstance):
nodes.append(childnode)
nodes += childnode.GetChildrenOfType(classinstance)
return nodes
def GetZeroThreshold(self):
return self.thresh
class SolverCheckZeros(SolverBase):
jointname = None
jointcheckeqs = None # only used for evaluation
zerobranch = None
nonzerobranch = None
anycondition=None
dictequations=None
thresh=None # a threshold of 1e-6 breaks hiro ik
equationsused = None
def __init__(self, jointname, jointcheckeqs, zerobranch, nonzerobranch,thresh=None,anycondition=True):
self.jointname = jointname
self.jointcheckeqs = jointcheckeqs
self.zerobranch = zerobranch
self.nonzerobranch = nonzerobranch
if thresh is None:
self.thresh = 0.000001
else:
self.thresh = thresh
self.anycondition = anycondition
self.dictequations = []
def generate(self, generator):
return generator.generateCheckZeros(self)
def end(self, generator):
return generator.endCheckZeros(self)
def getPresetCheckForZeros(self):
return []
def checkValidSolution(self):
for branch in self.nonzerobranch:
if not branch.checkValidSolution():
return False
for branch in self.zerobranch:
if not branch.checkValidSolution():
return False
return True
def numsolutions(self):
return 1
def subs(self,solsubs):
for branch in self.nonzerobranch:
if hasattr(branch,'subs'):
branch.subs(solsubs)
for branch in self.zerobranch:
if hasattr(branch,'subs'):
branch.subs(solsubs)
return self
def getEquationsUsed(self):
return self.equationsused
def GetChildrenOfType(self, classinstance):
nodes = []
for childnode in self.nonzerobranch + self.zerobranch:
if isinstance(childnode, classinstance):
nodes.append(childnode)
nodes += childnode.GetChildrenOfType(classinstance)
return nodes
def GetZeroThreshold(self):
return self.thresh
class SolverFreeParameter(SolverBase):
jointname = None
jointtree = None
def __init__(self, jointname, jointtree):
self.jointname = jointname
self.jointtree = jointtree
def generate(self, generator):
return generator.generateFreeParameter(self)
def end(self, generator):
return generator.endFreeParameter(self)
class SolverRotation(SolverBase):
T = None
jointtree = None
functionid=0
def __init__(self, T, jointtree):
self.T = T
self.jointtree = jointtree
self.dictequations = []
def generate(self, generator):
return generator.generateRotation(self)
def end(self, generator):
return generator.endRotation(self)
class SolverFunction(SolverBase):
jointtree = None
name='innerfn'
def __init__(self, name, jointtree):
self.name = name
self.jointtree = jointtree
self.dictequations = []
def generate(self, generator):
return generator.generateFunction(self)
def end(self, generator):
return generator.endFunction(self)
class SolverStoreSolution(SolverBase):
"""Called when all the unknowns have been solved to add a solution.
"""
alljointvars = None
checkgreaterzero = None # used for final sanity checks to ensure IK solution is consistent
thresh = 0
offsetvalues = None
isHinge = None
def __init__(self, alljointvars,checkgreaterzero=None,isHinge=None):
self.alljointvars = alljointvars
self.checkgreaterzero = checkgreaterzero
self.isHinge=isHinge
if isHinge is None:
log.warn('SolverStoreSolution.isHinge is not initialized')
self.isHinge = [True]*len(self.alljointvars)
def generate(self, generator):
return generator.generateStoreSolution(self)
def end(self, generator):
return generator.endStoreSolution(self)
class SolverSequence(SolverBase):
jointtrees = None
def __init__(self, jointtrees):
self.jointtrees = jointtrees
def generate(self, generator):
return generator.generateSequence(self)
def end(self, generator):
return generator.endSequence(self)
def GetChildrenOfType(self, classinstance):
nodes = []
for tree in self.jointtrees:
for childnode in tree:
if isinstance(childnode, classinstance):
nodes.append(childnode)
nodes += childnode.GetChildrenOfType(classinstance)
return nodes
class SolverBreak(SolverBase):
"""Terminates this scope"""
comment = None # a comment for the reason of the break
varsubs = None # variable substitutions that were valid at the break
othersolvedvars = None # the solved variables already
solsubs = None # the substitutions of the solved variables
endbranchtree = None # a node that points to the end of the tree
def __init__(self, comment, varsubs=list(), othersolvedvars=list(), solsubs=list(), globalsymbols=list(), endbranchtree=None):
self.comment = comment
self.varsubs = list(varsubs)
self.othersolvedvars = list(othersolvedvars)
self.solsubs = list(solsubs)
self.endbranchtree = endbranchtree
def generate(self,generator):
return generator.generateBreak(self)
def end(self,generator):
return generator.endBreak(self)
def checkValidSolution(self):
return True
class SolverIKChainTransform6D(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Tfk = None
Tee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Tee, jointtree,Tfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Tee = Tee
self.jointtree = jointtree
self.Tfk = Tfk
self.dictequations = []
def generate(self, generator):
return generator.generateChain(self)
def end(self, generator):
return generator.endChain(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Tfk = Tleft*self.Tfk
self.Tee = Tleftinv*self.Tee
class SolverIKChainRotation3D(SolverBase):
solvejointvars = None
freejointvars = None
Rfk = None
Ree = None
jointtree = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Ree, jointtree,Rfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Ree = Ree
self.Rfk=Rfk
self.jointtree = jointtree
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainRotation3D(self)
def end(self, generator):
return generator.endIKChainRotation3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Rfk = Tleft[0:3,0:3]*self.Rfk
self.Ree = Tleftinv[0:3,0:3]*self.Ree
class SolverIKChainTranslation3D(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
uselocaltrans = False
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainTranslation3D(self)
def end(self, generator):
return generator.endIKChainTranslation3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
class SolverIKChainTranslationXY2D(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainTranslationXY2D(self)
def end(self, generator):
return generator.endIKChainTranslationXY2D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:2,0:2]*self.Pfk+Tleft[0:2,3]
self.Pee = Tleftinv[0:2,0:2]*self.Pee+Tleftinv[0:2,3]
class SolverIKChainDirection3D(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Dfk = None
Dee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Dee, jointtree,Dfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Dee = Dee
self.jointtree = jointtree
self.Dfk=Dfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainDirection3D(self)
def end(self, generator):
return generator.endIKChainDirection3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Dee = Tleftinv[0:3,0:3]*self.Dee
class SolverIKChainRay(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Dfk = None
Pee = None
Dee = None
dictequations = None
is5dray = False # if True, then full 3D position becomes important and things shouldn't be normalized
def __init__(self, solvejointvars, freejointvars, Pee, Dee, jointtree,Pfk=None,Dfk=None,is5dray=False):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.Dee = Dee
self.jointtree = jointtree
self.Pfk = Pfk
self.Dfk = Dfk
self.dictequations = []
self.is5dray=is5dray
def generate(self, generator):
return generator.generateIKChainRay(self)
def end(self, generator):
return generator.endIKChainRay(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
self.Dee = Tleftinv[0:3,0:3]*self.Dee
class SolverIKChainLookat3D(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Dfk = None
Pee = None
dictequations = None
def __init__(self, solvejointvars, freejointvars, Pee, jointtree,Pfk=None,Dfk=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.jointtree = jointtree
self.Pfk=Pfk
self.Dfk=Dfk
self.dictequations = []
def generate(self, generator):
return generator.generateIKChainLookat3D(self)
def end(self, generator):
return generator.endIKChainLookat3D(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:3,0:3]*self.Pfk+Tleft[0:3,3]
self.Dfk = Tleft[0:3,0:3]*self.Dfk
self.Pee = Tleftinv[0:3,0:3]*self.Pee+Tleftinv[0:3,3]
class SolverIKChainAxisAngle(SolverBase):
solvejointvars = None
freejointvars = None
jointtree = None
Pfk = None
Pee = None
dictequations = None
angleee=None
anglefk=None
iktype=None
def __init__(self, solvejointvars, freejointvars, Pee, angleee,jointtree,Pfk=None,anglefk=None,iktype=None):
self.solvejointvars = solvejointvars
self.freejointvars = freejointvars
self.Pee = Pee
self.anglefk=anglefk
self.jointtree = jointtree
self.Pfk=Pfk
self.angleee=angleee
self.dictequations = []
self.iktype=iktype
def generate(self, generator):
return generator.generateSolverIKChainAxisAngle(self)
def end(self, generator):
return generator.endSolverIKChainAxisAngle(self)
def leftmultiply(self,Tleft,Tleftinv):
self.Pfk = Tleft[0:2,0:2]*self.Pfk+Tleft[0:2,3]
self.Pee = Tleftinv[0:2,0:2]*self.Pee+Tleftinv[0:2,3]
assert(0) # need to change angle
from sympy.core import function # for sympy 0.7.1+
class fmod(function.Function):
"""defines floating-point mod"""
nargs = 2
is_real = True
is_Function = True
class atan2check(atan2):
"""atan2 whose arguments still need validity checks (e.g. both being zero) at evaluation time"""
nargs = 2
is_real = True
is_Function = True
class GinacUtils:
@staticmethod
def ConvertToGinac(eq,localsymbolmap):
if eq.is_Add:
geq = None
for arg in eq.args:
geq2 = GinacUtils.ConvertToGinac(arg,localsymbolmap)
if geq is None:
geq = geq2
else:
geq += geq2
return geq if geq is not None else swiginac.numeric(0)
elif eq.is_Mul:
geq = None
for arg in eq.args:
geq2 = GinacUtils.ConvertToGinac(arg,localsymbolmap)
if geq is None:
geq = geq2
else:
geq *= geq2
return geq if geq is not None else swiginac.numeric(1)
elif eq.is_Pow:
gbase = GinacUtils.ConvertToGinac(eq.base,localsymbolmap)
if eq.exp == S.One:
return gbase
elif eq.exp == -S.One:
return 1/gbase
else:
return pow(gbase,GinacUtils.ConvertToGinac(eq.exp,localsymbolmap))
elif eq.is_number:
return swiginac.numeric(str(eq))
elif eq.is_Symbol:
if str(eq) in localsymbolmap:
return localsymbolmap[str(eq)]
else:
gsym = swiginac.symbol(str(eq))
localsymbolmap[str(eq)] = gsym
return gsym
raise ValueError('unknown equation %s'%str(eq))
@staticmethod
def ConvertFromGinac(geq):
if isinstance(geq, swiginac.add):
return Add(*[GinacUtils.ConvertFromGinac(geq.op(i)) for i in range(geq.nops())])
elif isinstance(geq, swiginac.mul):
return Mul(*[GinacUtils.ConvertFromGinac(geq.op(i)) for i in range(geq.nops())])
elif isinstance(geq, swiginac.power):
ebase = GinacUtils.ConvertFromGinac(geq.op(0))
if geq.op(1) == 1:
return ebase
elif geq.op(1) == -1:
return S.One/ebase
else:
return Pow(ebase,GinacUtils.ConvertFromGinac(geq.op(1)),evaluate=False)
elif isinstance(geq, swiginac.numeric):
if geq.is_integer():
return Integer(str(geq))
elif geq.is_rational():
return Rational(str(geq))
else:
return geq.eval()
elif isinstance(geq, swiginac.symbol):
return Symbol(str(geq))
else:
raise ValueError('unknown equation %s'%str(geq))
@staticmethod
def ConvertMatrixToGinac(M,name='M', localsymbolmap={}):
gM = swiginac.symbolic_matrix(M.shape[0],M.shape[1],name)
for i in range(M.shape[0]):
for j in range(M.shape[1]):
gM[i,j] = GinacUtils.ConvertToGinac(M[i,j],localsymbolmap)
return gM
@staticmethod
def GetPolyTermsFromGinac(geq, gothersymbols, othersymbols):
"""return a dict of monom:coeff items
"""
terms = {}
for i, gothersymbol in enumerate(gothersymbols):
for degree in range(geq.ldegree(gothersymbol),geq.degree(gothersymbol)+1):
monomprefix = (0,)*i + (degree,)
gcoeff = geq.coeff(gothersymbol,degree)
if i+1 < len(gothersymbols):
newterms = GinacUtils.GetPolyTermsFromGinac(gcoeff,gothersymbols[i+1:],othersymbols[i+1:])
for newmonom, newcoeff in newterms.iteritems():
assert(len(newmonom)==len(gothersymbols)-i-1)
terms[monomprefix+newmonom] = newcoeff
else:
# ConvertFromGinac is very slow
terms[monomprefix] = gcoeff#GinacUtils.ConvertFromGinac(gcoeff)
return terms
@staticmethod
def SolveUpperTriangular(gA, gB, name='X'):
"""solves for gA * X = gB.
All parameters have to be ginac objects
"""
gX = swiginac.symbolic_matrix(gB.rows(),gB.cols(),name)
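# back-substitution: walk from the last row upward, reusing already-solved entries of gX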
for i in reversed(xrange(gA.rows())):
if gA[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
gX[i, 0] = (gB[i, 0] - sum(gA[i, k] * gX[k, 0] for k in xrange(i+1, gA.rows()))) / gA[i, i]
return gX
class IKFastSolver(AutoReloader):
"""Solves the analytical inverse kinematics equations. The symbol naming conventions are as follows:
cjX - cos joint angle
constX - temporary constant used to simplify computations
dummyX - dummy intermediate variables to solve for
gconstX - global constant that is also used during ik generation phase
htjX - half tan of joint angle
jX - joint angle
pX - end effector position information
rX - end effector rotation information
sjX - sin joint angle
tconstX - second-level temporary constant
tjX - tan of joint angle
"""
class CannotSolveError(Exception):
"""thrown when ikfast fails to solve a particular set of equations with the given knowns and unknowns
"""
def __init__(self,value=u''):
self.value = value
def __unicode__(self):
return u'%s: %s'%(self.__class__.__name__, self.value)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return '<%s(%r)>'%(self.__class__.__name__, self.value)
def __eq__(self, r):
return self.value == r.value
def __ne__(self, r):
return self.value != r.value
class IKFeasibilityError(Exception):
"""thrown when it is not possible to solve the IK due to robot not having enough degrees of freedom. For example, a robot with 5 joints does not have 6D IK
"""
def __init__(self,equations,checkvars):
self.equations=equations
self.checkvars=checkvars
def __str__(self):
s = "Not enough equations to solve variables %s!\nThis means one of several things: not enough constraints to solve all variables, or the manipulator does not span the target IK space. This is not an ikfast failure, it just means the robot kinematics are invalid for this type of IK. Equations that are not uniquely solvable are:\n"%str(self.checkvars)
for eq in self.equations:
s += str(eq) + '\n'
return s
class JointAxis:
__slots__ = ['joint','iaxis']
class Variable:
__slots__ = ['var','svar','cvar','tvar','htvar']
def __init__(self, var):
self.name = var.name
self.var = var
self.svar = Symbol("s%s"%var.name)
self.cvar = Symbol("c%s"%var.name)
self.tvar = Symbol("t%s"%var.name)
self.htvar = Symbol("ht%s"%var.name)
self.vars = [self.var,self.svar,self.cvar,self.tvar,self.htvar]
self.subs = [(cos(self.var),self.cvar),(sin(self.var),self.svar),(tan(self.var),self.tvar),(tan(self.var/2),self.htvar)]
self.subsinv = [(self.cvar,cos(self.var)),(self.svar, sin(self.var)),(self.tvar,tan(self.var))]
def getsubs(self,value):
return [(self.var,value)]+[(s,v.subs(self.var,value).evalf()) for v,s in self.subs]
class DegenerateCases:
def __init__(self):
self.handleddegeneratecases = []
def clone(self):
clone=IKFastSolver.DegenerateCases()
clone.handleddegeneratecases = self.handleddegeneratecases[:]
return clone
def addcasesconds(self,newconds,currentcases):
for case in newconds:
newcases = set(currentcases)
newcases.add(case)
assert(not self.hascases(newcases))
self.handleddegeneratecases.append(newcases)
def addcases(self,currentcases):
if not self.hascases(currentcases):
self.handleddegeneratecases.append(currentcases)
else:
log.warn('case already added') # sometimes this can happen, but it isn't a bug, just bad bookkeeping
def RemoveCases(self, currentcases):
for i, handledcases in enumerate(self.handleddegeneratecases):
if handledcases == currentcases:
self.handleddegeneratecases.pop(i)
return True
return False
def gethandledconds(self,currentcases):
handledconds = []
for handledcases in self.handleddegeneratecases:
if len(currentcases)+1==len(handledcases) and currentcases < handledcases:
handledconds.append((handledcases - currentcases).pop())
return handledconds
def hascases(self,currentcases):
for handledcases in self.handleddegeneratecases:
if handledcases == currentcases:
return True
return False
def __init__(self, kinbody=None,kinematicshash='',precision=None):
self.usinglapack = False
self.useleftmultiply = True
self.freevarsubs = []
self.degeneratecases = None
self.kinematicshash = kinematicshash
self.testconsistentvalues = None
self.maxcasedepth = 4 # the maximum depth of special/degenerate cases to process before system gives up
self.globalsymbols = [] # global symbols for substitutions
self._scopecounter = 0 # a counter for debugging purposes that increases every time a level changes
self._dodebug = False
if precision is None:
self.precision=8
else:
self.precision=precision
self.kinbody = kinbody
self._iktype = None # the current iktype processing
self.axismap = {}
self.axismapinv = {}
with self.kinbody:
for idof in range(self.kinbody.GetDOF()):
axis = IKFastSolver.JointAxis()
axis.joint = self.kinbody.GetJointFromDOFIndex(idof)
axis.iaxis = idof-axis.joint.GetDOFIndex()
name = str('j%d')%idof
self.axismap[name] = axis
self.axismapinv[idof] = name
def convertRealToRational(self, x,precision=None):
if precision is None:
precision=self.precision
if Abs(x) < 10**-precision:
return S.Zero
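# rationalize both x and 1/x, then return whichever representation is shorter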
r0 = Rational(str(round(Float(float(x),30),precision)))
if x == 0:
return r0
r1 = 1/Rational(str(round(Float(1/float(x),30),precision)))
return r0 if len(str(r0)) < len(str(r1)) else r1
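    # Worked example (a sketch, not executed here): with precision=8 and
    # x = 1.0/3 in double precision, r0 becomes 33333333/100000000 while 1/x
    # rounds back to 3.0 so r1 = 1/3; the reciprocal form is returned because
    # its string representation is shorter.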
def ConvertRealToRationalEquation(self, eq, precision=None):
if eq.is_Add:
neweq = S.Zero
for subeq in eq.args:
neweq += self.ConvertRealToRationalEquation(subeq,precision)
elif eq.is_Mul:
neweq = self.ConvertRealToRationalEquation(eq.args[0],precision)
for subeq in eq.args[1:]:
neweq *= self.ConvertRealToRationalEquation(subeq,precision)
elif eq.is_Function:
newargs = [self.ConvertRealToRationalEquation(subeq,precision) for subeq in eq.args]
neweq = eq.func(*newargs)
elif eq.is_number:
if eq.is_irrational:
# don't touch it since it could be pi!
neweq = eq
else:
neweq = self.convertRealToRational(eq,precision)
else:
neweq=eq
return neweq
def normalizeRotation(self,M):
"""error from openrave can be on the order of 1e-6 (especially if they are defined diagonal to some axis)
"""
right = Matrix(3,1,[self.convertRealToRational(x,self.precision-3) for x in M[0,0:3]])
right = right/right.norm()
up = Matrix(3,1,[self.convertRealToRational(x,self.precision-3) for x in M[1,0:3]])
up = up - right*right.dot(up)
up = up/up.norm()
d = right.cross(up)
for i in range(3):
# don't round the rotational part anymore since it could lead to unnormalized rotations!
M[0,i] = right[i]
M[1,i] = up[i]
M[2,i] = d[i]
M[i,3] = self.convertRealToRational(M[i,3])
M[3,i] = S.Zero
M[3,3] = S.One
return M
def GetMatrixFromNumpy(self,T):
return Matrix(4,4,[x for x in T.flat])
def RoundMatrix(self, T):
"""given a sympy matrix, will round the matrix and snap all its values to 15, 30, 45, 60, and 90 degrees.
"""
if axisAngleFromRotationMatrix is not None:
Teval = T.evalf()
axisangle = axisAngleFromRotationMatrix([[Teval[0,0], Teval[0,1], Teval[0,2]], [Teval[1,0], Teval[1,1], Teval[1,2]], [Teval[2,0], Teval[2,1], Teval[2,2]]])
angle = sqrt(axisangle[0]**2+axisangle[1]**2+axisangle[2]**2)
            if abs(angle) < 10**(-self.precision):
                # rotation is identity
                M = eye(4)
            else:
                # normalize the axis only when the angle is non-negligible, to avoid a divide by zero
                axisangle /= angle
                log.debug('rotation angle: %f, axis=[%f,%f,%f]', (angle*180/pi).evalf(),axisangle[0],axisangle[1],axisangle[2])
accurateaxisangle = Matrix(3,1,[self.convertRealToRational(x,self.precision-3) for x in axisangle])
accurateaxisangle = accurateaxisangle/accurateaxisangle.norm()
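                # quaternion convention is [cos(angle/2), axis*sin(angle/2)]; each branch
                # below writes the exact symbolic half-angle values for the snapped angle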
# angle is not a multiple of 90, can get long fractions. so check if there's any way to simplify it
if abs(angle-3*pi/2) < 10**(-self.precision+2):
quat = [-S.One/sqrt(2), accurateaxisangle[0]/sqrt(2), accurateaxisangle[1]/sqrt(2), accurateaxisangle[2]/sqrt(2)]
elif abs(angle-pi) < 10**(-self.precision+2):
quat = [S.Zero, accurateaxisangle[0], accurateaxisangle[1], accurateaxisangle[2]]
elif abs(angle-2*pi/3) < 10**(-self.precision+2):
quat = [Rational(1,2), accurateaxisangle[0]*sqrt(3)/2, accurateaxisangle[1]*sqrt(3)/2, accurateaxisangle[2]*sqrt(3)/2]
elif abs(angle-pi/2) < 10**(-self.precision+2):
quat = [S.One/sqrt(2), accurateaxisangle[0]/sqrt(2), accurateaxisangle[1]/sqrt(2), accurateaxisangle[2]/sqrt(2)]
elif abs(angle-pi/3) < 10**(-self.precision+2):
quat = [sqrt(3)/2, accurateaxisangle[0]/2, accurateaxisangle[1]/2, accurateaxisangle[2]/2]
elif abs(angle-pi/4) < 10**(-self.precision+2):
# cos(pi/8) = sqrt(sqrt(2)+2)/2
# sin(pi/8) = sqrt(-sqrt(2)+2)/2
quat = [sqrt(sqrt(2)+2)/2, sqrt(-sqrt(2)+2)/2*accurateaxisangle[0], sqrt(-sqrt(2)+2)/2*accurateaxisangle[1], sqrt(-sqrt(2)+2)/2*accurateaxisangle[2]]
elif abs(angle-pi/6) < 10**(-self.precision+2):
# cos(pi/12) = sqrt(2)/4+sqrt(6)/4
# sin(pi/12) = -sqrt(2)/4+sqrt(6)/4
quat = [sqrt(2)/4+sqrt(6)/4, (-sqrt(2)/4+sqrt(6)/4)*accurateaxisangle[0], (-sqrt(2)/4+sqrt(6)/4)*accurateaxisangle[1], (-sqrt(2)/4+sqrt(6)/4)*accurateaxisangle[2]]
else:
# could not simplify further
#assert(0)
return self.normalizeRotation(T)
M = self.GetMatrixFromQuat(quat)
for i in range(3):
M[i,3] = self.convertRealToRational(T[i,3],self.precision)
return M
        return self.normalizeRotation(Matrix(4,4,[x for x in T]))
def numpyVectorToSympy(self,v,precision=None):
return Matrix(len(v),1,[self.convertRealToRational(x,precision) for x in v])
@staticmethod
def rodrigues(axis, angle):
return IKFastSolver.rodrigues2(axis,cos(angle),sin(angle))
@staticmethod
def GetMatrixFromQuat(quat):
"""quaternion is [cos(angle/2), v*sin(angle/2)]
return 4x4 matrix with rotation component set
"""
M = eye(4)
qq1 = 2*quat[1]*quat[1]
qq2 = 2*quat[2]*quat[2]
qq3 = 2*quat[3]*quat[3]
M[0,0] = 1 - qq2 - qq3
M[0,1] = 2*(quat[1]*quat[2] - quat[0]*quat[3])
M[0,2] = 2*(quat[1]*quat[3] + quat[0]*quat[2])
M[1,0] = 2*(quat[1]*quat[2] + quat[0]*quat[3])
        M[1,1] = 1 - qq1 - qq3
        M[1,2] = 2*(quat[2]*quat[3] - quat[0]*quat[1])
M[2,0] = 2*(quat[1]*quat[3] - quat[0]*quat[2])
M[2,1] = 2*(quat[2]*quat[3] + quat[0]*quat[1])
M[2,2] = 1 - qq1 - qq2
return M
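    # Usage sketch: GetMatrixFromQuat([S.One, S.Zero, S.Zero, S.Zero]) yields the
    # identity rotation, and GetMatrixFromQuat([cos(a/2), sin(a/2), S.Zero, S.Zero])
    # a rotation of angle a about the x axis.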
@staticmethod
def rodrigues2(axis, cosangle, sinangle):
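        # Rodrigues rotation formula: R = I + sin(angle)*K + (1-cos(angle))*K*K,
        # where K is the skew-symmetric cross-product matrix of the (assumed unit) axis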
skewsymmetric = Matrix(3, 3, [S.Zero,-axis[2],axis[1],axis[2],S.Zero,-axis[0],-axis[1],axis[0],S.Zero])
return eye(3) + sinangle * skewsymmetric + (S.One-cosangle)*skewsymmetric*skewsymmetric
@staticmethod
def affineInverse(affinematrix):
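        # for a rigid transform [R p; 0 1] the inverse is [R.T -R.T*p; 0 1],
        # which avoids a general symbolic 4x4 inversion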
T = eye(4)
T[0:3,0:3] = affinematrix[0:3,0:3].transpose()
T[0:3,3] = -affinematrix[0:3,0:3].transpose() * affinematrix[0:3,3]
return T
@staticmethod
def affineSimplify(T):
return Matrix(T.shape[0],T.shape[1],[trigsimp(x.expand()) for x in T])
@staticmethod
def multiplyMatrix(Ts):
Tfinal = eye(4)
for T in Ts:
Tfinal = Tfinal*T
return Tfinal
@staticmethod
def equal(eq0,eq1):
return expand(eq0-eq1) == S.Zero
def chop(self,expr,precision=None):
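        # intentionally a no-op: rounding is handled by convertRealToRational; this
        # appears to be kept only as a compatibility hook for callers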
return expr
def IsHinge(self,axisname):
        if axisname[0] != 'j' or axisname not in self.axismap:
log.info('IsHinge returning false for variable %s'%axisname)
return False # dummy joint most likely for angles
return self.axismap[axisname].joint.IsRevolute(self.axismap[axisname].iaxis)
def IsPrismatic(self,axisname):
        if axisname[0] != 'j' or axisname not in self.axismap:
log.info('IsPrismatic returning false for variable %s'%axisname)
return False # dummy joint most likely for angles
return self.axismap[axisname].joint.IsPrismatic(self.axismap[axisname].iaxis)
def forwardKinematicsChain(self, chainlinks, chainjoints):
"""The first and last matrices returned are always non-symbolic
"""
with self.kinbody:
assert(len(chainjoints)+1==len(chainlinks))
Links = []
Tright = eye(4)
jointvars = []
jointinds = []
for i,joint in enumerate(chainjoints):
if len(joint.GetName()) == 0:
raise self.CannotSolveError('chain %s:%s contains a joint with no name!'%(chainlinks[0].GetName(),chainlinks[-1].GetName()))
if chainjoints[i].GetHierarchyParentLink() == chainlinks[i]:
TLeftjoint = self.GetMatrixFromNumpy(joint.GetInternalHierarchyLeftTransform())
TRightjoint = self.GetMatrixFromNumpy(joint.GetInternalHierarchyRightTransform())
axissign = S.One
else:
TLeftjoint = self.affineInverse(self.GetMatrixFromNumpy(joint.GetInternalHierarchyRightTransform()))
TRightjoint = self.affineInverse(self.GetMatrixFromNumpy(joint.GetInternalHierarchyLeftTransform()))
axissign = -S.One
if joint.IsStatic():
Tright = self.affineSimplify(Tright * TLeftjoint * TRightjoint)
else:
Tjoints = []
for iaxis in range(joint.GetDOF()):
if joint.GetDOFIndex() >= 0:
var = Symbol(self.axismapinv[joint.GetDOFIndex()])
cosvar = cos(var)
sinvar = sin(var)
jointvars.append(var)
elif joint.IsMimic(iaxis):
# get the mimic equation
var = joint.GetMimicEquation(iaxis)
# this needs to be reduced!
cosvar = cos(var)
sinvar = sin(var)
else:
raise ValueError('cannot solve for mechanism when a non-mimic passive joint %s is in chain'%str(joint))
Tj = eye(4)
jaxis = axissign*self.numpyVectorToSympy(joint.GetInternalHierarchyAxis(iaxis))
if joint.IsRevolute(iaxis):
Tj[0:3,0:3] = self.rodrigues2(jaxis,cosvar,sinvar)
elif joint.IsPrismatic(iaxis):
Tj[0:3,3] = jaxis*(var)
else:
raise ValueError('failed to process joint %s'%joint.GetName())
Tjoints.append(Tj)
if axisAngleFromRotationMatrix is not None:
axisangle = axisAngleFromRotationMatrix(numpy.array(numpy.array(Tright * TLeftjoint),numpy.float64))
angle = sqrt(axisangle[0]**2+axisangle[1]**2+axisangle[2]**2)
if angle > 0:
axisangle /= angle
log.debug('rotation angle of Links[%d]: %f, axis=[%f,%f,%f]', len(Links), (angle*180/pi).evalf(),axisangle[0],axisangle[1],axisangle[2])
Links.append(self.RoundMatrix(Tright * TLeftjoint))
for Tj in Tjoints:
jointinds.append(len(Links))
Links.append(Tj)
Tright = TRightjoint
Links.append(self.RoundMatrix(Tright))
# before returning the final links, try to push as much translation components
# outwards to both ends. Sometimes these components can get in the way of detecting
# intersecting axes
if len(jointinds) > 0:
iright = jointinds[-1]
Ttrans = eye(4)
Ttrans[0:3,3] = Links[iright-1][0:3,0:3].transpose() * Links[iright-1][0:3,3]
Trot_with_trans = Ttrans * Links[iright]
separated_trans = Trot_with_trans[0:3,0:3].transpose() * Trot_with_trans[0:3,3]
for j in range(0,3):
if separated_trans[j].has(*jointvars):
Ttrans[j,3] = Rational(0)
else:
Ttrans[j,3] = separated_trans[j]
Links[iright+1] = Ttrans * Links[iright+1]
Links[iright-1] = Links[iright-1] * self.affineInverse(Ttrans)
log.info("moved translation %s to right end",Ttrans[0:3,3].transpose())
if len(jointinds) > 1:
ileft = jointinds[0]
separated_trans = Links[ileft][0:3,0:3] * Links[ileft+1][0:3,3]
Ttrans = eye(4)
for j in range(0,3):
if not separated_trans[j].has(*jointvars):
Ttrans[j,3] = separated_trans[j]
Links[ileft-1] = Links[ileft-1] * Ttrans
Links[ileft+1] = self.affineInverse(Ttrans) * Links[ileft+1]
log.info("moved translation %s to left end",Ttrans[0:3,3].transpose())
if len(jointinds) > 3: # last 3 axes always have to be intersecting, move the translation of the first axis to the left
ileft = jointinds[-3]
separated_trans = Links[ileft][0:3,0:3] * Links[ileft+1][0:3,3]
Ttrans = eye(4)
for j in range(0,3):
if not separated_trans[j].has(*jointvars):
Ttrans[j,3] = separated_trans[j]
Links[ileft-1] = Links[ileft-1] * Ttrans
Links[ileft+1] = self.affineInverse(Ttrans) * Links[ileft+1]
log.info("moved translation on intersecting axis %s to left",Ttrans[0:3,3].transpose())
return Links, jointvars
def countVariables(self,expr,var):
"""Counts number of terms variable appears in"""
if not expr.is_Add:
if expr.has(var):
return 1
return 0
num = 0
for term in expr.args:
if term.has(var):
num += 1
return num
@staticmethod
def isValidPowers(expr):
if expr.is_Pow:
if not expr.exp.is_number or expr.exp < 0:
return False
return IKFastSolver.isValidPowers(expr.base)
elif expr.is_Add or expr.is_Mul or expr.is_Function:
return all([IKFastSolver.isValidPowers(arg) for arg in expr.args])
else:
return True
@staticmethod
def rotateDirection(sourcedir,targetdir):
sourcedir /= sqrt(sourcedir.dot(sourcedir))
targetdir /= sqrt(targetdir.dot(targetdir))
rottodirection = sourcedir.cross(targetdir)
fsin = sqrt(rottodirection.dot(rottodirection))
fcos = sourcedir.dot(targetdir)
M = eye(4)
if fsin > 1e-6:
M[0:3,0:3] = IKFastSolver.rodrigues(rottodirection*(1/fsin),atan2(fsin,fcos))
elif fcos < 0: # hand is flipped 180, rotate around x axis
rottodirection = Matrix(3,1,[S.One,S.Zero,S.Zero])
rottodirection -= sourcedir * sourcedir.dot(rottodirection)
M[0:3,0:3] = IKFastSolver.rodrigues(rottodirection.normalized(), atan2(fsin, fcos))
return M
@staticmethod
def has(eqs,*sym):
return any([eq.has(*sym) for eq in eqs]) if len(sym) > 0 else False
def trigsimp(self, eq,trigvars):
"""recurses the sin**2 = 1-cos**2 equation for every trig var
"""
trigsubs = [(sin(v)**2,1-cos(v)**2) for v in trigvars if self.IsHinge(v.name)]
eq=expand(eq)
curcount = eq.count_ops()
while True:
eq=eq.subs(trigsubs).expand()
newcount = eq.count_ops()
            if newcount == curcount:
break
curcount=newcount
return eq
def SimplifyAtan2(self, eq, incos=False, insin=False, epsilon=None):
"""simplifies equations like sin(atan2(y,x)) to y/sqrt(x**2+y**2)
Sometimes can get equations like
sin(-atan2(-r21, -r20))
cos(-atan2(-r21, -r20) + 3.14159265358979)
which means the operations internally have to be carried over
"""
        processed = False # when incos or insin is True, this flag records whether the sin/cos wrapping has already been folded into neweq
if eq.is_Add:
if incos:
lefteq = eq.args[1]
if len(eq.args) > 2:
for ieq in range(2,len(eq.args)):
lefteq += eq.args[ieq]
neweq = self.SimplifyAtan2(eq.args[0], incos=True) * self.SimplifyAtan2(lefteq, incos=True) - self.SimplifyAtan2(eq.args[0], insin=True) * self.SimplifyAtan2(lefteq, insin=True)
processed = True
elif insin:
lefteq = eq.args[1]
if len(eq.args) > 2:
for ieq in range(2,len(eq.args)):
lefteq += eq.args[ieq]
neweq = self.SimplifyAtan2(eq.args[0], incos=True) * self.SimplifyAtan2(lefteq, insin=True) + self.SimplifyAtan2(eq.args[0], insin=True) * self.SimplifyAtan2(lefteq, incos=True)
processed = True
else:
neweq = S.Zero
for subeq in eq.args:
neweq += self.SimplifyAtan2(subeq)
# call simplify in order to take in common terms
if self.codeComplexity(neweq) > 80:
neweq2 = neweq
else:
#log.info('complexity: %d', self.codeComplexity(neweq))
neweq2 = simplify(neweq)
if neweq2 != neweq:
neweq = self.SimplifyAtan2(neweq2)
else:
try:
#print 'simplifying',neweq
neweq = self.SimplifyTransform(neweq)
except PolynomialError:
# ok if neweq is too complicated
pass
elif eq.is_Mul:
if incos and len(eq.args) == 2:
num = None
if eq.args[0].is_integer:
num = eq.args[0]
eq2 = eq.args[1]
elif eq.args[1].is_integer:
num = eq.args[1]
eq2 = eq.args[0]
if num is not None:
if num == S.One:
neweq = self.SimplifyAtan2(eq2,incos=True)
processed = True
if num == -S.One:
neweq = self.SimplifyAtan2(eq2,incos=True)
processed = True
elif insin and len(eq.args) == 2:
num = None
if eq.args[0].is_integer:
num = eq.args[0]
eq2 = eq.args[1]
elif eq.args[1].is_integer:
num = eq.args[1]
eq2 = eq.args[0]
if num is not None:
if num == S.One:
neweq = self.SimplifyAtan2(eq2,insin=True)
processed = True
if num == -S.One:
neweq = -self.SimplifyAtan2(eq2,insin=True)
processed = True
if not processed:
neweq = self.SimplifyAtan2(eq.args[0])
for subeq in eq.args[1:]:
neweq *= self.SimplifyAtan2(subeq)
elif eq.is_Function:
if incos and eq.func == atan2:
yeq = self.SimplifyTransform(self.SimplifyAtan2(eq.args[0]))
xeq = self.SimplifyTransform(self.SimplifyAtan2(eq.args[1]))
neweq = xeq / sqrt(self.SimplifyTransform(yeq**2+xeq**2))
processed = True
elif insin and eq.func == atan2:
yeq = self.SimplifyTransform(self.SimplifyAtan2(eq.args[0]))
xeq = self.SimplifyTransform(self.SimplifyAtan2(eq.args[1]))
neweq = yeq / sqrt(self.SimplifyTransform(yeq**2+xeq**2))
processed = True
elif eq.func == cos:
neweq = self.SimplifyAtan2(eq.args[0], incos=True)
elif eq.func == sin:
neweq = self.SimplifyAtan2(eq.args[0], insin=True)
else:
newargs = [self.SimplifyAtan2(subeq) for subeq in eq.args]
neweq = eq.func(*newargs)
elif eq.is_Pow:
neweq = None
if eq.exp.is_number and eq.exp-0.5 == S.Zero:
if eq.base.is_Pow and eq.base.exp.is_number and eq.base.exp-2 == S.Zero:
# should be abs(eq.base.base), but that could make other simplifications more difficult?
neweq = abs(self.SimplifyAtan2(eq.base.base))
if neweq is None:
neweq = self.SimplifyAtan2(eq.base)**self.SimplifyAtan2(eq.exp)
elif eq.is_number:
if epsilon is None:
epsilon = 1e-15
if insin:
neweq = sin(eq)
elif incos:
neweq = cos(eq)
else:
neweq = eq
processed = True
if abs(neweq.evalf()) <= epsilon:
neweq = S.Zero
else:
neweq=eq
if not processed and insin:
return sin(neweq)
elif not processed and incos:
return cos(neweq)
return neweq
def codeComplexity(self,expr):
complexity = 1
if expr.is_Add:
for term in expr.args:
complexity += self.codeComplexity(term)
elif expr.is_Mul:
for term in expr.args:
complexity += self.codeComplexity(term)
elif expr.is_Pow:
complexity += self.codeComplexity(expr.base)+self.codeComplexity(expr.exp)
elif expr.is_Function:
complexity += 1
for term in expr.args:
complexity += self.codeComplexity(term)
return complexity
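    # codeComplexity is a rough node count of the expression tree; for example
    # x*y + sin(z) scores 7 (1 for the Add, 3 for the Mul subtree, 3 for sin(z))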
def ComputePolyComplexity(self, peq):
"""peq is a polynomial
"""
complexity = 0
for monoms,coeff in peq.terms():
coeffcomplexity = self.codeComplexity(coeff)
for m in monoms:
if m > 1:
complexity += 2
elif m > 0:
complexity += 1
complexity += coeffcomplexity + 1
return complexity
def sortComplexity(self,exprs):
exprs.sort(lambda x, y: self.codeComplexity(x)-self.codeComplexity(y))
return exprs
def checkForDivideByZero(self,eq):
"""returns the equations to check for zero
"""
checkforzeros = []
try:
if eq.is_Function:
if eq.func == atan2:
# atan2 is only a problem when both numerator and denominator are 0!
#checkforzeros.append((eq.args[0]**2+eq.args[1]**2).expand())
                    # have to re-substitute given the global symbols; if args[0] and args[1]
                    # are very complicated, there's no reason to do this check
substitutedargs = []
for argeq in eq.args:
argeq2 = self._SubstituteGlobalSymbols(argeq)
if self.codeComplexity(argeq2) < 200:
substitutedargs.append(self.SimplifyAtan2(argeq2))
else:
substitutedargs.append(argeq2)
                    # the bound has to be greater than 20 since some const coefficients can still be simplified
if self.codeComplexity(substitutedargs[0]) < 30 and self.codeComplexity(substitutedargs[1]) < 30:
if not substitutedargs[0].is_number or substitutedargs[0] == S.Zero:
if not substitutedargs[1].is_number or substitutedargs[1] == S.Zero:
sumeq = substitutedargs[0]**2+substitutedargs[1]**2
if self.codeComplexity(sumeq) < 400:
testeq = self.SimplifyAtan2((substitutedargs[0]**2+substitutedargs[1]**2).expand())
else:
testeq = sumeq
testeq2 = abs(substitutedargs[0])+abs(substitutedargs[1])
if self.codeComplexity(testeq) < self.codeComplexity(testeq2):
testeqmin = testeq
else:
testeqmin = testeq2
if testeqmin.is_Mul:
checkforzeros += testeqmin.args
else:
checkforzeros.append(testeqmin)
if checkforzeros[-1].evalf() == S.Zero:
raise self.CannotSolveError('equation evaluates to 0, so can never be ok')
log.info('adding atan2(%r, %r) = %r all zeros check', substitutedargs[0], substitutedargs[1], checkforzeros[-1])
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Add:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Mul:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
elif eq.is_Pow:
for arg in eq.args:
checkforzeros += self.checkForDivideByZero(arg)
if eq.exp.is_number and eq.exp < 0:
checkforzeros.append(eq.base)
except AssertionError,e:
log.warn('%s',e)
if len(checkforzeros) > 0:
newcheckforzeros = []
for eqtemp in checkforzeros:
# check for abs(x**y), in that case choose x
if eqtemp.is_Function and eqtemp.func == Abs:
eqtemp = eqtemp.args[0]
while eqtemp.is_Pow:
eqtemp = eqtemp.base
#self.codeComplexity(eqtemp)
if self.codeComplexity(eqtemp) < 1000:
checkeq = self.removecommonexprs(eqtemp,onlygcd=False,onlynumbers=True)
if self.CheckExpressionUnique(newcheckforzeros,checkeq):
newcheckforzeros.append(checkeq)
else:
# not even worth checking since the equation is so big...
newcheckforzeros.append(eqtemp)
return newcheckforzeros
return checkforzeros
def ComputeSolutionComplexity(self,sol,solvedvars,unsolvedvars):
# for all solutions, check if there is a divide by zero
sol.checkforzeros = sol.getPresetCheckForZeros()
sol.score = 20000*sol.numsolutions()
try:
            # weight by 20000 per returned solution in order to prioritize solutions with fewer branches
if hasattr(sol,'jointeval') and sol.jointeval is not None:
for s in sol.jointeval:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s.subs(sol.dictequations))
subexprs = sol.jointeval
elif hasattr(sol,'jointevalsin') and sol.jointevalsin is not None:
for s in sol.jointevalsin:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s.subs(sol.dictequations))
subexprs = sol.jointevalsin
elif hasattr(sol,'jointevalcos') and sol.jointevalcos is not None:
for s in sol.jointevalcos:
sol.score += self.codeComplexity(s)
sol.checkforzeros += self.checkForDivideByZero(s.subs(sol.dictequations))
subexprs = sol.jointevalcos
else:
return sol.score
# have to also check solution dictionary
for s,v in sol.dictequations:
sol.score += self.codeComplexity(v)
sol.checkforzeros += self.checkForDivideByZero(v.subs(sol.dictequations))
def checkpow(expr,sexprs):
score = 0
if expr.is_Pow:
sexprs.append(expr.base)
if expr.base.is_finite is not None and not expr.base.is_finite:
return oo # infinity
if expr.exp.is_number and expr.exp < 0:
# check if exprbase contains any variables that have already been solved
containsjointvar = expr.base.has(*solvedvars)
cancheckexpr = not expr.base.has(*unsolvedvars)
score += 10000
if not cancheckexpr:
score += 100000
elif not self.isValidSolution(expr):
return oo # infinity
return score
sexprs = subexprs[:]
while len(sexprs) > 0:
sexpr = sexprs.pop(0)
if sexpr.is_Add:
for arg in sexpr.args:
if arg.is_Mul:
for arg2 in arg.args:
sol.score += checkpow(arg2,sexprs)
else:
sol.score += checkpow(arg,sexprs)
elif sexpr.is_Mul:
for arg in sexpr.args:
sol.score += checkpow(arg,sexprs)
elif sexpr.is_Function:
sexprs += sexpr.args
elif not self.isValidSolution(sexpr):
log.warn('not valid: %s',sexpr)
sol.score = oo # infinity
else:
sol.score += checkpow(sexpr,sexprs)
except AssertionError, e:
log.warn('%s',e)
sol.score=1e10
newcheckforzeros = []
for eqtemp in sol.checkforzeros:
if self.codeComplexity(eqtemp) < 1000:
checkeq = self.removecommonexprs(eqtemp,onlygcd=False,onlynumbers=True)
if self.CheckExpressionUnique(newcheckforzeros,checkeq):
newcheckforzeros.append(checkeq)
else:
newcheckforzeros.append(eqtemp)
sol.checkforzeros = newcheckforzeros
return sol.score
def checkSolvability(self,AllEquations,checkvars,othervars):
pass
def checkSolvabilityReal(self,AllEquations,checkvars,othervars):
"""returns true if there are enough equations to solve for checkvars
"""
subs = []
checksymbols = []
allsymbols = []
for var in checkvars:
subs += self.Variable(var).subs
checksymbols += self.Variable(var).vars
allsymbols = checksymbols[:]
for var in othervars:
subs += self.Variable(var).subs
allsymbols += self.Variable(var).vars
found = False
for testconsistentvalue in self.testconsistentvalues:
psubvalues = [(s,v) for s,v in testconsistentvalue if not s.has(*checksymbols)]
eqs = [eq.subs(self.globalsymbols).subs(subs).subs(psubvalues) for eq in AllEquations]
usedsymbols = [s for s in checksymbols if self.has(eqs,s)]
eqs = [Poly(eq,*usedsymbols) for eq in eqs if eq != S.Zero]
            # check if any equations have monomials of degree more than 1; if yes, quit with success, since the sympy 0.6.7 solver will freeze otherwise
numhigherpowers = 0
for eq in eqs:
for monom in eq.monoms():
if any([m > 1 for m in monom]):
numhigherpowers += 1
if numhigherpowers > 0:
log.info('checkSolvability has %d higher powers, returning solvable if > 6'%numhigherpowers)
if numhigherpowers > 6:
found = True
break
for var in checkvars:
varsym = self.Variable(var)
if self.IsHinge(var.name):
if varsym.cvar in usedsymbols and varsym.svar in usedsymbols:
eqs.append(Poly(varsym.cvar**2+varsym.svar**2-1,*usedsymbols))
# have to make sure there are representative symbols of all the checkvars, otherwise degenerate solution
setusedsymbols = set(usedsymbols)
if any([len(setusedsymbols.intersection(self.Variable(var).vars)) == 0 for var in checkvars]):
continue
try:
sol=solve_poly_system(eqs)
if sol is not None and len(sol) > 0 and len(sol[0]) == len(usedsymbols):
found = True
break
except:
pass
if not found:
raise self.IKFeasibilityError(AllEquations,checkvars)
def writeIkSolver(self,chaintree,lang=None):
"""write the ast into a specific langauge, prioritize c++
"""
if lang is None:
if CodeGenerators.has_key('cpp'):
lang = 'cpp'
else:
lang = CodeGenerators.keys()[0]
log.info('generating %s code...'%lang)
return CodeGenerators[lang](kinematicshash=self.kinematicshash,version=__version__).generate(chaintree)
def generateIkSolver(self, baselink, eelink, freeindices=None,solvefn=None):
if solvefn is None:
solvefn = IKFastSolver.solveFullIK_6D
chainlinks = self.kinbody.GetChain(baselink,eelink,returnjoints=False)
chainjoints = self.kinbody.GetChain(baselink,eelink,returnjoints=True)
LinksRaw, jointvars = self.forwardKinematicsChain(chainlinks,chainjoints)
for T in LinksRaw:
log.info('[' + ','.join(['[%s, %s, %s, %s]'%(T[i,0],T[i,1],T[i,2],T[i,3]) for i in range(3)]) + ']')
self.degeneratecases = None
if freeindices is None:
# need to iterate through all combinations of free joints
assert(0)
isolvejointvars = []
solvejointvars = []
self.ifreejointvars = []
self.freevarsubs = []
self.freevarsubsinv = []
self.freevars = []
self.freejointvars = []
self.invsubs = []
for i,v in enumerate(jointvars):
var = self.Variable(v)
axis = self.axismap[v.name]
dofindex = axis.joint.GetDOFIndex()+axis.iaxis
if dofindex in freeindices:
# convert all free variables to constants
self.ifreejointvars.append(i)
self.freevarsubs += [(cos(var.var), var.cvar), (sin(var.var), var.svar)]
self.freevarsubsinv += [(var.cvar,cos(var.var)), (var.svar,sin(var.var))]
self.freevars += [var.cvar,var.svar]
self.freejointvars.append(var.var)
else:
solvejointvars.append(v)
isolvejointvars.append(i)
self.invsubs += [(var.cvar,cos(v)),(var.svar,sin(v))]
self._solvejointvars = solvejointvars
self._jointvars = jointvars
# set up the destination symbols
self.Tee = eye(4)
for i in range(0,3):
for j in range(0,3):
self.Tee[i,j] = Symbol("r%d%d"%(i,j))
self.Tee[0,3] = Symbol("px")
self.Tee[1,3] = Symbol("py")
self.Tee[2,3] = Symbol("pz")
r00,r01,r02,px,r10,r11,r12,py,r20,r21,r22,pz = self.Tee[0:12]
self.pp = Symbol('pp')
self.ppsubs = [(self.pp,px**2+py**2+pz**2)]
self.npxyz = [Symbol('npx'),Symbol('npy'),Symbol('npz')]
self.npxyzsubs = [(self.npxyz[i],px*self.Tee[0,i]+py*self.Tee[1,i]+pz*self.Tee[2,i]) for i in range(3)]
# cross products between columns of self.Tee
self.rxp = []
self.rxpsubs = []
for i in range(3):
self.rxp.append([Symbol('rxp%d_%d'%(i,j)) for j in range(3)])
c = self.Tee[0:3,i].cross(self.Tee[0:3,3])
self.rxpsubs += [(self.rxp[-1][j],c[j]) for j in range(3)]
self.pvars = self.Tee[0:12]+self.npxyz+[self.pp]+self.rxp[0]+self.rxp[1]+self.rxp[2]
self._rotsymbols = list(self.Tee[0:3,0:3])
# add positions
ip = 9
inp = 12
ipp = 15
irxp = 16
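        # index layout of the flattened list self._rotpossymbols built below:
        #   0-8 rotation entries (row-major), 9-11 position (ip), 12-14 npxyz (inp),
        #   15 pp (ipp), 16-24 the three rxp cross-product vectors (irxp);
        # the dot/cross group tables reference symbols by these indices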
self._rotpossymbols = self._rotsymbols + list(self.Tee[0:3,3])+self.npxyz+[self.pp]+self.rxp[0]+self.rxp[1]+self.rxp[2]
# groups of rotation variables that are unit vectors
self._rotnormgroups = []
for i in range(3):
self._rotnormgroups.append([self.Tee[i,0],self.Tee[i,1],self.Tee[i,2],S.One])
self._rotnormgroups.append([self.Tee[0,i],self.Tee[1,i],self.Tee[2,i],S.One])
self._rotposnormgroups = list(self._rotnormgroups)
self._rotposnormgroups.append([self.Tee[0,3],self.Tee[1,3],self.Tee[2,3],self.pp])
# dot product of rotation rows and columns is always 0
self._rotdotgroups = []
for i,j in combinations(range(3),2):
self._rotdotgroups.append([[i,j],[i+3,j+3],[i+6,j+6],S.Zero])
self._rotdotgroups.append([[3*i,3*j],[3*i+1,3*j+1],[3*i+2,3*j+2],S.Zero])
self._rotposdotgroups = list(self._rotdotgroups)
for i in range(3):
self._rotposdotgroups.append([[i,ip],[i+3,ip+1],[i+6,ip+2],self.npxyz[i]])
self._rotposdotgroups.append([[3*i+0,inp],[3*i+1,inp+1],[3*i+2,inp+2],self.Tee[i,3]])
self._rotcrossgroups = []
# cross products of rotation rows and columns always yield the left over vector
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
# column
self._rotcrossgroups.append([[i+3,j+6],[i+6,j+3],k])
self._rotcrossgroups.append([[i+6,j],[i,j+6],k+3])
self._rotcrossgroups.append([[i,j+3],[i+3,j],k+6])
# row
self._rotcrossgroups.append([[3*i+1,3*j+2],[3*i+2,3*j+1],3*k])
self._rotcrossgroups.append([[3*i+2,3*j],[3*i,3*j+2],3*k+1])
self._rotcrossgroups.append([[3*i,3*j+1],[3*i+1,3*j],3*k+2])
# swap if sign is negative: if j!=1+i
if j!=1+i:
for crossgroup in self._rotcrossgroups[-6:]:
crossgroup[0],crossgroup[1] = crossgroup[1],crossgroup[0]
# add positions
self._rotposcrossgroups = list(self._rotcrossgroups)
for i in range(3):
# column i cross position
self._rotposcrossgroups.append([[i+3,ip+2],[i+6,ip+1],irxp+3*i+0])
self._rotposcrossgroups.append([[i+6,ip+0],[i,ip+2],irxp+3*i+1])
self._rotposcrossgroups.append([[i,ip+1],[i+3,ip+0],irxp+3*i+2])
self.Teeinv = self.affineInverse(self.Tee)
LinksLeft = []
if self.useleftmultiply:
while not self.has(LinksRaw[0],*solvejointvars):
LinksLeft.append(LinksRaw.pop(0))
LinksLeftInv = [self.affineInverse(T) for T in LinksLeft]
self.testconsistentvalues = None
self.gsymbolgen = cse_main.numbered_symbols('gconst')
self.globalsymbols = []
self._scopecounter = 0
# before passing to the solver, set big numbers to constant variables, this will greatly reduce computation times
# numbersubs = []
# LinksRaw2 = []
# for Torig in LinksRaw:
# T = Matrix(Torig)
# #print axisAngleFromRotationMatrix(numpy.array(numpy.array(T[0:3,0:3]),numpy.float64))
# for i in range(12):
# ti = T[i]
# if ti.is_number and len(str(ti)) > 30:
# matchnumber = self.MatchSimilarFraction(ti,numbersubs)
# if matchnumber is None:
# sym = self.gsymbolgen.next()
# log.info('adding global symbol %s=%s'%(sym,ti))
# numbersubs.append((sym,ti))
# T[i] = sym
# else:
# T[i] = matchnumber
# LinksRaw2.append(T)
# if len(numbersubs) > 10:
# log.info('substituting %d global symbols',len(numbersubs))
# LinksRaw = LinksRaw2
# self.globalsymbols += numbersubs
chaintree = solvefn(self, LinksRaw, jointvars, isolvejointvars)
if self.useleftmultiply:
chaintree.leftmultiply(Tleft=self.multiplyMatrix(LinksLeft), Tleftinv=self.multiplyMatrix(LinksLeftInv[::-1]))
chaintree.dictequations += self.globalsymbols
return chaintree
def MatchSimilarFraction(self,num,numbersubs,matchlimit = 40):
"""returns None if no appropriate match found
"""
for c,v in numbersubs:
if self.equal(v,num):
return c
# nothing matched, so check gcd
largestgcd = S.One
retnum = None
for c,v in numbersubs:
curgcd = gcd(v,num)
if len(str(curgcd)) > len(str(largestgcd)):
newfraction = (num/v)
if len(str(newfraction)) <= matchlimit:
largestgcd = curgcd
retnum = c * newfraction
return retnum
def ComputeConsistentValues(self,jointvars,T,numsolutions=1,subs=None):
"""computes a set of substitutions that satisfy the IK equations
"""
possibleangles = [S.Zero, pi.evalf()/2, asin(3.0/5).evalf(), asin(4.0/5).evalf(), asin(5.0/13).evalf(), asin(12.0/13).evalf()]
possibleanglescos = [S.One, S.Zero, Rational(4,5), Rational(3,5), Rational(12,13), Rational(5,13)]
possibleanglessin = [S.Zero, S.One, Rational(3,5), Rational(4,5), Rational(5,13), Rational(12,13)]
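        # the test angles come from the 3-4-5 and 5-12-13 Pythagorean triples so that
        # their sines/cosines substitute in as exact rationals and checks stay exact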
testconsistentvalues = []
varsubs = []
for jointvar in jointvars:
varsubs += self.Variable(jointvar).subs
for isol in range(numsolutions):
inds = [0]*len(jointvars)
if isol < numsolutions-1:
for j in range(len(jointvars)):
inds[j] = (isol+j)%len(possibleangles)
valsubs = []
for i,ind in enumerate(inds):
v,s,c = possibleangles[ind],possibleanglessin[ind],possibleanglescos[ind]
var = self.Variable(jointvars[i])
valsubs += [(var.var,v),(var.cvar,c),(var.svar,s),(var.tvar,s/c),(var.htvar,s/(1+c))]
psubs = []
for i in range(12):
psubs.append((self.pvars[i],T[i].subs(varsubs).subs(self.globalsymbols+valsubs)))
for s,v in self.ppsubs+self.npxyzsubs+self.rxpsubs:
psubs.append((s,v.subs(psubs)))
allsubs = valsubs+psubs
if subs is not None:
allsubs += [(dvar,var.subs(varsubs).subs(valsubs)) for dvar,var in subs]
testconsistentvalues.append(allsubs)
return testconsistentvalues
def solveFullIK_Direction3D(self,LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One])):
"""basedir needs to be filled with a 3elemtn vector of the initial direction to control"""
self._iktype = 'direction3d'
basedir = Matrix(3,1,[Float(x,30) for x in rawbasedir])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
self.Tfinal = zeros((4,4))
self.Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution(jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast direction3d: %s',solvejointvars)
Daccum = self.Tee[0,0:3].transpose()
numvarsdone = 2
Ds = []
Dsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
D = T[0:3,0:3]*basedir
hasvars = [self.has(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Ds.append(D)
Dsee.append(Daccum)
numvarsdone -= 1
Tinv = self.affineInverse(Links[i])
Daccum = Tinv[0:3,0:3]*Daccum
AllEquations = self.buildEquationsFromTwoSides(Ds,Dsee,jointvars,uselength=False)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.SolveAllEquations(AllEquations,curvars=solvejointvars,othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
return AST.SolverIKChainDirection3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs), jointtree=tree,Dfk=self.Tfinal[0,0:3].transpose())
def solveFullIK_Lookat3D(self,LinksRaw, jointvars, isolvejointvars,rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""basedir,basepos needs to be filled with a direction and position of the ray to control the lookat
"""
self._iktype = 'lookat3d'
basedir = Matrix(3,1,[Float(x,30) for x in rawbasedir])
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
basepos = basepos-basedir*basedir.dot(basepos)
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
self.Tfinal = zeros((4,4))
self.Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
self.Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast lookat3d: %s',solvejointvars)
Paccum = self.Tee[0:3,3]
numvarsdone = 2
Positions = []
Positionsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
hasvars = [self.has(P,v) or self.has(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(P.cross(D))
Positionsee.append(Paccum.cross(D))
numvarsdone -= 1
Tinv = self.affineInverse(Links[i])
Paccum = Tinv[0:3,0:3]*Paccum+Tinv[0:3,3]
frontcond = (Links[-1][0:3,0:3]*basedir).dot(Paccum-(Links[-1][0:3,0:3]*basepos+Links[-1][0:3,3]))
for v in jointvars:
frontcond = frontcond.subs(self.Variable(v).subs)
endbranchtree = [AST.SolverStoreSolution (jointvars,checkgreaterzero=[frontcond],isHinge=[self.IsHinge(var.name) for var in jointvars])]
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,jointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.SolveAllEquations(AllEquations,curvars=solvejointvars,othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
chaintree = AST.SolverIKChainLookat3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), jointtree=tree,Dfk=self.Tfinal[0,0:3].transpose(),Pfk=self.Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_Rotation3D(self,LinksRaw, jointvars, isolvejointvars, Rbaseraw=eye(3)):
self._iktype = 'rotation3d'
Rbase = eye(4)
for i in range(3):
for j in range(3):
Rbase[i,j] = self.convertRealToRational(Rbaseraw[i,j])
Tfirstright = LinksRaw[-1]*Rbase
Links = LinksRaw[:-1]
LinksInv = [self.affineInverse(link) for link in Links]
self.Tfinal = self.multiplyMatrix(Links)
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 3:
raise self.CannotSolveError('need 3 joints')
log.info('ikfast rotation3d: %s',solvejointvars)
AllEquations = self.buildEquationsFromRotation(Links,self.Tee[0:3,0:3],solvejointvars,self.freejointvars)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
tree = self.SolveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
return AST.SolverIKChainRotation3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], (self.Tee[0:3,0:3] * self.affineInverse(Tfirstright)[0:3,0:3]).subs(self.freevarsubs), tree, Rfk = self.Tfinal[0:3,0:3] * Tfirstright[0:3,0:3])
def solveFullIK_TranslationLocalGlobal6D(self,LinksRaw, jointvars, isolvejointvars, Tgripperraw=eye(4)):
self._iktype = 'translation3d'
Tgripper = eye(4)
for i in range(4):
for j in range(4):
Tgripper[i,j] = self.convertRealToRational(Tgripperraw[i,j])
localpos = Matrix(3,1,[self.Tee[0,0],self.Tee[1,1],self.Tee[2,2]])
chain = self._solveFullIK_Translation3D(LinksRaw,jointvars,isolvejointvars,Tgripper[0:3,3]+Tgripper[0:3,0:3]*localpos,False)
chain.uselocaltrans = True
return chain
def solveFullIK_Translation3D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
self._iktype = 'translation3d'
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
return self._solveFullIK_Translation3D(LinksRaw,jointvars,isolvejointvars,basepos)
def _solveFullIK_Translation3D(self,LinksRaw, jointvars, isolvejointvars, basepos,check=True):
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
self.Tfinal = self.multiplyMatrix(Links)
self.Tfinal[0:3,3] = self.Tfinal[0:3,0:3]*basepos+self.Tfinal[0:3,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 3:
raise self.CannotSolveError('need 3 joints')
log.info('ikfast translation3d: %s',solvejointvars)
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+Links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True)
if check:
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
transtree = self.SolveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
chaintree = AST.SolverIKChainTranslation3D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3], jointtree=transtree, Pfk = self.Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationXY2D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(2,1,[S.Zero,S.Zero])):
self._iktype = 'translationxy2d'
self.ppsubs = [] # disable since pz is not valid
self.pp = None
basepos = Matrix(2,1,[self.convertRealToRational(x) for x in rawbasepos])
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
self.Tfinal = self.multiplyMatrix(Links)
self.Tfinal[0:2,3] = self.Tfinal[0:2,0:2]*basepos+self.Tfinal[0:2,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 2:
raise self.CannotSolveError('need 2 joints')
log.info('ikfast translationxy2d: %s',solvejointvars)
Tbaseposinv = eye(4)
Tbaseposinv[2,2] = S.Zero
Tbaseposinv[0:2,3] = -basepos
Tbasepos = eye(4)
Tbasepos[2,2] = S.Zero
Tbasepos[0:2,3] = basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [Tbasepos]+Links[::-1]+[self.Teeinv]
Taccum = eye(4)
numvarsdone = 1
Positions = []
Positionsee = []
for i in range(len(T1links)-1):
Taccum = T1linksinv[i]*Taccum
hasvars = [self.has(Taccum,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(Taccum[0:2,3])
Positionsee.append(self.multiplyMatrix(T1links[(i+1):])[0:2,3])
numvarsdone += 1
if numvarsdone > 2:
# more than 2 variables is almost always useless
break
if len(Positions) == 0:
Positions.append(zeros((2,1)))
Positionsee.append(self.multiplyMatrix(T1links)[0:2,3])
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,solvejointvars+self.freejointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
transtree = self.SolveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
chaintree = AST.SolverIKChainTranslationXY2D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:2,3], jointtree=transtree, Pfk = self.Tfinal[0:2,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationXYOrientation3D(self,LinksRaw, jointvars, isolvejointvars, rawbasepos=Matrix(2,1,[S.Zero,S.Zero]), rawangle=S.Zero):
self._iktype = 'translationxyorientation3d'
raise self.CannotSolveError('TranslationXYOrientation3D not implemented yet')
def solveFullIK_Ray4D(self,LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""basedir,basepos needs to be filled with a direction and position of the ray to control"""
self._iktype = 'ray4d'
basedir = Matrix(3,1,[Float(x,30) for x in rawbasedir])
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i])
basepos = basepos-basedir*basedir.dot(basepos)
Links = LinksRaw[:]
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
self.Tfinal = zeros((4,4))
self.Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
self.Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 4:
raise self.CannotSolveError('need 4 joints')
log.info('ikfast ray4d: %s',solvejointvars)
Pee = self.Tee[0:3,3]
Dee = self.Tee[0,0:3].transpose()
numvarsdone = 2
Positions = []
Positionsee = []
for i in range(len(Links)-1):
T = self.multiplyMatrix(Links[i:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
hasvars = [self.has(P,v) or self.has(D,v) for v in solvejointvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(P.cross(D))
Positionsee.append(Pee.cross(Dee))
Positions.append(D)
Positionsee.append(Dee)
break
Tinv = self.affineInverse(Links[i])
Pee = Tinv[0:3,0:3]*Pee+Tinv[0:3,3]
Dee = Tinv[0:3,0:3]*Dee
AllEquations = self.buildEquationsFromTwoSides(Positions,Positionsee,jointvars,uselength=True)
self.checkSolvability(AllEquations,solvejointvars,self.freejointvars)
#try:
tree = self.SolveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars = self.freejointvars[:],solsubs = self.freevarsubs[:],endbranchtree=endbranchtree)
#except self.CannotSolveError:
# build the raghavan/roth equations and solve with higher power methods
# pass
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
chaintree = AST.SolverIKChainRay([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs),jointtree=tree,Dfk=self.Tfinal[0,0:3].transpose(),Pfk=self.Tfinal[0:3,3])
chaintree.dictequations += self.ppsubs
return chaintree
def solveFullIK_TranslationDirection5D(self, LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.Zero,S.Zero,S.One]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero])):
"""Solves 3D translation + 3D direction
"""
self._iktype = 'translationdirection5d'
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir = Matrix(3,1,[Float(x,30) for x in rawbasedir])
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i],5)
        basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # renormalize, since the rational rounding above perturbed the components
offsetdist = basedir.dot(basepos)
basepos = basepos-basedir*offsetdist
Links = LinksRaw[:]
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
numzeros = int(basedir[0]==S.Zero) + int(basedir[1]==S.Zero) + int(basedir[2]==S.Zero)
# if numzeros < 2:
# try:
# log.info('try to rotate the last joint so that numzeros increases')
# assert(not self.has(Links[-1],*solvejointvars))
# localdir = Links[-1][0:3,0:3]*basedir
# localpos = Links[-1][0:3,0:3]*basepos+Links[-1][0:3,3]
# AllEquations = Links[-2][0:3,0:3]*localdir
# tree=self.SolveAllEquations(AllEquations,curvars=solvejointvars[-1:],othersolvedvars = [],solsubs = [],endbranchtree=[])
# offset = tree[0].jointeval[0]
# endbranchtree[0].offsetvalues = [S.Zero]*len(solvejointvars)
# endbranchtree[0].offsetvalues[-1] = offset
# Toffset = Links[-2].subs(solvejointvars[-1],offset).evalf()
# localdir2 = Toffset[0:3,0:3]*localdir
# localpos2 = Toffset[0:3,0:3]*localpos+Toffset[0:3,3]
# Links[-1]=eye(4)
# for i in range(3):
# basedir[i] = self.convertRealToRational(localdir2[i])
# basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # unfortunately have to do it again...
# basepos = Matrix(3,1,[self.convertRealToRational(x) for x in localpos2])
# except Exception, e:
# print 'failed to rotate joint correctly',e
LinksInv = [self.affineInverse(link) for link in Links]
T = self.multiplyMatrix(Links)
self.Tfinal = zeros((4,4))
self.Tfinal[0,0:3] = (T[0:3,0:3]*basedir).transpose()
self.Tfinal[0:3,3] = T[0:3,0:3]*basepos+T[0:3,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 5:
raise self.CannotSolveError('need 5 joints')
log.info('ikfast translation direction 5d: %s',solvejointvars)
        # if the last two axes intersect, the position and direction computations can be decoupled
ilinks = [i for i,Tlink in enumerate(Links) if self.has(Tlink,*solvejointvars)]
T = self.multiplyMatrix(Links[ilinks[-2]:])
P = T[0:3,0:3]*basepos+T[0:3,3]
D = T[0:3,0:3]*basedir
tree = None
if not self.has(P,*solvejointvars):
Tposinv = eye(4)
Tposinv[0:3,3] = -P
T0links=[Tposinv]+Links[:ilinks[-2]]
try:
log.info('last 2 axes are intersecting')
tree = self.solve5DIntersectingAxes(T0links,basepos,D,solvejointvars,endbranchtree)
except self.CannotSolveError, e:
log.warn('%s', e)
if tree is None:
rawpolyeqs2 = [None]*len(solvejointvars)
coupledsolutions = None
endbranchtree2 = []
for solvemethod in [self.solveLiWoernleHiller, self.solveKohliOsvatic]:#, self.solveManochaCanny]:
if coupledsolutions is not None:
break
for index in [2,3]:
T0links=LinksInv[:ilinks[index]][::-1]
T0 = self.multiplyMatrix(T0links)
T1links=Links[ilinks[index]:]
T1 = self.multiplyMatrix(T1links)
p0 = T0[0:3,0:3]*self.Tee[0:3,3]+T0[0:3,3]
p1 = T1[0:3,0:3]*basepos+T1[0:3,3]
l0 = T0[0:3,0:3]*self.Tee[0,0:3].transpose()
l1 = T1[0:3,0:3]*basedir
AllEquations = []
for i in range(3):
AllEquations.append(self.SimplifyTransform(p0[i]-p1[i]).expand())
AllEquations.append(self.SimplifyTransform(l0[i]-l1[i]).expand())
self.sortComplexity(AllEquations)
if rawpolyeqs2[index] is None:
rawpolyeqs2[index] = self.buildRaghavanRothEquations(p0,p1,l0,l1,solvejointvars)
try:
coupledsolutions,usedvars = solvemethod(rawpolyeqs2[index],solvejointvars,endbranchtree=[AST.SolverSequence([endbranchtree2])], AllEquationsExtra=AllEquations)
break
except self.CannotSolveError, e:
log.warn('%s', e)
continue
if coupledsolutions is None:
raise self.CannotSolveError('raghavan roth equations too complex')
log.info('solved coupled variables: %s',usedvars)
if len(usedvars) < len(solvejointvars):
curvars=solvejointvars[:]
solsubs = self.freevarsubs[:]
for var in usedvars:
curvars.remove(var)
solsubs += self.Variable(var).subs
self.checkSolvability(AllEquations,curvars,self.freejointvars+usedvars)
localtree = self.SolveAllEquations(AllEquations,curvars=curvars,othersolvedvars = self.freejointvars+usedvars,solsubs = solsubs,endbranchtree=endbranchtree)
# make it a function so compiled code is smaller
endbranchtree2.append(AST.SolverFunction('innerfn', self.verifyAllEquations(AllEquations,curvars,solsubs,localtree)))
tree = coupledsolutions
else:
endbranchtree2 += endbranchtree
tree = coupledsolutions
chaintree = AST.SolverIKChainRay([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=(self.Tee[0:3,3]-self.Tee[0,0:3].transpose()*offsetdist).subs(self.freevarsubs), Dee=self.Tee[0,0:3].transpose().subs(self.freevarsubs),jointtree=tree,Dfk=self.Tfinal[0,0:3].transpose(),Pfk=self.Tfinal[0:3,3],is5dray=True)
chaintree.dictequations += self.ppsubs
return chaintree
def solve5DIntersectingAxes(self, T0links, basepos, D, solvejointvars, endbranchtree):
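        # with the last two axes intersecting, the intersection point depends only on
        # the translation joints: solve those from the position equations first, then
        # solve the remaining joints from the direction equations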
LinksInv = [self.affineInverse(T) for T in T0links]
T0 = self.multiplyMatrix(T0links)
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+T0links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True)
transvars = [v for v in solvejointvars if self.has(T0,v)]
self.checkSolvability(AllEquations,transvars,self.freejointvars)
dirtree = []
newendbranchtree = [AST.SolverSequence([dirtree])]
transtree = self.SolveAllEquations(AllEquations,curvars=transvars[:],othersolvedvars=self.freejointvars,solsubs = self.freevarsubs[:],endbranchtree=newendbranchtree)
transtree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,transtree)
rotvars = [v for v in solvejointvars if self.has(D,v)]
solsubs = self.freevarsubs[:]
for v in transvars:
solsubs += self.Variable(v).subs
AllEquations = self.buildEquationsFromTwoSides([D],[T0[0:3,0:3]*self.Tee[0,0:3].transpose()],solvejointvars,uselength=False)
self.checkSolvability(AllEquations,rotvars,self.freejointvars+transvars)
localdirtree = self.SolveAllEquations(AllEquations,curvars=rotvars[:],othersolvedvars = self.freejointvars+transvars,solsubs=solsubs,endbranchtree=endbranchtree)
# make it a function so compiled code is smaller
dirtree.append(AST.SolverFunction('innerfn', self.verifyAllEquations(AllEquations,rotvars,solsubs,localdirtree)))
return transtree
def solveFullIK_6D(self, LinksRaw, jointvars, isolvejointvars,Tgripperraw=eye(4)):
"""Solves the full 6D translatio + rotation IK
"""
self._iktype = 'transform6d'
Tgripper = eye(4)
for i in range(4):
for j in range(4):
Tgripper[i,j] = self.convertRealToRational(Tgripperraw[i,j])
Tfirstright = LinksRaw[-1]*Tgripper
Links = LinksRaw[:-1]
# if Links[0][0:3,0:3] == eye(3):
# # first axis is prismatic, so zero out self.Tee
# for i in range(3):
# if Links[0][i,3] != S.Zero:
# self.Tee[i,3] = S.Zero
# self.Teeinv = self.affineInverse(self.Tee)
LinksInv = [self.affineInverse(link) for link in Links]
self.Tfinal = self.multiplyMatrix(Links)
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
solvejointvars = [jointvars[i] for i in isolvejointvars]
if len(solvejointvars) != 6:
raise self.CannotSolveError('need 6 joints')
log.info('ikfast 6d: %s',solvejointvars)
tree = self.TestIntersectingAxes(solvejointvars,Links, LinksInv,endbranchtree)
if tree is None:
sliderjointvars = [var for var in solvejointvars if not self.IsHinge(var.name)]
if len(sliderjointvars) > 0:
ZeroMatrix = zeros(4)
for i,Tlink in enumerate(Links):
if self.has(Tlink,*sliderjointvars):
# try sliding left
if i > 0:
ileftsplit = None
for isplit in range(i-1,-1,-1):
M = self.multiplyMatrix(Links[isplit:i])
if M*Tlink-Tlink*M != ZeroMatrix:
break
if self.has(M,*solvejointvars):
# surpassed a variable!
ileftsplit = isplit
if ileftsplit is not None:
# try with the new order
log.info('rearranging Links[%d] to Links[%d]',i,ileftsplit)
NewLinks = list(Links)
NewLinks[(ileftsplit+1):(i+1)] = Links[ileftsplit:i]
NewLinks[ileftsplit] = Links[i]
NewLinksInv = list(LinksInv)
                                NewLinksInv[(ileftsplit+1):(i+1)] = LinksInv[ileftsplit:i]
NewLinksInv[ileftsplit] = LinksInv[i]
tree = self.TestIntersectingAxes(solvejointvars,NewLinks, NewLinksInv,endbranchtree)
if tree is not None:
break
# try sliding right
if i+1 < len(Links):
irightsplit = None
for isplit in range(i+1,len(Links)):
M = self.multiplyMatrix(Links[i+1:(isplit+1)])
if M*Tlink-Tlink*M != ZeroMatrix:
break
if self.has(M,*solvejointvars):
# surpassed a variable!
irightsplit = isplit
if irightsplit is not None:
log.info('rearranging Links[%d] to Links[%d]',i,irightsplit)
# try with the new order
NewLinks = list(Links)
NewLinks[i:irightsplit] = Links[(i+1):(irightsplit+1)]
NewLinks[irightsplit] = Links[i]
NewLinksInv = list(LinksInv)
NewLinksInv[i:irightsplit] = LinksInv[(i+1):(irightsplit+1)]
NewLinksInv[irightsplit] = LinksInv[i]
tree = self.TestIntersectingAxes(solvejointvars,NewLinks, NewLinksInv,endbranchtree)
if tree is not None:
break
if tree is None:
linklist = list(self.iterateThreeNonIntersectingAxes(solvejointvars,Links, LinksInv))
for T0links, T1links in linklist:#[2:]:
try:
# if T1links[-1] doesn't have any symbols, put it over to T0links. Since T1links has the position unknowns, putting over the coefficients to T0links makes things simpler
if not self.has(T1links[-1], *solvejointvars):
T0links.append(self.affineInverse(T1links.pop(-1)))
tree = self.solveFullIK_6DGeneral(T0links, T1links, solvejointvars, endbranchtree)
break
except (self.CannotSolveError,self.IKFeasibilityError), e:
log.warn('%s',e)
if tree is None:
raise self.CannotSolveError('cannot solve 6D mechanism!')
chaintree = AST.SolverIKChainTransform6D([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], (self.Tee * self.affineInverse(Tfirstright)).subs(self.freevarsubs), tree,Tfk=self.Tfinal*Tfirstright)
chaintree.dictequations += self.ppsubs+self.npxyzsubs+self.rxpsubs
return chaintree
def TestIntersectingAxes(self,solvejointvars,Links,LinksInv,endbranchtree):
for T0links,T1links,transvars,rotvars,solveRotationFirst in self.iterateThreeIntersectingAxes(solvejointvars,Links, LinksInv):
try:
return self.solve6DIntersectingAxes(T0links,T1links,transvars,rotvars,solveRotationFirst=solveRotationFirst, endbranchtree=endbranchtree)
except (self.CannotSolveError,self.IKFeasibilityError), e:
log.warn('%s',e)
return None
def _ExtractTranslationsOutsideOfMatrixMultiplication(self, Links, solvejointvars):
"""try to extract translations outside of the multiplication (left and right)
Tlefttrans * MultiplyMatrix((NewLinks) * Trighttrans = MultiplyMatrix(Links)
where Tleftrans and Trighttrans are only translation matrices
:return: Tlefttrans, NewLinks, Trighttrans
"""
NewLinks = list(Links)
Trighttrans = eye(4)
Trighttrans[0:3,3] = NewLinks[-2][0:3,0:3].transpose() * NewLinks[-2][0:3,3]
Trot_with_trans = Trighttrans * NewLinks[-1]
separated_trans = Trot_with_trans[0:3,0:3].transpose() * Trot_with_trans[0:3,3]
for j in range(0,3):
if separated_trans[j].has(*solvejointvars):
Trighttrans[j,3] = S.Zero
else:
Trighttrans[j,3] = separated_trans[j]
NewLinks[-2] = NewLinks[-2] * self.affineInverse(Trighttrans)
separated_trans = NewLinks[0][0:3,0:3] * NewLinks[1][0:3,3]
Tlefttrans = eye(4)
for j in range(0,3):
if not separated_trans[j].has(*solvejointvars):
Tlefttrans[j,3] = separated_trans[j]
NewLinks[1] = self.affineInverse(Tlefttrans) * NewLinks[1]
return Tlefttrans, NewLinks, Trighttrans
def iterateThreeIntersectingAxes(self, solvejointvars, Links, LinksInv):
"""Search for 3 consectuive intersecting axes. If a robot has this condition, it makes a lot of IK computations simpler.
"""
TestLinks=Links
TestLinksInv=LinksInv
ilinks = [i for i,Tlink in enumerate(TestLinks) if self.has(Tlink,*solvejointvars)]
hingejointvars = [var for var in solvejointvars if self.IsHinge(var.name)]
polysymbols = []
for solvejointvar in solvejointvars:
polysymbols += [s[0] for s in self.Variable(solvejointvar).subs]
for i in range(len(ilinks)-2):
startindex = ilinks[i]
endindex = ilinks[i+2]+1
Tlefttrans, T0links, Trighttrans = self._ExtractTranslationsOutsideOfMatrixMultiplication(TestLinks[startindex:endindex], solvejointvars)
T0 = self.multiplyMatrix(T0links)
# count number of variables in T0[0:3,0:3]
numVariablesInRotation = sum([self.has(T0[0:3,0:3],solvejointvar) for solvejointvar in solvejointvars])
if numVariablesInRotation < 3:
continue
solveRotationFirst = None
# sometimes the intersecting condition can be there, but is masked by small epsilon errors
# so round any coefficient in T0[:3,3] below the 5*10**-self.precision threshold to zero
translationeqs = [self.RoundEquationTerms(eq.expand()) for eq in T0[:3,3]]
if not self.has(translationeqs,*hingejointvars):
T1links = TestLinksInv[:startindex][::-1]
if len(T1links) > 0:
T1links[0] = self.affineInverse(Tlefttrans) * T1links[0]
else:
T1links = [self.affineInverse(Tlefttrans)]
T1links.append(self.Tee)
T1links += TestLinksInv[endindex:][::-1]
T1links[-1] = T1links[-1] * self.affineInverse(Trighttrans)
solveRotationFirst = False
else:
Tlefttrans, T0links, Trighttrans = self._ExtractTranslationsOutsideOfMatrixMultiplication(TestLinksInv[startindex:endindex][::-1], solvejointvars)
T0 = self.multiplyMatrix(T0links)
translationeqs = [self.RoundEquationTerms(eq.expand()) for eq in T0[:3,3]]
if not self.has(translationeqs,*hingejointvars):
T1links = TestLinks[endindex:]
if len(T1links) > 0:
T1links[0] = Trighttrans * T1links[0]
else:
T1links = [Trighttrans]
T1links.append(self.Teeinv)
T1links += TestLinks[:startindex]
T1links[-1] = T1links[-1] * Tlefttrans
solveRotationFirst = False
if solveRotationFirst is not None:
rotvars = []
transvars = []
for svar in solvejointvars:
if self.has(T0[0:3,0:3],svar):
rotvars.append(svar)
else:
transvars.append(svar)
if len(rotvars) == 3 and len(transvars) == 3:
log.info('found 3 consecutive intersecting axes links[%d:%d], rotvars=%s, translationvars=%s',startindex, endindex, rotvars,transvars)
yield T0links,T1links,transvars,rotvars,solveRotationFirst
def RoundEquationTerms(self,eq,epsilon=None):
if eq.is_Add:
neweq = S.Zero
for subeq in eq.args:
neweq += self.RoundEquationTerms(subeq,epsilon)
elif eq.is_Mul:
neweq = self.RoundEquationTerms(eq.args[0],epsilon)
for subeq in eq.args[1:]:
neweq *= self.RoundEquationTerms(subeq,epsilon)
elif eq.is_Function:
newargs = [self.RoundEquationTerms(subeq,epsilon) for subeq in eq.args]
neweq = eq.func(*newargs)
elif eq.is_number:
if epsilon is None:
epsilon = 5*(10**-self.precision)
if abs(eq.evalf()) <= epsilon:
neweq = S.Zero
else:
neweq = eq
else:
neweq=eq
return neweq
def RoundPolynomialTerms(self,peq,epsilon):
terms = {}
for monom, coeff in peq.terms():
if not coeff.is_number or abs(coeff) > epsilon:
terms[monom] = coeff
if len(terms) == 0:
return Poly(S.Zero,peq.gens)
return peq.from_dict(terms, *peq.gens)
def iterateThreeNonIntersectingAxes(self, solvejointvars, Links, LinksInv):
"""check for three consecutive non-intersecting axes.
if several points exist, so have to choose one that is least complex?
"""
ilinks = [i for i,Tlink in enumerate(Links) if self.has(Tlink,*solvejointvars)]
usedindices = []
for imode in range(2):
for i in range(len(ilinks)-2):
if i in usedindices:
continue
startindex = ilinks[i]
endindex = ilinks[i+2]+1
p0 = self.multiplyMatrix(Links[ilinks[i]:ilinks[i+1]])[0:3,3]
p1 = self.multiplyMatrix(Links[ilinks[i+1]:ilinks[i+2]])[0:3,3]
has0 = self.has(p0,*solvejointvars)
has1 = self.has(p1,*solvejointvars)
if (imode == 0 and has0 and has1) or (imode == 1 and (has0 or has1)):
T0links = Links[startindex:endindex]
T1links = LinksInv[:startindex][::-1]
T1links.append(self.Tee)
T1links += LinksInv[endindex:][::-1]
usedindices.append(i)
usedvars = [var for var in solvejointvars if any([self.has(T0,var) for T0 in T0links])]
log.info('found 3 consecutive non-intersecting axes links[%d:%d], vars=%s',startindex,endindex,str(usedvars))
yield T0links, T1links
def solve6DIntersectingAxes(self, T0links, T1links, transvars,rotvars,solveRotationFirst,endbranchtree):
"""Solve 6D equations using fact that 3 axes are intersecting. The 3 intersecting axes are all part of T0links and will be used to compute the rotation of the robot. The other 3 axes are part of T1links and will be used to first compute the position.
"""
self._iktype = 'transform6d'
assert(len(transvars)==3 and len(rotvars) == 3)
T0 = self.multiplyMatrix(T0links)
T0posoffset = eye(4)
T0posoffset[0:3,3] = -T0[0:3,3]
T0links = [T0posoffset] + T0links
T1links = [T0posoffset] + T1links
T1 = self.multiplyMatrix(T1links)
othersolvedvars = rotvars+self.freejointvars if solveRotationFirst else self.freejointvars[:]
T1linksinv = [self.affineInverse(T) for T in T1links]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,transvars,othersolvedvars,uselength=True)
self.checkSolvability(AllEquations,transvars,self.freejointvars)
rottree = []
if solveRotationFirst:
newendbranchtree = endbranchtree
else:
newendbranchtree = [AST.SolverSequence([rottree])]
curvars = transvars[:]
solsubs=self.freevarsubs[:]
transtree = self.SolveAllEquations(AllEquations,curvars=curvars,othersolvedvars=othersolvedvars[:],solsubs=solsubs,endbranchtree=newendbranchtree)
transtree = self.verifyAllEquations(AllEquations,rotvars if solveRotationFirst else transvars+rotvars,self.freevarsubs[:],transtree)
solvertree= []
solvedvarsubs = self.freevarsubs[:]
if solveRotationFirst:
storesolutiontree = transtree
else:
solvertree += transtree
storesolutiontree = endbranchtree
for tvar in transvars:
solvedvarsubs += self.Variable(tvar).subs
Ree = zeros((3,3))
for i in range(3):
for j in range(3):
Ree[i,j] = Symbol('new_r%d%d'%(i,j))
try:
T1sub = T1.subs(solvedvarsubs)
for i in range(3):
for j in range(3):
self.globalsymbols.append((Ree[i,j],T1sub[i,j]))
othersolvedvars = self.freejointvars if solveRotationFirst else transvars+self.freejointvars
AllEquations = self.buildEquationsFromRotation(T0links,Ree,rotvars,othersolvedvars)
self.checkSolvability(AllEquations,rotvars,othersolvedvars)
currotvars = rotvars[:]
rottree += self.SolveAllEquations(AllEquations,curvars=currotvars,othersolvedvars=othersolvedvars,solsubs=self.freevarsubs[:],endbranchtree=storesolutiontree)
if len(rottree) == 0:
raise self.CannotSolveError('could not solve for all rotation variables: %s'%str(rotvars))
if solveRotationFirst:
solvertree.append(AST.SolverRotation(T1sub, rottree))
else:
rottree[:] = [AST.SolverRotation(T1sub, rottree[:])]
return solvertree
finally:
# remove the Ree global symbols
removesymbols = set()
for i in range(3):
for j in range(3):
removesymbols.add(Ree[i,j])
self.globalsymbols = [g for g in self.globalsymbols if not g[0] in removesymbols]
def solveFullIK_6DGeneral(self, T0links, T1links, solvejointvars, endbranchtree):
"""Solve 6D equations of a general kinematics structure.
This method only works if there exist 3 consecutive joints that do not always intersect!
"""
self._iktype = 'transform6d'
rawpolyeqs2 = [None,None]
coupledsolutions = None
leftovervarstree = []
origendbranchtree = endbranchtree
for solvemethod in [self.solveLiWoernleHiller, self.solveKohliOsvatic]:#, self.solveManochaCanny]:
if coupledsolutions is not None:
break
complexities = [0,0]
for splitindex in [0, 1]:
if rawpolyeqs2[splitindex] is None:
if splitindex == 0:
# invert, this seems to always give simpler solutions, so prioritize it
T0 = self.affineSimplify(self.multiplyMatrix([self.affineInverse(T) for T in T0links][::-1]))
T1 = self.affineSimplify(self.multiplyMatrix([self.affineInverse(T) for T in T1links][::-1]))
else:
T0 = self.affineSimplify(self.multiplyMatrix(T0links))
T1 = self.affineSimplify(self.multiplyMatrix(T1links))
rawpolyeqs,numminvars = self.buildRaghavanRothEquationsFromMatrix(T0,T1,solvejointvars,simplify=False)
if numminvars <= 5 or len(rawpolyeqs[0][1].gens) <= 6:
rawpolyeqs2[splitindex] = rawpolyeqs
complexities[splitindex] = sum([self.ComputePolyComplexity(peq0)+self.ComputePolyComplexity(peq1) for peq0, peq1 in rawpolyeqs2[splitindex]])
# try the lowest complexity first and then simplify!
sortedindices = sorted(zip(complexities,[0,1]))
for complexity, splitindex in sortedindices:
if rawpolyeqs2[splitindex] is None:
continue
for peqs in rawpolyeqs2[splitindex]:
peqs[0] = self.SimplifyTransformPoly(peqs[0])
peqs[1] = self.SimplifyTransformPoly(peqs[1])
try:
if rawpolyeqs2[splitindex] is not None:
rawpolyeqs=rawpolyeqs2[splitindex]
endbranchtree=[AST.SolverSequence([leftovervarstree])]
AllEquationsExtra = []
for i in range(3):
for j in range(4):
AllEquationsExtra.append(self.SimplifyTransform(T0[i,j]-T1[i,j]))
self.sortComplexity(AllEquationsExtra)
coupledsolutions,usedvars = solvemethod(rawpolyeqs,solvejointvars,endbranchtree=endbranchtree,AllEquationsExtra=AllEquationsExtra)
break
except self.CannotSolveError, e:
if rawpolyeqs2[splitindex] is not None and len(rawpolyeqs2[splitindex]) > 0:
log.warn(u'solving %s: %s', rawpolyeqs2[splitindex][0][0].gens, e)
else:
log.warn(e)
continue
if coupledsolutions is None:
raise self.CannotSolveError('6D general method failed, raghavan roth equations might be too complex')
log.info('solved coupled variables: %s',usedvars)
self.sortComplexity(AllEquationsExtra)
curvars=solvejointvars[:]
solsubs = self.freevarsubs[:]
for var in usedvars:
curvars.remove(var)
solsubs += self.Variable(var).subs
if len(curvars) > 0:
self.checkSolvability(AllEquationsExtra,curvars,self.freejointvars+usedvars)
leftovertree = self.SolveAllEquations(AllEquationsExtra,curvars=curvars,othersolvedvars = self.freejointvars+usedvars,solsubs = solsubs,endbranchtree=origendbranchtree)
leftovervarstree.append(AST.SolverFunction('innerfn',leftovertree))
else:
leftovervarstree += origendbranchtree
return coupledsolutions
def solveFullIK_TranslationAxisAngle4D(self, LinksRaw, jointvars, isolvejointvars, rawbasedir=Matrix(3,1,[S.One,S.Zero,S.Zero]),rawbasepos=Matrix(3,1,[S.Zero,S.Zero,S.Zero]),rawglobaldir=Matrix(3,1,[S.Zero,S.Zero,S.One]), rawnormaldir=None, ignoreaxis=None):
"""Solves 3D translation + Angle with respect to X-axis
:param rawnormaldir: the axis in the base coordinate system that will be computing a rotation about
:param rawglobaldir: the axis normal to rawnormaldir that represents the 0 angle.
:param basedir: the axis in the effector coordinate system measuring the in-plane angle with
"""
self._iktype = 'translationaxisangle4d'
globaldir = Matrix(3,1,[Float(x,30) for x in rawglobaldir])
globaldir /= sqrt(globaldir[0]*globaldir[0]+globaldir[1]*globaldir[1]+globaldir[2]*globaldir[2])
for i in range(3):
globaldir[i] = self.convertRealToRational(globaldir[i],5)
iktype = None
if rawnormaldir is not None:
normaldir = Matrix(3,1,[Float(x,30) for x in rawnormaldir])
binormaldir = normaldir.cross(globaldir).transpose()
if globaldir[0] == S.One and normaldir[2] == S.One:
if ignoreaxis == 2:
iktype = IkType.TranslationXYOrientation3D
else:
iktype = IkType.TranslationXAxisAngleZNorm4D
elif globaldir[1] == S.One and normaldir[0] == S.One:
iktype = IkType.TranslationYAxisAngleXNorm4D
elif globaldir[2] == S.One and normaldir[1] == S.One:
iktype = IkType.TranslationZAxisAngleYNorm4D
else:
normaldir = None
if globaldir[0] == S.One:
iktype = IkType.TranslationXAxisAngle4D
elif globaldir[1] == S.One:
iktype = IkType.TranslationYAxisAngle4D
elif globaldir[2] == S.One:
iktype = IkType.TranslationZAxisAngle4D
if iktype is None:
raise ValueError('currently globaldir can only be one of the x,y,z axes')
basepos = Matrix(3,1,[self.convertRealToRational(x) for x in rawbasepos])
basedir = Matrix(3,1,[Float(x,30) for x in rawbasedir])
L = sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2])
basedir /= L
for i in range(3):
basedir[i] = self.convertRealToRational(basedir[i],5)
basedir /= sqrt(basedir[0]*basedir[0]+basedir[1]*basedir[1]+basedir[2]*basedir[2]) # unfortunately have to do it again...
Links = LinksRaw[:]
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
LinksInv = [self.affineInverse(link) for link in Links]
Tallmult = self.multiplyMatrix(Links)
self.Tfinal = zeros((4,4))
if normaldir is None:
self.Tfinal[0,0] = acos(globaldir.dot(Tallmult[0:3,0:3]*basedir))
else:
self.Tfinal[0,0] = atan2(binormaldir.dot(Tallmult[0:3,0:3]*basedir), globaldir.dot(Tallmult[0:3,0:3]*basedir))
if self.Tfinal[0,0] == nan:
raise self.CannotSolveError('cannot solve 4D axis angle IK. Most likely manipulator direction is aligned with the rotation axis')
self.Tfinal[0:3,3] = Tallmult[0:3,0:3]*basepos+Tallmult[0:3,3]
self.testconsistentvalues = self.ComputeConsistentValues(jointvars,self.Tfinal,numsolutions=4)
solvejointvars = [jointvars[i] for i in isolvejointvars]
expecteddof = 4
if ignoreaxis is not None:
expecteddof -= 1
if len(solvejointvars) != expecteddof:
raise self.CannotSolveError('need %d joints'%expecteddof)
log.info('ikfast translation axis %dd, globaldir=%s, basedir=%s: %s', expecteddof, globaldir, basedir, solvejointvars)
# if last two axes are intersecting, can divide computing position and direction
ilinks = [i for i,Tlink in enumerate(Links) if self.has(Tlink,*solvejointvars)]
Tbaseposinv = eye(4)
Tbaseposinv[0:3,3] = -basepos
T1links = [Tbaseposinv]+LinksInv[::-1]+[self.Tee]
T1linksinv = [self.affineInverse(Tbaseposinv)]+Links[::-1]+[self.Teeinv]
AllEquations = self.buildEquationsFromPositions(T1links,T1linksinv,solvejointvars,self.freejointvars,uselength=True, ignoreaxis=ignoreaxis)
for index in range(len(ilinks)):
# inv(T0) * T1 * basedir = globaldir
# => T1 * basedir = T0 * globaldir
T0links=LinksInv[:ilinks[index]][::-1]
T0 = self.multiplyMatrix(T0links)
T1links=Links[ilinks[index]:]
T1 = self.multiplyMatrix(T1links)
globaldir2 = T0[0:3,0:3]*globaldir
basedir2 = T1[0:3,0:3]*basedir
for i in range(3):
if globaldir2[i].is_number:
globaldir2[i] = self.convertRealToRational(globaldir2[i])
if basedir2[i].is_number:
basedir2[i] = self.convertRealToRational(basedir2[i])
eq = self.SimplifyTransform(self.trigsimp(globaldir2.dot(basedir2),solvejointvars))-cos(self.Tee[0])
if self.CheckExpressionUnique(AllEquations,eq):
AllEquations.append(eq)
if normaldir is not None:
binormaldir2 = T0[0:3,0:3]*binormaldir
for i in range(3):
if binormaldir2[i].is_number:
binormaldir2[i] = self.convertRealToRational(binormaldir2[i])
eq = self.SimplifyTransform(self.trigsimp(binormaldir2.dot(basedir2),solvejointvars))-sin(self.Tee[0])
if self.CheckExpressionUnique(AllEquations,eq):
AllEquations.append(eq)
# check if planar with respect to normaldir
extravar = None
if normaldir is not None:
if Tallmult[0:3,0:3]*normaldir == normaldir:
Tnormaltest = self.rodrigues(normaldir,pi/2)
# planar, so know that the sum of all hinge joints is equal to the final angle
# can use this fact to substitute one angle with the other values
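# e.g. for a planar chain the measured angle satisfies
#   angle = zeroangle + sum of signed hinge angles,
# so the last hinge below can be written in terms of the remaining ones.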
angles = []
for solvejoint in solvejointvars:
if self.IsHinge(solvejoint.name):
Tall0 = Tallmult[0:3,0:3].subs(solvejoint,S.Zero)
Tall1 = Tallmult[0:3,0:3].subs(solvejoint,pi/2)
if Tall0*Tnormaltest-Tall1:
angles.append(solvejoint)
else:
angles.append(-solvejoint)
Tzero = Tallmult.subs([(a,S.Zero) for a in angles])
for i in range(3):
if binormaldir[i].is_number:
binormaldir[i] = self.convertRealToRational(binormaldir[i])
if basedir[i].is_number:
basedir[i] = self.convertRealToRational(basedir[i])
zeroangle = atan2(binormaldir.dot(Tzero[0:3,0:3]*basedir), globaldir.dot(Tzero[0:3,0:3]*basedir))
eqangles = self.Tee[0]-zeroangle
for a in angles[:-1]:
eqangles -= a
extravar = (angles[-1],eqangles)
coseq = cos(eqangles).expand(trig=True)
sineq = sin(eqangles).expand(trig=True)
AllEquationsOld = AllEquations
AllEquations = [self.trigsimp(eq.subs([(cos(angles[-1]),coseq),(sin(angles[-1]),sineq)]).expand(),solvejointvars) for eq in AllEquationsOld]
solvejointvars.remove(angles[-1])
self.sortComplexity(AllEquations)
endbranchtree = [AST.SolverStoreSolution (jointvars,isHinge=[self.IsHinge(var.name) for var in jointvars])]
if extravar is not None:
solution=AST.SolverSolution(extravar[0].name, jointeval=[extravar[1]],isHinge=self.IsHinge(extravar[0].name))
endbranchtree.insert(0,solution)
try:
tree = self.SolveAllEquations(AllEquations,curvars=solvejointvars[:],othersolvedvars=self.freejointvars,solsubs=self.freevarsubs[:],endbranchtree=endbranchtree)
tree = self.verifyAllEquations(AllEquations,solvejointvars,self.freevarsubs,tree)
except self.CannotSolveError:
if 0:
solvejointvar0sols = solve(AllEquations[4], solvejointvars[0])
NewEquations = [eq.subs(solvejointvars[0], solvejointvar0sols[0]) for eq in AllEquations]
newsolution=AST.SolverSolution(solvejointvars[0].name, jointeval=solvejointvar0sols,isHinge=self.IsHinge(solvejointvars[0].name))
endbranchtree.insert(0,newsolution)
tree = self.SolveAllEquations(NewEquations,curvars=solvejointvars[1:],othersolvedvars=self.freejointvars,solsubs=self.freevarsubs[:],endbranchtree=endbranchtree)
else:
othersolvedvars = self.freejointvars[:]
solsubs = self.freevarsubs[:]
freevarinvsubs = [(f[1],f[0]) for f in self.freevarsubs]
solinvsubs = [(f[1],f[0]) for f in solsubs]
# single variable solutions
solutions = []
for curvar in solvejointvars:
othervars = [var for var in solvejointvars if var != curvar]
curvarsym = self.Variable(curvar)
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has(*othervars)) and e.has(curvar,curvarsym.htvar,curvarsym.cvar,curvarsym.svar):
eq = e.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(raweqns,eq):
raweqns.append(eq)
if len(raweqns) > 0:
try:
rawsolutions=self.solveSingleVariable(self.sortComplexity(raweqns),curvar,othersolvedvars,unknownvars=solvejointvars)
for solution in rawsolutions:
self.ComputeSolutionComplexity(solution,othersolvedvars,solvejointvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
if len(solutions) == 0:
raise self.CannotSolveError('could not find any single variable solutions')
firstsolution, firstvar = solutions[0]
othersolvedvars.append(firstvar)
solsubs += self.Variable(firstvar).subs
curvars=solvejointvars[:]
curvars.remove(firstvar)
trigsubs = []
polysubs = []
polyvars = []
for v in curvars:
if self.IsHinge(v.name):
var = self.Variable(v)
polysubs += [(cos(v),var.cvar),(sin(v),var.svar)]
polyvars += [var.cvar,var.svar]
trigsubs.append((var.svar**2,1-var.cvar**2))
trigsubs.append((var.svar**3,var.svar*(1-var.cvar**2)))
else:
polyvars.append(v)
polysubsinv = [(b,a) for a,b in polysubs]
rawpolyeqs = [Poly(Poly(eq.subs(polysubs),*polyvars).subs(trigsubs),*polyvars) for eq in AllEquations if eq.has(*curvars)]
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
for i in range(0,len(polyvars),2):
dummy = Symbol('ht%s'%polyvars[i].name[1:])
# [0] - cos, [1] - sin
dummys.append(dummy)
dummysubs += [(polyvars[i],(1-dummy**2)/(1+dummy**2)),(polyvars[i+1],2*dummy/(1+dummy**2))]
var = polyvars[i].subs(self.invsubs).args[0]
dummysubs2.append((var,2*atan(dummy)))
dummyvars.append((dummy,tan(0.5*var)))
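# Weierstrass (tan half-angle) substitution: with ht = tan(var/2),
#   cos(var) = (1-ht**2)/(1+ht**2), sin(var) = 2*ht/(1+ht**2);
# multiplying each equation by (1+ht**2)**maxdenom below clears all denominators.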
newreducedeqs = []
for peq in rawpolyeqs:
maxdenom = [0]*(len(polyvars)/2)
for monoms in peq.monoms():
for i in range(len(maxdenom)):
maxdenom[i] = max(maxdenom[i],monoms[2*i]+monoms[2*i+1])
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i in range(len(polyvars)):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
for i in range(len(maxdenom)):
denom = fraction(dummysubs[2*i][1])[1]
term *= denom**(maxdenom[i]-monoms[2*i]-monoms[2*i+1])
eqnew += term
newreducedeqs.append(Poly(eqnew,*dummys))
newreducedeqs.sort(cmp=lambda x,y: len(x.monoms()) - len(y.monoms()))
ileftvar = 0
leftvar = dummys[ileftvar]
exportcoeffeqs=None
for ioffset in range(len(newreducedeqs)):
try:
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs[ioffset:],ileftvar)
log.info('ioffset %d'%ioffset)
break
except self.CannotSolveError, e:
log.debug('solveDialytically errors: %s',e)
if exportcoeffeqs is None:
raise self.CannotSolveError('failed to solveDialytically')
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in curvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=[dummysubs[2*i][1] for i in range(len(curvars))],jointevalsin=[dummysubs[2*i+1][1] for i in range(len(curvars))],isHinges=[self.IsHinge(v.name) for v in curvars],exportvar=[v.name for v in dummys],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly12qep',rootmaxdim=16)
self.usinglapack = True
tree = [firstsolution,coupledsolution]+ endbranchtree
# package final solution
chaintree = AST.SolverIKChainAxisAngle([(jointvars[ijoint],ijoint) for ijoint in isolvejointvars], [(v,i) for v,i in izip(self.freejointvars,self.ifreejointvars)], Pee=self.Tee[0:3,3].subs(self.freevarsubs), angleee=self.Tee[0,0].subs(self.freevarsubs),jointtree=tree,Pfk=self.Tfinal[0:3,3],anglefk=self.Tfinal[0,0],iktype=iktype)
chaintree.dictequations += self.ppsubs
return chaintree
def buildEquationsFromTwoSides(self,leftside, rightside, usedvars, uselength=True):
# try to shift all the constants of each Position expression to one side
for i in range(len(leftside)):
for j in range(leftside[i].shape[0]):
p = leftside[i][j]
pee = rightside[i][j]
pconstterm = None
peeconstterm = None
if p.is_Add:
pconstterm = [term for term in p.args if term.is_number]
elif p.is_number:
pconstterm = [p]
else:
continue
if pee.is_Add:
peeconstterm = [term for term in pee.args if term.is_number]
elif pee.is_number:
peeconstterm = [pee]
else:
continue
if len(pconstterm) > 0 and len(peeconstterm) > 0:
# shift it to the one that has the least terms
for term in peeconstterm if len(p.args) < len(pee.args) else pconstterm:
leftside[i][j] -= term
rightside[i][j] -= term
AllEquations = []
for i in range(len(leftside)):
for j in range(leftside[i].shape[0]):
e = self.trigsimp(leftside[i][j] - rightside[i][j],usedvars)
if self.codeComplexity(e) < 1500:
e = self.SimplifyTransform(e)
if self.CheckExpressionUnique(AllEquations,e):
AllEquations.append(e)
if uselength:
p2 = S.Zero
pe2 = S.Zero
for j in range(leftside[i].shape[0]):
p2 += leftside[i][j]**2
pe2 += rightside[i][j]**2
if self.codeComplexity(p2) < 1200 and self.codeComplexity(pe2) < 1200:
# sympy's trigsimp/customtrigsimp give up too easily
e = self.SimplifyTransform(self.trigsimp(p2,usedvars)-self.trigsimp(pe2,usedvars))
if self.CheckExpressionUnique(AllEquations,e):
AllEquations.append(e.expand())
else:
log.info('length equations too big, skipping %d,%d',self.codeComplexity(p2),self.codeComplexity(pe2))
self.sortComplexity(AllEquations)
return AllEquations
def buildEquationsFromPositions(self,T1links,T1linksinv,transvars,othersolvedvars,uselength=True,removesmallnumbers=True, ignoreaxis=None):
"""multiplies out all the matrices and builds up the equations
:param ignoreaxis: if not None, can be 0, 1, or 2 to specify which axis should be ignored
"""
Taccum = eye(4)
numvarsdone = 1
Positions = []
Positionsee = []
indices = [0,1,2]
if ignoreaxis is not None:
indices.remove(ignoreaxis)
for i in range(len(T1links)-1):
Taccum = T1linksinv[i]*Taccum
hasvars = [self.has(Taccum,v) for v in transvars]
if __builtin__.sum(hasvars) == numvarsdone:
Positions.append(Taccum.extract(indices,[3]))
Positionsee.append(self.multiplyMatrix(T1links[(i+1):]).extract(indices,[3]))
numvarsdone += 1
if numvarsdone > 2:
# more than 2 variables is almost always useless
break
if len(Positions) == 0:
Positions.append(zeros((len(indices),1)))
Positionsee.append(self.multiplyMatrix(T1links).extract(indices,[3]))
if removesmallnumbers:
for i in range(len(Positions)):
for j in range(len(indices)):
Positions[i][j] = self.RoundEquationTerms(Positions[i][j].expand())
Positionsee[i][j] = self.RoundEquationTerms(Positionsee[i][j].expand())
return self.buildEquationsFromTwoSides(Positions,Positionsee,transvars+othersolvedvars,uselength=uselength)
def buildEquationsFromRotation(self,T0links,Ree,rotvars,othersolvedvars):
"""Ree is a 3x3 matrix
"""
Raccum = Ree
numvarsdone = 0
AllEquations = []
for i in range(len(T0links)):
Raccum = T0links[i][0:3,0:3].transpose()*Raccum # transpose is the inverse
hasvars = [self.has(Raccum,v) for v in rotvars]
if len(AllEquations) > 0 and __builtin__.sum(hasvars) >= len(rotvars):
break
if __builtin__.sum(hasvars) == numvarsdone:
R = self.multiplyMatrix(T0links[(i+1):])
Reqs = []
for irow in range(3):
Reqs.append([self.trigsimp(Raccum[irow,j]-R[irow,j],othersolvedvars+rotvars) for j in range(3)])
for irow in range(3):
for eq in Reqs[irow]:
AllEquations.append(eq)
numvarsdone += 1
# take dot products (equations become unnecessarily complex)
# eqdots = [S.Zero, S.Zero, S.Zero]
# for i in range(3):
# eqdots[0] += Reqs[0][i] * Reqs[1][i]
# eqdots[1] += Reqs[1][i] * Reqs[2][i]
# eqdots[2] += Reqs[2][i] * Reqs[0][i]
# for i in range(3):
# AllEquations.append(self.trigsimp(eqdots[i].expand(),othersolvedvars+rotvars))
#AllEquations.append((eqs[0]*eqs[0]+eqs[1]*eqs[1]+eqs[2]*eqs[2]-S.One).expand())
self.sortComplexity(AllEquations)
return AllEquations
def buildRaghavanRothEquationsFromMatrix(self,T0,T1,solvejointvars,simplify=True):
"""Builds the 14 equations using only 5 unknowns. Method explained in [Raghavan1993]_. Basically take the position and one column/row so that the least number of variables are used.
.. [Raghavan1993] M Raghavan and B Roth, "Inverse Kinematics of the General 6R Manipulator and related Linkages", Journal of Mechanical Design, Volume 115, Issue 3, 1993.
"""
p0 = T0[0:3,3]
p1 = T1[0:3,3]
p=p0-p1
T = T0-T1
numminvars = 100000
for irow in range(3):
hasvar = [self.has(T[0:3,irow],var) or self.has(p,var) for var in solvejointvars]
numcurvars = __builtin__.sum(hasvar)
if numminvars > numcurvars and numcurvars > 0:
numminvars = numcurvars
l0 = T0[0:3,irow]
l1 = T1[0:3,irow]
hasvar = [self.has(T[irow,0:3],var) or self.has(p,var) for var in solvejointvars]
numcurvars = __builtin__.sum(hasvar)
if numminvars > numcurvars and numcurvars > 0:
numminvars = numcurvars
l0 = T0[irow,0:3].transpose()
l1 = T1[irow,0:3].transpose()
return self.buildRaghavanRothEquations(p0,p1,l0,l1,solvejointvars,simplify),numminvars
def CheckEquationForVarying(self, eq):
return eq.has('vj0px') or eq.has('vj0py') or eq.has('vj0pz')
def buildRaghavanRothEquationsOld(self,p0,p1,l0,l1,solvejointvars):
eqs = []
for i in range(3):
eqs.append([l0[i],l1[i]])
for i in range(3):
eqs.append([p0[i],p1[i]])
l0xp0 = l0.cross(p0)
l1xp1 = l1.cross(p1)
for i in range(3):
eqs.append([l0xp0[i],l1xp1[i]])
ppl0 = p0.dot(p0)*l0 - 2*l0.dot(p0)*p0
ppl1 = p1.dot(p1)*l1 - 2*l1.dot(p1)*p1
for i in range(3):
eqs.append([ppl0[i],ppl1[i]])
eqs.append([p0.dot(p0),p1.dot(p1)])
eqs.append([l0.dot(p0),l1.dot(p1)])
# prune any that have varying symbols
eqs = [(eq0,eq1) for eq0,eq1 in eqs if not self.CheckEquationForVarying(eq0) and not self.CheckEquationForVarying(eq1)]
trigsubs = []
polysubs = []
polyvars = []
for v in solvejointvars:
polyvars.append(v)
if self.IsHinge(v.name):
var = self.Variable(v)
polysubs += [(cos(v),var.cvar),(sin(v),var.svar)]
polyvars += [var.cvar,var.svar]
trigsubs.append((var.svar**2,1-var.cvar**2))
trigsubs.append((var.svar**3,var.svar*(1-var.cvar**2)))
for v in self.freejointvars:
if self.IsHinge(v.name):
trigsubs.append((sin(v)**2,1-cos(v)**2))
trigsubs.append((sin(v)**3,sin(v)*(1-cos(v)**2)))
polysubsinv = [(b,a) for a,b in polysubs]
usedvars = []
for j in range(2):
usedvars.append([var for var in polyvars if any([eq[j].subs(polysubs).has(var) for eq in eqs])])
polyeqs = []
for i in range(len(eqs)):
polyeqs.append([None,None])
for j in range(2):
for i in range(len(eqs)):
poly0 = Poly(eqs[i][j].subs(polysubs),*usedvars[j]).subs(trigsubs)
poly1 = Poly(poly0.expand().subs(trigsubs),*usedvars[j])
if poly1 == S.Zero:
polyeqs[i][j] = poly1
else:
polyeqs[i][j] = self.SimplifyTransformPoly(poly1)
# remove all fractions? having big integers could blow things up...
return polyeqs
def buildRaghavanRothEquations(self,p0,p1,l0,l1,solvejointvars,simplify=True):
trigsubs = []
polysubs = []
polyvars = []
for v in solvejointvars:
polyvars.append(v)
if self.IsHinge(v.name):
var = self.Variable(v)
polysubs += [(cos(v),var.cvar),(sin(v),var.svar)]
polyvars += [var.cvar,var.svar]
trigsubs.append((var.svar**2,1-var.cvar**2))
trigsubs.append((var.svar**3,var.svar*(1-var.cvar**2)))
for v in self.freejointvars:
if self.IsHinge(v.name):
trigsubs.append((sin(v)**2,1-cos(v)**2))
trigsubs.append((sin(v)**3,sin(v)*(1-cos(v)**2)))
polysubsinv = [(b,a) for a,b in polysubs]
polyeqs = []
for i in range(14):
polyeqs.append([None,None])
eqs = []
for i in range(3):
eqs.append([l0[i],l1[i]])
for i in range(3):
eqs.append([p0[i],p1[i]])
l0xp0 = l0.cross(p0)
l1xp1 = l1.cross(p1)
for i in range(3):
eqs.append([l0xp0[i],l1xp1[i]])
eqs.append([p0.dot(p0),p1.dot(p1)])
eqs.append([l0.dot(p0),l1.dot(p1)])
starttime = time.time()
usedvars = []
for j in range(2):
usedvars.append([var for var in polyvars if any([eq[j].subs(polysubs).has(var) for eq in eqs])])
for i in range(len(eqs)):
if not self.CheckEquationForVarying(eqs[i][0]) and not self.CheckEquationForVarying(eqs[i][1]):
for j in range(2):
if polyeqs[i][j] is not None:
continue
poly0 = Poly(eqs[i][j].subs(polysubs),*usedvars[j]).subs(trigsubs)
# a disabled complexity guard (codeComplexity(poly0.as_expr()) < 2000) used to gate this expansion
poly1 = Poly(poly0.expand().subs(trigsubs),*usedvars[j])
if not simplify or poly1 == S.Zero:
polyeqs[i][j] = poly1
else:
polyeqs[i][j] = self.SimplifyTransformPoly(poly1)
ppl0 = polyeqs[9][0].as_expr()*l0 - 2*polyeqs[10][0].as_expr()*p0 # p0.dot(p0)*l0 - 2*l0.dot(p0)*p0
ppl1 = polyeqs[9][1].as_expr()*l1 - 2*polyeqs[10][1].as_expr()*p1 # p1.dot(p1)*l1 - 2*l1.dot(p1)*p1
for i in range(3):
eqs.append([ppl0[i],ppl1[i]])
for i in range(11, len(eqs)):
if not self.CheckEquationForVarying(eqs[i][0]) and not self.CheckEquationForVarying(eqs[i][1]):
for j in range(2):
if polyeqs[i][j] is not None:
continue
poly0 = Poly(eqs[i][j].subs(polysubs),*usedvars[j]).subs(trigsubs)
# a disabled complexity guard (codeComplexity(poly0.as_expr()) < 2000) used to gate this
# expansion, warning 'raghavan roth equation too complex' when exceeded
poly1 = Poly(poly0.expand().subs(trigsubs),*usedvars[j])
if not simplify or poly1 == S.Zero:
polyeqs[i][j] = poly1
else:
polyeqs[i][j] = self.SimplifyTransformPoly(poly1)
log.info('computed in %fs', time.time()-starttime)
# prune any that have varying symbols
# remove all fractions? having big integers could blow things up...
return [[peq0, peq1] for peq0, peq1 in polyeqs if peq0 is not None and peq1 is not None and not self.CheckEquationForVarying(peq0) and not self.CheckEquationForVarying(peq1)]
def reduceBothSides(self,polyeqs):
"""Reduces a set of equations in 5 unknowns to a set of equations with 3 unknowns by solving for one side with respect to another.
The input is usually the output of buildRaghavanRothEquations.
"""
usedvars = [polyeqs[0][0].gens, polyeqs[0][1].gens]
reducedelayed = []
for j in range(2):
if len(usedvars[j]) <= 4:
leftsideeqs = [polyeq[j] for polyeq in polyeqs if sum(polyeq[j].degree_list()) > 0]
rightsideeqs = [polyeq[1-j] for polyeq in polyeqs if sum(polyeq[j].degree_list()) > 0]
if all([all(d <= 2 for d in eq.degree_list()) for eq in leftsideeqs]):
try:
numsymbolcoeffs, _computereducedequations = self.reduceBothSidesSymbolicallyDelayed(leftsideeqs,rightsideeqs)
reducedelayed.append([j,leftsideeqs,rightsideeqs,__builtin__.sum(numsymbolcoeffs), _computereducedequations])
except self.CannotSolveError:
continue
# sort with respect to least number of symbols
reducedelayed.sort(lambda x,y: x[3]-y[3])
reducedeqs = []
tree = []
for j,leftsideeqs,rightsideeqs,numsymbolcoeffs, _computereducedequations in reducedelayed:
try:
reducedeqs2 = _computereducedequations()
if len(reducedeqs2) == 0:
log.info('forcing matrix inverse (might take some time)')
reducedeqs2,tree = self.reduceBothSidesInverseMatrix(leftsideeqs,rightsideeqs)
if len(reducedeqs2) > 0:
# success, add all the reduced equations
reducedeqs += [[Poly(eq[0],*usedvars[j]),Poly(eq[1],*usedvars[1-j])] for eq in reducedeqs2] + [[Poly(S.Zero,*polyeq[j].gens),polyeq[1-j]-polyeq[j].as_expr()] for polyeq in polyeqs if sum(polyeq[j].degree_list()) == 0]
if len(reducedeqs) > 0:
break
except self.CannotSolveError,e:
log.warn(e)
continue
if len(reducedeqs) > 0:
# check if any substitutions are needed
# for eq in reducedeqs:
# for j in range(2):
# eq[j] = Poly(eq[j].subs(trigsubs).as_expr().expand(),*eq[j].gens)
polyeqs = reducedeqs
return [eq for eq in polyeqs if eq[0] != S.Zero or eq[1] != S.Zero],tree
def reduceBothSidesInverseMatrix(self,leftsideeqs,rightsideeqs):
"""solve a linear system inside the program since the matrix cannot be reduced so easily
"""
allmonomsleft = set()
for peq in leftsideeqs:
allmonomsleft = allmonomsleft.union(set(peq.monoms()))
allmonomsleft = list(allmonomsleft)
allmonomsleft.sort()
if __builtin__.sum(allmonomsleft[0]) == 0:
allmonomsleft.pop(0)
if len(leftsideeqs) < len(allmonomsleft):
raise self.CannotSolveError('left side has too few equations for the number of variables %d<%d'%(len(leftsideeqs),len(allmonomsleft)))
systemcoeffs = []
for ileft,left in enumerate(leftsideeqs):
coeffs = [S.Zero]*len(allmonomsleft)
rank = 0
for m,c in left.terms():
if __builtin__.sum(m) > 0:
if c != S.Zero:
rank += 1
coeffs[allmonomsleft.index(m)] = c
systemcoeffs.append((rank,ileft,coeffs))
# ideally we want to try all combinations of simple equations first until we arrive to linearly independent ones.
# However, in practice most of the first equations are linearly dependent and it takes a lot of time to prune all of them,
# so start at the most complex
systemcoeffs.sort(lambda x,y: -x[0]+y[0])
# sort left and right in the same way
leftsideeqs = [leftsideeqs[ileft] for rank,ileft,coeffs in systemcoeffs]
rightsideeqs = [rightsideeqs[ileft] for rank,ileft,coeffs in systemcoeffs]
A = zeros((len(allmonomsleft),len(allmonomsleft)))
Asymbols = []
for i in range(A.shape[0]):
Asymbols.append([Symbol('gconst%d_%d'%(i,j)) for j in range(A.shape[1])])
solution = None
for eqindices in combinations(range(len(leftsideeqs)),len(allmonomsleft)):
for i,index in enumerate(eqindices):
for k in range(len(allmonomsleft)):
A[i,k] = systemcoeffs[index][2][k]
nummatrixsymbols = __builtin__.sum([1 for a in A if not a.is_number])
if nummatrixsymbols > 10:
# if too many symbols, evaluate numerically
if not self.IsDeterminantNonZeroByEval(A):
continue
log.info('found non-zero determinant by evaluation')
else:
det = self.det_bareis(A,*self.pvars)
if det == S.Zero:
continue
solution = AST.SolverMatrixInverse(A=A,Asymbols=Asymbols)
if nummatrixsymbols <= 10:
# record the symbolic determinant so the generated code can guard against singular configurations
solution.checkforzeros = [self.removecommonexprs(det,onlygcd=False,onlynumbers=True)]
self.usinglapack = True
Aadj=A.adjugate() # too big to be useful for now, but can be used to see if any symbols are always 0
break
if solution is None:
raise self.CannotSolveError('failed to find %d linearly independent equations'%len(allmonomsleft))
reducedeqs = []
for i in range(len(allmonomsleft)):
var=S.One
for k,kpower in enumerate(allmonomsleft[i]):
if kpower != 0:
var *= leftsideeqs[0].gens[k]**kpower
pright = S.Zero
for k in range(len(allmonomsleft)):
if Aadj[i,k] != S.Zero:
pright += Asymbols[i][k] * (rightsideeqs[eqindices[k]].as_expr()-leftsideeqs[eqindices[k]].TC())
reducedeqs.append([var,pright.expand()])
othereqindices = set(range(len(leftsideeqs))).difference(set(eqindices))
for i in othereqindices:
# have to multiply just the constant by the determinant
neweq = rightsideeqs[i].as_expr()
for m,c in leftsideeqs[i].terms():
if __builtin__.sum(m) > 0:
neweq -= c*reducedeqs[allmonomsleft.index(m)][1]
else:
neweq -= c
reducedeqs.append([S.Zero,neweq])
return reducedeqs, [solution]
# NOTE: an alternative path for matrices with many symbols, based on a fraction-free LU
# decomposition (P,L,DD,U = M[:,:-1].LUdecompositionFF(*self.pvars)) with polynomial-fraction
# forward/back substitution, was prototyped here but never completed.
def reduceBothSidesSymbolically(self,*args,**kwargs):
numsymbolcoeffs, _computereducedequations = self.reduceBothSidesSymbolicallyDelayed(*args,**kwargs)
return _computereducedequations()
def reduceBothSidesSymbolicallyDelayed(self,leftsideeqs,rightsideeqs,maxsymbols=10,usesymbols=True):
"""the left and right side of the equations need to have different variables
"""
assert(len(leftsideeqs)==len(rightsideeqs))
# first count the number of different monomials, then try to solve for each of them
symbolgen = cse_main.numbered_symbols('const')
vargen = cse_main.numbered_symbols('tempvar')
rightsidedummy = []
localsymbols = []
dividesymbols = []
allmonoms = dict()
for left,right in izip(leftsideeqs,rightsideeqs):
if right != S.Zero:
rightsidedummy.append(symbolgen.next())
localsymbols.append((rightsidedummy[-1],right.as_expr().expand()))
else:
rightsidedummy.append(S.Zero)
for m in left.monoms():
if __builtin__.sum(m) > 0 and not m in allmonoms:
newvar = vargen.next()
localsymbols.append((newvar,Poly.from_dict({m:S.One},*left.gens).as_expr()))
allmonoms[m] = newvar
if len(leftsideeqs) < len(allmonoms):
raise self.CannotSolveError('left side has too few equations for the number of variables %d<%d'%(len(leftsideeqs),len(allmonoms)))
if len(allmonoms) == 0:
def _returnequations():
return [[left,right] for left,right in izip(leftsideeqs,rightsideeqs)]
return 0, _returnequations
unknownvars = leftsideeqs[0].gens
newleftsideeqs = []
numsymbolcoeffs = []
for left,right in izip(leftsideeqs,rightsidedummy):
left = left - right
newleft = Poly(S.Zero,*allmonoms.values())
leftcoeffs = [c for m,c in left.terms() if __builtin__.sum(m) > 0]
allnumbers = all([c.is_number for c in leftcoeffs])
if usesymbols and not allnumbers:
# check if all the equations are within a constant factor of each other.
# This is necessary since the current linear system solver cannot handle too many symbols.
reducedeq0,common0 = self.removecommonexprs(leftcoeffs[0],returncommon=True)
commonmults = [S.One]
for c in leftcoeffs[1:]:
reducedeq1,common1 = self.removecommonexprs(c,returncommon=True)
if self.equal(reducedeq1,reducedeq0):
commonmults.append(common1/common0)
elif self.equal(reducedeq1,-reducedeq0):
commonmults.append(-common1/common0)
else:
break
if len(commonmults) == len(leftcoeffs):
# divide everything by reducedeq0
index = 0
for m,c in left.terms():
if __builtin__.sum(m) > 0:
newleft = newleft + commonmults[index]*allmonoms.get(m)
index += 1
else:
# look in the dividesymbols for something similar
gmult = None
for gsym,geq in dividesymbols:
greducedeq,gcommon = self.removecommonexprs(S.One/geq,returncommon=True)
if self.equal(greducedeq,reducedeq0):
gmult = gsym*(gcommon/common0)
break
elif self.equal(greducedeq,-reducedeq0):
gmult = gsym*(-gcommon/common0)
break
if gmult is None:
gmult = symbolgen.next()
dividesymbols.append((gmult,S.One/leftcoeffs[0]))
newc = (c*gmult).subs(localsymbols).expand()
sym = symbolgen.next()
localsymbols.append((sym,newc))
newleft = newleft + sym
numsymbolcoeffs.append(0)
newleftsideeqs.append(newleft)
continue
numsymbols = 0
for m,c in left.terms():
polyvar = S.One
if __builtin__.sum(m) > 0:
polyvar = allmonoms.get(m)
if not c.is_number:
numsymbols += 1
newleft = newleft + c*polyvar
numsymbolcoeffs.append(numsymbols)
newleftsideeqs.append(newleft)
def _computereducedequations():
reducedeqs = []
# order the equations based on the number of terms
newleftsideeqs.sort(lambda x,y: len(x.monoms()) - len(y.monoms()))
newunknowns = newleftsideeqs[0].gens
log.info('solving for all pairwise variables in %s, number of symbol coeffs are %s',unknownvars,__builtin__.sum(numsymbolcoeffs))
systemcoeffs = []
for eq in newleftsideeqs:
eqdict = eq.as_dict()
coeffs = []
for i,var in enumerate(newunknowns):
monom = [0]*len(newunknowns)
monom[i] = 1
coeffs.append(eqdict.get(tuple(monom),S.Zero))
monom = [0]*len(newunknowns)
coeffs.append(-eqdict.get(tuple(monom),S.Zero))
systemcoeffs.append(coeffs)
detvars = [s for s,v in localsymbols] + self.pvars
for eqindices in combinations(range(len(newleftsideeqs)),len(newunknowns)):
# very quick rejection
numsymbols = __builtin__.sum([numsymbolcoeffs[i] for i in eqindices])
if numsymbols > maxsymbols:
continue
M = Matrix([systemcoeffs[i] for i in eqindices])
det = self.det_bareis(M[:,:-1], *detvars)
if det == S.Zero:
continue
try:
eqused = [newleftsideeqs[i] for i in eqindices]
solution=solve(eqused,newunknowns)
except IndexError:
# not enough equations?
continue
if solution is not None and all([self.isValidSolution(value.subs(localsymbols)) for key,value in solution.iteritems()]):
# substitute
solsubs = []
allvalid = True
for key,value in solution.iteritems():
valuesub = value.subs(localsymbols)
solsubs.append((key,valuesub))
reducedeqs.append([key.subs(localsymbols),valuesub])
othereqindices = set(range(len(newleftsideeqs))).difference(set(eqindices))
for i in othereqindices:
reducedeqs.append([S.Zero,(newleftsideeqs[i].subs(solsubs).subs(localsymbols)).as_expr().expand()])
break
# remove the dividesymbols from reducedeqs
for sym,ivalue in dividesymbols:
value=1/ivalue
for i in range(len(reducedeqs)):
eq = reducedeqs[i][1]
if eq.has(sym):
neweq = S.Zero
peq = Poly(eq,sym)
for m,c in peq.terms():
neweq += c*value**(peq.degree(0) - m[0])
reducedeqs[i][1] = neweq.expand()
reducedeqs[i][0] = (reducedeqs[i][0]*value**peq.degree(0)).expand()
if len(reducedeqs) > 0:
log.info('finished with %d equations',len(reducedeqs))
return reducedeqs
return numsymbolcoeffs, _computereducedequations
def solveManochaCanny(self,rawpolyeqs,solvejointvars,endbranchtree, AllEquationsExtra=None):
"""Solves the IK equations using eigenvalues/eigenvectors of a 12x12 quadratic eigenvalue problem. Method explained in
Dinesh Manocha and J.F. Canny. "Efficient inverse kinematics for general 6R manipulators", IEEE Transactions on Robotics and Automation, Volume 10, Issue 5, Oct 1994.
"""
log.info('attempting manocha/canny general ik method')
PolyEquations, raghavansolutiontree = self.reduceBothSides(rawpolyeqs)
# find all equations with zeros on the left side
RightEquations = []
for ipeq,peq in enumerate(PolyEquations):
if peq[0] == S.Zero:
if len(raghavansolutiontree) > 0 or peq[1] == S.Zero:
# give up on optimization
RightEquations.append(peq[1])
else:
RightEquations.append(self.SimplifyTransformPoly(peq[1]))
if len(RightEquations) < 6:
raise self.CannotSolveError('number of equations %d less than 6'%(len(RightEquations)))
# sort with respect to the number of monomials
RightEquations.sort(lambda x, y: len(x.monoms())-len(y.monoms()))
# substitute with dummy=tan(half angle)
symbols = RightEquations[0].gens
symbolsubs = [(symbols[i].subs(self.invsubs),symbols[i]) for i in range(len(symbols))]
unsolvedsymbols = []
for solvejointvar in solvejointvars:
testvars = self.Variable(solvejointvar).vars
if not any([v in symbols for v in testvars]):
unsolvedsymbols += testvars
# check that the coefficients of the reduced equations do not contain any unsolved variables
for peq in RightEquations:
if peq.has(*unsolvedsymbols):
raise self.CannotSolveError('found unsolved symbol being used so ignoring: %s'%peq)
log.info('solving simultaneously for symbols: %s',symbols)
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
usedvars = []
singlevariables = []
i = 0
while i < len(symbols):
dummy = Symbol('ht%s'%symbols[i].name[1:])
var = symbols[i].subs(self.invsubs)
if not isinstance(var,Symbol):
# [0] - cos, [1] - sin
var = var.args[0]
dummys.append(dummy)
dummysubs += [(symbols[i],(1-dummy**2)/(1+dummy**2)),(symbols[i+1],2*dummy/(1+dummy**2))]
dummysubs2.append((var,2*atan(dummy)))
dummyvars.append((dummy,tan(0.5*var)))
if not var in usedvars:
usedvars.append(var)
i += 2
else:
singlevariables.append(var)
# most likely a single variable
dummys.append(var)
dummysubs += [(var,var)]
dummysubs2.append((var,var))
if not var in usedvars:
usedvars.append(var)
i += 1
newreducedeqs = []
for peq in RightEquations:
maxdenom = dict()
for monoms in peq.monoms():
i = 0
while i < len(monoms):
if peq.gens[i].name[0] == 'j':
# single variable
maxdenom[peq.gens[i]] = max(maxdenom.get(peq.gens[i],0),monoms[i])
i += 1
else:
maxdenom[peq.gens[i]] = max(maxdenom.get(peq.gens[i],0),monoms[i]+monoms[i+1])
i += 2
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i in range(len(dummysubs)):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
i = 0
while i < len(monoms):
if peq.gens[i].name[0] == 'j':
denom = fraction(dummysubs[i][1])[1]
term *= denom**(maxdenom[peq.gens[i]]-monoms[i])
i += 1
else:
denom = fraction(dummysubs[i][1])[1]
term *= denom**(maxdenom[peq.gens[i]]-monoms[i]-monoms[i+1])
i += 2
eqnew += term
newreducedeqs.append(Poly(eqnew,*dummys))
# check for equations with a single variable
if len(singlevariables) > 0:
try:
AllEquations = [eq.subs(self.invsubs).as_expr() for eq in newreducedeqs]
tree = self.SolveAllEquations(AllEquations,curvars=dummys,othersolvedvars=[],solsubs=self.freevarsubs,endbranchtree=endbranchtree)
return raghavansolutiontree+tree,usedvars
except self.CannotSolveError:
pass
if 0:
# try solving for the single variable and substituting for the rest of the equations in order to get a set of equations without the single variable
var = singlevariables[0]
monomindex = symbols.index(var)
singledegreeeqs = []
AllEquations = []
for peq in newreducedeqs:
if all([m[monomindex] <= 1 for m in peq.monoms()]):
newpeq = Poly(peq,var)
if sum(newpeq.degree_list()) > 0:
singledegreeeqs.append(newpeq)
else:
AllEquations.append(peq.subs(self.invsubs).as_expr())
for peq0, peq1 in combinations(singledegreeeqs,2):
AllEquations.append(simplify((peq0.TC()*peq1.LC() - peq0.LC()*peq1.TC()).subs(self.invsubs)))
log.info(str(AllEquations))
#sol=self.SolvePairVariablesHalfAngle(AllEquations,usedvars[1],usedvars[2],[])
# choose which leftvar can determine the singularity of the following equations!
exportcoeffeqs = None
getsubs = raghavansolutiontree[0].getsubs if len(raghavansolutiontree) > 0 else None
for ileftvar in range(len(dummys)):
leftvar = dummys[ileftvar]
try:
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs,ileftvar,getsubs=getsubs)
break
except self.CannotSolveError,e:
log.warn('failed with leftvar %s: %s',leftvar,e)
if exportcoeffeqs is None:
raise self.CannotSolveError('failed to solve dialytically')
if ileftvar > 0:
raise self.CannotSolveError('solving equations dialytically succeeded with var index %d, unfortunately code generation supports only index 0'%ileftvar)
jointevalcos=[d[1] for d in dummysubs if d[0].name[0] == 'c']
jointevalsin=[d[1] for d in dummysubs if d[0].name[0] == 's']
#jointeval=[d[1] for d in dummysubs if d[0].name[0] == 'j']
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=jointevalcos, jointevalsin=jointevalsin, isHinges=[self.IsHinge(v.name) for v in usedvars],exportvar=[v.name for v in dummys],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly12qep',rootmaxdim=16)
self.usinglapack = True
return raghavansolutiontree+[coupledsolution]+endbranchtree,usedvars
def solveLiWoernleHiller(self,rawpolyeqs,solvejointvars,endbranchtree,AllEquationsExtra=[]):
"""Li-Woernle-Hiller procedure covered in
Jorge Angeles, "Fundamentals of Robotics Mechanical Systems", Springer, 2007.
"""
log.info('attempting li/woernle/hiller general ik method')
if len(rawpolyeqs[0][0].gens) < len(rawpolyeqs[0][1].gens):
for peq in rawpolyeqs:
peq[0],peq[1] = peq[1],peq[0]
originalsymbols = list(rawpolyeqs[0][0].gens)
symbolsubs = [(originalsymbols[i].subs(self.invsubs),originalsymbols[i]) for i in range(len(originalsymbols))]
numsymbols = 0
for solvejointvar in solvejointvars:
for var in self.Variable(solvejointvar).vars:
if var in originalsymbols:
numsymbols += 1
break
if numsymbols != 3:
raise self.CannotSolveError('Li/Woernle/Hiller method requires 3 unknown variables, has %d'%numsymbols)
if len(originalsymbols) != 6:
log.warn('symbols %r are not all rotational, is this really necessary?'%originalsymbols)
raise self.CannotSolveError('symbols %r are not all rotational, is this really necessary?'%originalsymbols)
# choose which leftvar can determine the singularity of the following equations!
allowedindices = []
for i in range(len(originalsymbols)):
# if first symbol is cjX, then next should be sjX
if originalsymbols[i].name[0] == 'c':
assert( originalsymbols[i+1].name == 's'+originalsymbols[i].name[1:])
if 8 == __builtin__.sum([int(peq[0].has(originalsymbols[i],originalsymbols[i+1])) for peq in rawpolyeqs]):
allowedindices.append(i)
if len(allowedindices) == 0:
log.warn('could not find any variable where number of equations is exactly 8, trying all possibilities')
for i in range(len(originalsymbols)):
# if first symbol is cjX, then next should be sjX
if originalsymbols[i].name[0] == 'c':
assert( originalsymbols[i+1].name == 's'+originalsymbols[i].name[1:])
allowedindices.append(i)
#raise self.CannotSolveError('need exactly 8 equations of one variable')
for allowedindex in allowedindices:
solutiontree = []
checkforzeros = []
symbols = list(originalsymbols)
cvar = symbols[allowedindex]
svar = symbols[allowedindex+1]
varname = cvar.name[1:]
tvar = Symbol('ht'+varname)
symbols.remove(cvar)
symbols.remove(svar)
symbols.append(tvar)
othersymbols = list(rawpolyeqs[0][1].gens)
othersymbols.append(tvar)
polyeqs = [[peq[0].as_expr(),peq[1]] for peq in rawpolyeqs if peq[0].has(cvar,svar)]
neweqs=[]
unusedindices = set(range(len(polyeqs)))
for i in range(len(polyeqs)):
if not i in unusedindices:
continue
p0 = Poly(polyeqs[i][0],cvar,svar)
p0dict=p0.as_dict()
for j in unusedindices:
if j == i:
continue
p1 = Poly(polyeqs[j][0],cvar,svar)
p1dict=p1.as_dict()
r0 = polyeqs[i][1].as_expr()
r1 = polyeqs[j][1].as_expr()
if self.equal(p0dict.get((1,0),S.Zero),-p1dict.get((0,1),S.Zero)) and self.equal(p0dict.get((0,1),S.Zero),p1dict.get((1,0),S.Zero)):
p0,p1 = p1,p0
p0dict,p1dict=p1dict,p0dict
r0,r1 = r1,r0
if self.equal(p0dict.get((1,0),S.Zero),p1dict.get((0,1),S.Zero)) and self.equal(p0dict.get((0,1),S.Zero),-p1dict.get((1,0),S.Zero)):
# p0+tvar*p1, p1-tvar*p0
# subs: tvar*svar + cvar = 1, svar-tvar*cvar=tvar
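# both identities follow from the tan half-angle forms of cos/sin and make the two
# combined equations below linear in tvar, eliminating cvar and svar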
neweqs.append([Poly(p0dict.get((1,0),S.Zero) + p0dict.get((0,1),S.Zero)*tvar + p0.TC() + tvar*p1.TC(),*symbols), Poly(r0+tvar*r1,*othersymbols)])
neweqs.append([Poly(p0dict.get((1,0),S.Zero)*tvar - p0dict.get((0,1),S.Zero) - p0.TC()*tvar + p1.TC(),*symbols), Poly(r1-tvar*r0,*othersymbols)])
unusedindices.remove(i)
unusedindices.remove(j)
break
if len(neweqs) >= 8:
break
log.warn('allowedindex %d found %d equations where coefficients of equations match', allowedindex, len(neweqs))
if len(neweqs) < 8:
raise self.CannotSolveError('found %d equations where coefficients of equations match! need at least 8'%len(neweqs))
for ipeq in unusedindices:
p0 = Poly(polyeqs[ipeq][0],cvar,svar)
p1 = polyeqs[ipeq][1]
# need to substitute cvar and svar with tvar
maxdenom = 0
for monoms in p0.monoms():
maxdenom=max(maxdenom,monoms[0]+monoms[1])
eqnew = S.Zero
for monoms,c in p0.terms():
term = c*((1-tvar**2)**monoms[0])*(2*tvar)**monoms[1]*(1+tvar**2)**(maxdenom-monoms[0]-monoms[1])
eqnew += term
neweqs.append([Poly(eqnew,*symbols),Poly(p1.as_expr()*(1+tvar**2)**maxdenom,*othersymbols)])
neweqs.append([Poly(eqnew*tvar,*symbols),Poly(p1.as_expr()*tvar*(1+tvar**2)**maxdenom,*othersymbols)])
for ipeq,peq in enumerate(rawpolyeqs):
if not peq[0].has(cvar,svar):
neweqs.append([Poly(peq[0],*symbols),Poly(peq[1],*othersymbols)])
neweqs.append([Poly(peq[0].as_expr()*tvar,*symbols),Poly(peq[1].as_expr()*tvar,*othersymbols)])
# according to theory, neweqs should have 20 equations, however this isn't always the case
# one side should have only numbers, this makes the following inverse operations trivial
for peq in neweqs:
peq0dict = peq[0].as_dict()
peq[1] = peq[1] - tvar*peq0dict.get((0,0,0,0,1),S.Zero)-peq[0].TC()
peq[0] = peq[0] - tvar*peq0dict.get((0,0,0,0,1),S.Zero)-peq[0].TC()
hasreducedeqs = True
while hasreducedeqs:
hasreducedeqs = False
for ipeq,peq in enumerate(neweqs):
peq0dict = peq[0].as_dict()
if len(peq0dict) == 1:
monomkey = peq0dict.keys()[0]
monomcoeff = peq0dict[monomkey]
monomvalue = peq[1].as_expr()
if sympy_smaller_073:
monomexpr = Monomial(*monomkey).as_expr(*peq[0].gens)
else:
monomexpr = Monomial(monomkey).as_expr(*peq[0].gens)
# for every equation that has this monom, substitute it
for ipeq2, peq2 in enumerate(neweqs):
if ipeq == ipeq2:
continue
for monoms,c in peq2[0].terms():
if monoms == monomkey:
peq2[0] = (peq2[0] - c*monomexpr)*monomcoeff
peq2[1] = peq2[1]*monomcoeff - c*monomvalue
hasreducedeqs = True
break
# see if there are two equations sharing the same two monomials on the left-hand side
for ipeq,peq in enumerate(neweqs):
peq0monoms = peq[0].monoms()
if peq[0] != S.Zero and len(peq0monoms) == 2:
for ipeq2, peq2 in enumerate(neweqs):
if ipeq2 == ipeq:
continue
if peq0monoms == peq2[0].monoms():
peqdict = peq[0].as_dict()
peq2dict = peq2[0].as_dict()
peqdiff = peq2[0]*peqdict[peq0monoms[0]] - peq[0]*peq2dict[peq0monoms[0]]
if peqdiff != S.Zero:
# there's one monomial left
peqright = (peq2[1]*peqdict[peq0monoms[0]] - peq[1]*peq2dict[peq0monoms[0]])
if peqdiff.LC() != S.Zero:
peqright = peqright * (S.One / peqdiff.LC())
peqdiff = peqdiff * (S.One / peqdiff.LC())
# now solve for the other variable
peq2diff = peq2[0]*peqdict[peq0monoms[1]] - peq[0]*peq2dict[peq0monoms[1]]
peq2right = (peq2[1]*peqdict[peq0monoms[1]] - peq[1]*peq2dict[peq0monoms[1]])
if peq2diff.LC() != S.Zero:
peq2right = peq2right * (S.One / peq2diff.LC())
peq2diff = peq2diff * (S.One / peq2diff.LC())
peq[0] = peqdiff
peq[1] = peqright
peq2[0] = peq2diff
peq2[1] = peq2right
hasreducedeqs = True
break
else:
# overwrite peq2 in case there are others
peq2[0] = peqdiff
peq2[1] = peq2[1]*peqdict[peq0monoms[0]] - peq[1]*peq2dict[peq0monoms[0]]
hasreducedeqs = True
neweqs_full = []
reducedeqs = []
# filled with equations where one variable is singled out
reducedsinglevars = [None,None,None,None]
for peq in neweqs:
coeff, factors = (peq[1]-peq[0]).factor_list()
# check if peq[1] can factor out certain monoms
if len(factors) > 1:
# if either factor evaluates to 0, then we are ok;
# look for trivial factors that evaluate to 0 or to some constant expression and put those into checkforzeros
eq = S.One
divisoreq = S.One
newzeros = []
for factor, fdegree in factors:
if sum(factor.degree_list()) == 1:
log.info('assuming equation %r is non-zero', factor)
newzeros.append(factor.as_expr())
divisoreq *= factor.as_expr()
else:
eq *= factor.as_expr()
eq = coeff*eq.expand() # have to multiply by the coeff, otherwise the equation is weighted differently and the epsilon thresholds become hard to determine
if peq[0] != S.Zero:
peq0norm, r = div(peq[0], divisoreq)
assert(r==S.Zero)
peq1norm, r = div(peq[1], divisoreq)
assert(r==S.Zero)
peq0norm = Poly(peq0norm, *peq[0].gens)
peq1norm = Poly(peq1norm, *peq[1].gens)
peq0dict = peq0norm.as_dict()
monom, value = peq0dict.items()[0]
if len(peq0dict) == 1 and __builtin__.sum(monom) == 1:
indices = [index for index in range(4) if monom[index] == 1]
if len(indices) > 0 and indices[0] < 4:
reducedsinglevars[indices[0]] = (value, peq1norm.as_expr())
isunique = True
for test0, test1 in neweqs_full:
if (self.equal(test0,peq0norm) or self.equal(test0,-peq0norm)) and (self.equal(test1,peq1norm) or self.equal(test1,-peq1norm)):
isunique = False
break
if isunique:
neweqs_full.append((peq0norm, peq1norm))
else:
eq = eq.subs(self.freevarsubs)
if self.CheckExpressionUnique(reducedeqs, eq):
reducedeqs.append(eq)
assert(len(reducedeqs)!=4)
else:
if peq[0] != S.Zero:
peq0dict = peq[0].as_dict()
monom, value = peq0dict.items()[0]
if len(peq0dict) == 1 and __builtin__.sum(monom) == 1:
indices = [index for index in range(4) if monom[index] == 1]
if len(indices) > 0 and indices[0] < 4:
reducedsinglevars[indices[0]] = (value,peq[1].as_expr())
neweqs_full.append(peq)
else:
eq = peq[1].as_expr().subs(self.freevarsubs)
if self.CheckExpressionUnique(reducedeqs, eq):
reducedeqs.append(eq)
for ivar in range(2):
if reducedsinglevars[2*ivar+0] is not None and reducedsinglevars[2*ivar+1] is not None:
# a0*cos = b0, a1*sin = b1
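# from a0*cos(x) = b0 and a1*sin(x) = b1, cos(x)**2 + sin(x)**2 = 1 gives
# (b0*a1)**2 + (a0*b1)**2 - (a0*a1)**2 = 0, which eliminates the joint variable entirely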
a0,b0 = reducedsinglevars[2*ivar+0]
a1,b1 = reducedsinglevars[2*ivar+1]
reducedeqs.append((b0*a1)**2 + (a0*b1)**2 - (a0*a1)**2)
haszeroequations = len(reducedeqs)>0
allmonoms = set()
for peq in neweqs_full:
allmonoms = allmonoms.union(set(peq[0].monoms()))
allmonoms = list(allmonoms)
allmonoms.sort()
if len(allmonoms) > len(neweqs_full) and len(reducedeqs) < 4:
raise self.CannotSolveError('new monoms is %d>%d'%(len(allmonoms), len(neweqs_full)))
# (in some branches below the equations are converted to ginac objects for speed)
getsubs = None
dictequations = []
preprocesssolutiontree = []
localsymbolmap = {}
AUinv = None
if len(allmonoms) < len(neweqs_full):
# order with respect to complexity of [0], this is to make the inverse of A faster
complexity = [(self.codeComplexity(peq[0].as_expr()),peq) for peq in neweqs_full]
complexity.sort(key=itemgetter(0))
neweqs_full = [peq for c,peq in complexity]
A = zeros((len(neweqs_full),len(allmonoms)))
B = zeros((len(neweqs_full),1))
for ipeq,peq in enumerate(neweqs_full):
for m,c in peq[0].terms():
A[ipeq,allmonoms.index(m)] = c.subs(self.freevarsubs)
B[ipeq] = peq[1].as_expr().subs(self.freevarsubs)
AU = zeros((len(allmonoms),len(allmonoms)))
AL = zeros((A.shape[0]-len(allmonoms),len(allmonoms)))
BU = zeros((len(allmonoms),1))
BL = zeros((A.shape[0]-len(allmonoms),1))
AUadjugate = None
AU = A[:A.shape[1],:]
nummatrixsymbols = __builtin__.sum([1 for a in AU if not a.is_number])
# the 150 threshold is a guess
if nummatrixsymbols > 150:
log.info('found a non-singular matrix with %d symbols, but most likely there is a better one', nummatrixsymbols)
raise self.CannotSolveError('matrix has too many symbols (%d), giving up since most likely will freeze'%nummatrixsymbols)
log.info('matrix has %d symbols', nummatrixsymbols)
if nummatrixsymbols > 10:
# if there are many matrix symbols, could yield here so that other combinations can be tested?
pass
AUdetmat = None
if self.IsDeterminantNonZeroByEval(AU):
rows = range(A.shape[1])
AUdetmat = AU
elif not self.IsDeterminantNonZeroByEval(A*A.transpose()):
raise self.CannotSolveError('coefficient matrix is singular')
else:
# prune the dependent vectors
AU = A[0:1,:]
rows = [0]
for i in range(1,A.shape[0]):
AU2 = AU.col_join(A[i:(i+1),:])
if AU2.shape[0] == AU2.shape[1]:
AUdetmat = AU2
else:
AUdetmat = AU2*AU2.transpose()
if not self.IsDeterminantNonZeroByEval(AUdetmat):
continue
AU = AU2
rows.append(i)
if AU.shape[0] == AU.shape[1]:
break
if AU.shape[0] != AU.shape[1]:
raise self.CannotSolveError('could not find non-singular matrix')
otherrows = range(A.shape[0])
for i,row in enumerate(rows):
BU[i] = B[row]
otherrows.remove(row)
for i,row in enumerate(otherrows):
BL[i] = B[row]
AL[i,:] = A[row,:]
if self.has(A,*self.freevars):
AUinv = AU.inv()
AUdet = AUdetmat.det()
log.info('AU has symbols, so working with inverse might take some time')
AUdet = self.trigsimp(AUdet.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
# find the adjugate by simplifying from the inverse
AUadjugate = zeros(AUinv.shape)
sinsubs = []
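# power-reduction substitutions: even powers of cos become (1-sin**2)**k and odd powers
# become cos*(1-sin**2)**k, so trigsimp never has to deal with high-degree cosines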
for freevar in self.freejointvars:
var=self.Variable(freevar)
for ideg in range(2,40):
if ideg % 2:
sinsubs.append((var.cvar**ideg,var.cvar*(1-var.svar**2)**int((ideg-1)/2)))
else:
sinsubs.append((var.cvar**ideg,(1-var.svar**2)**(ideg/2)))
for i in range(AUinv.shape[0]):
log.info('replacing row %d', i)
for j in range(AUinv.shape[1]):
numerator,denominator = self.recursiveFraction(AUinv[i,j])
numerator = self.trigsimp(numerator.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
numerator, common = self.removecommonexprs(numerator,onlygcd=True,returncommon=True)
denominator = self.trigsimp((denominator/common).subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
try:
q,r=div(numerator*AUdet,denominator,self.freevars)
except PolynomialError, e:
# 1/(-9000000*cj16 - 9000000) contains an element of the generators set
raise self.CannotSolveError('cannot divide for matrix inversion: %s'%e)
if r != S.Zero:
# sines and cosines can mix things up a lot, so convert to half-tan
numerator2, numerator2d, htvarsubsinv = self.ConvertSinCosEquationToHalfTan((AUdet*numerator).subs(sinsubs).expand().subs(sinsubs).expand().subs(sinsubs).expand(), self.freejointvars)
denominator2, denominator2d, htvarsubsinv = self.ConvertSinCosEquationToHalfTan(denominator.subs(sinsubs).expand().subs(sinsubs).expand().subs(sinsubs).expand(), self.freejointvars)
extranumerator, extradenominator = fraction(numerator2d/denominator2d)
htvars = [v for v,eq in htvarsubsinv]
q,r=div((numerator2*extradenominator).expand(),(denominator2).expand(),*htvars)
if r != S.Zero:
log.warn('cannot get rid of denominator for element (%d, %d) in (%s/%s)',i, j, numerator2,denominator2)
#raise self.CannotSolveError('cannot get rid of denominator')
# convert back to cos/sin in order to get rid of the denominator term?
sym = self.gsymbolgen.next()
dictequations.append((sym, q / extranumerator))
q = sym
AUadjugate[i,j] = self.trigsimp(q.subs(self.freevarsubsinv),self.freejointvars).subs(self.freevarsubs)
checkforzeros.append(self.removecommonexprs(AUdet,onlygcd=False,onlynumbers=True))
# reason we're multiplying by adjugate instead of inverse is to get rid of the potential divides by (free) parameters
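# since inv(AU) = adj(AU)/det(AU), C below equals det(AU)*(AL*inv(AU)*BU - BL), i.e. the
# same equations scaled by a polynomial factor instead of divided by one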
BUresult = AUadjugate*BU
C = AL*BUresult-BL*AUdet
for c in C:
reducedeqs.append(c)
else:
if nummatrixsymbols > 40:
Asymbols = []
for i in range(AU.shape[0]):
Asymbols.append([Symbol('gclwh%d_%d'%(i,j)) for j in range(AU.shape[1])])
matrixsolution = AST.SolverMatrixInverse(A=AU,Asymbols=Asymbols)
getsubs = matrixsolution.getsubs
preprocesssolutiontree.append(matrixsolution)
self.usinglapack = True
# evaluate the inverse at various solutions and see which entries are always zero
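# entries of inv(AU) that are numerically zero for every consistent test configuration
# are assumed to be structurally zero, shrinking the symbolic inverse that has to be generated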
isnotzero = zeros((AU.shape[0],AU.shape[1]))
epsilon = 1e-15
epsilondet = 1e-30
for itest,subs in enumerate(self.testconsistentvalues):
AUvalue = AU.subs(subs)
AUdetvalue = AUvalue.evalf().det().evalf()
if abs(AUdetvalue) > epsilondet:# != S.Zero:
AUinvvalue = AUvalue.evalf().inv()
for i in range(AUinvvalue.shape[0]):
for j in range(AUinvvalue.shape[1]):
# since making numerical approximations, need a good value for zero
if abs(AUinvvalue[i,j]) > epsilon:#!= S.Zero:
isnotzero[i,j] = 1
AUinv = zeros((AU.shape[0],AU.shape[1]))
for i in range(AUinvvalue.shape[0]):
for j in range(AUinvvalue.shape[1]):
if isnotzero[i,j] == 0:
Asymbols[i][j] = None
else:
AUinv[i,j] = Asymbols[i][j]
BUresult = AUinv*BU
C = AL*BUresult-BL
for c in C:
reducedeqs.append(c)
elif 0:#nummatrixsymbols > 60:
# requires swiginac
getsubs = lambda valuesubs: self.SubstituteGinacEquations(dictequations, valuesubs, localsymbolmap)
# cannot compute inverse since too many symbols
log.info('lu decomposition')
# PA = L DD**-1 U
P, L, DD, U = self.LUdecompositionFF(AU,*self.pvars)
log.info('lower triangular solve')
res0 = L.lower_triangular_solve(P*BU)
# have to use ginac, since sympy is too slow
# there are divides in res0, so have to simplify
gres1 = swiginac.symbolic_matrix(len(res0),1,'gres1')
for i in range(len(res0)):
gres0i = GinacUtils.ConvertToGinac(res0[i],localsymbolmap)
gDDi = GinacUtils.ConvertToGinac(DD[i,i],localsymbolmap)
gres1[i,0] = gres0i*gDDi
gothersymbols = [localsymbolmap[s.name] for s in othersymbols]
res2 = []
gres2 = swiginac.symbolic_matrix(len(res0),1,'gres2')
for icol in range(len(gres1)):
log.info('extracting poly monoms from L solving: %d', icol)
polyterms = GinacUtils.GetPolyTermsFromGinac(gres1[icol],gothersymbols,othersymbols)
# create a new symbol for every term
eq = S.Zero
for monom, coeff in polyterms.iteritems():
sym = self.gsymbolgen.next()
dictequations.append((sym,coeff))
localsymbolmap[sym.name] = swiginac.symbol(sym.name)
if sympy_smaller_073:
eq += sym*Monomial(*monom).as_expr(*othersymbols)
else:
eq += sym*Monomial(monom).as_expr(*othersymbols)
res2.append(eq)
gres2[icol] = GinacUtils.ConvertToGinac(eq,localsymbolmap)
gU = GinacUtils.ConvertMatrixToGinac(U,'U',localsymbolmap)
log.info('upper triangular solve')
gres3 = GinacUtils.SolveUpperTriangular(gU, gres2, 'gres3')
res3 = []
for icol in range(len(gres3)):
log.info('extracting poly monoms from U solving: %d', icol)
polyterms = GinacUtils.GetPolyTermsFromGinac(gres3[icol],gothersymbols,othersymbols)
# create a new symbol for every term
eq = S.Zero
for monom, coeff in polyterms.iteritems():
sym = self.gsymbolgen.next()
dictequations.append((sym,coeff))
localsymbolmap[sym.name] = swiginac.symbol(sym.name)
if sympy_smaller_073:
eq += sym*Monomial(*monom).as_expr(*othersymbols)
else:
eq += sym*Monomial(monom).as_expr(*othersymbols)
res3.append(eq)
BUresult = Matrix(gres3.rows(),gres3.cols(),res3)
C = AL*BUresult-BL
for c in C:
reducedeqs.append(c)
else:
AUinv = AU.inv()
BUresult = AUinv*BU
C = AL*BUresult-BL
for c in C:
reducedeqs.append(c)
log.info('computed non-singular AU matrix')
if len(reducedeqs) == 0:
raise self.CannotSolveError('reduced equations are zero')
# reducedeqs now holds the rows of C, a (len(neweqs_full)-len(allmonoms))x1 matrix; usually this is 4x1
htvars = []
htvarsubs = []
htvarsubs2 = []
usedvars = []
htvarcossinoffsets = []
nonhtvars = []
for iothersymbol, othersymbol in enumerate(othersymbols):
if othersymbol.name[0] == 'c':
assert(othersymbols[iothersymbol+1].name[0] == 's')
htvarcossinoffsets.append(iothersymbol)
name = othersymbol.name[1:]
htvar = Symbol('ht%s'%name)
htvarsubs += [(othersymbol,(1-htvar**2)/(1+htvar**2)),(othersymbols[iothersymbol+1],2*htvar/(1+htvar**2))]
htvars.append(htvar)
htvarsubs2.append((Symbol(name),2*atan(htvar)))
usedvars.append(Symbol(name))
elif othersymbol.name[0] != 'h' and othersymbol.name[0] != 's':
# not half-tan, sin, or cos
nonhtvars.append(othersymbol)
usedvars.append(othersymbol)
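# finally add the Weierstrass (half-angle) substitution for the remaining cos/sin pair:
# with t = tan(x/2), cos(x) = (1-t**2)/(1+t**2) and sin(x) = 2*t/(1+t**2)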
htvarsubs += [(cvar,(1-tvar**2)/(1+tvar**2)),(svar,2*tvar/(1+tvar**2))]
htvars.append(tvar)
htvarsubs2.append((Symbol(varname),2*atan(tvar)))
usedvars.append(Symbol(varname))
if haszeroequations:
log.info('special structure in equations detected, try to solve through elimination')
AllEquations = [eq.subs(self.invsubs) for eq in reducedeqs if self.codeComplexity(eq) < 2000]
for curvar in usedvars[:-1]:
try:
unknownvars = usedvars[:]
unknownvars.remove(curvar)
jointtrees2=[]
curvarsubs=self.Variable(curvar).subs
treefirst = self.SolveAllEquations(AllEquations,curvars=[curvar],othersolvedvars=self.freejointvars,solsubs=self.freevarsubs[:],endbranchtree=[AST.SolverSequence([jointtrees2])],unknownvars=unknownvars+[tvar])
# solvable, which means we now have len(AllEquations)-1 equations in two variables; solve those with half angles
halfanglesolution=self.SolvePairVariablesHalfAngle(raweqns=[eq.subs(curvarsubs) for eq in AllEquations],var0=unknownvars[0],var1=unknownvars[1],othersolvedvars=self.freejointvars+[curvar])[0]
# sometimes halfanglesolution can evaluate to all zeros (katana arm), need to catch this and go to a different branch
halfanglesolution.AddHalfTanValue = True
jointtrees2.append(halfanglesolution)
halfanglevar = unknownvars[0] if halfanglesolution.jointname==unknownvars[0].name else unknownvars[1]
unknownvars.remove(halfanglevar)
try:
# given that two variables are solved, can most likely solve the rest. Solving with the original
# equations yields simpler solutions since reducedeqs hold half-tangents
curvars = solvejointvars[:]
curvars.remove(curvar)
curvars.remove(halfanglevar)
subsinv = []
for v in solvejointvars:
subsinv += self.Variable(v).subsinv
AllEquationsOrig = [(peq[0].as_expr()-peq[1].as_expr()).subs(subsinv) for peq in rawpolyeqs]
self.sortComplexity(AllEquationsOrig)
jointtrees2 += self.SolveAllEquations(AllEquationsOrig,curvars=curvars,othersolvedvars=self.freejointvars+[curvar,halfanglevar],solsubs=self.freevarsubs+curvarsubs+self.Variable(halfanglevar).subs,endbranchtree=endbranchtree)
return preprocesssolutiontree+solutiontree+treefirst,solvejointvars
except self.CannotSolveError,e:
# try another strategy
log.debug(e)
# solve all the unknowns now
jointtrees3=[]
treesecond = self.SolveAllEquations(AllEquations,curvars=unknownvars,othersolvedvars=self.freejointvars+[curvar,halfanglevar],solsubs=self.freevarsubs+curvarsubs+self.Variable(halfanglevar).subs,endbranchtree=[AST.SolverSequence([jointtrees3])])
for t in treesecond:
# most likely t is a solution...
t.AddHalfTanValue = True
if isinstance(t,AST.SolverCheckZeros):
for t2 in t.zerobranch:
t2.AddHalfTanValue = True
for t2 in t.nonzerobranch:
t2.AddHalfTanValue = True
if len(t.zerobranch) == 0 or isinstance(t.zerobranch[0],AST.SolverBreak):
log.info('detected zerobranch with SolverBreak, trying to fix')
jointtrees2 += treesecond
# using these solutions, can evaluate all monoms and check for consistency, this step is crucial since
# AllEquations might not constrain all degrees of freedom (check out katana)
indices = []
for i in range(4):
monom = [0]*len(symbols)
monom[i] = 1
indices.append(allmonoms.index(tuple(monom)))
if AUinv is not None:
X = AUinv*BU
for i in [0,2]:
jointname=symbols[i].name[1:]
try:
# atan2(0,0) produces an invalid solution
jointtrees3.append(AST.SolverSolution(jointname,jointeval=[atan2(X[indices[i+1]],X[indices[i]])],isHinge=self.IsHinge(jointname)))
usedvars.append(Symbol(jointname))
except Exception, e:
log.warn(e)
jointcheckeqs = []
for i,monom in enumerate(allmonoms):
if not i in indices:
eq = S.One
for isymbol,ipower in enumerate(monom):
eq *= symbols[isymbol]**ipower
jointcheckeqs.append(eq-X[i])
# threshold can be a little more loose since just a sanity check
jointtrees3.append(AST.SolverCheckZeros('sanitycheck',jointcheckeqs,zerobranch=endbranchtree,nonzerobranch=[AST.SolverBreak('sanitycheck for solveLiWoernleHiller')],anycondition=False,thresh=0.001))
return preprocesssolutiontree+solutiontree+treefirst,usedvars
else:
log.warn('AUinv not initialized, perhaps missing important equations')
except self.CannotSolveError,e:
log.info(e)
try:
log.info('try to solve first two variables pairwise')
#solution = self.SolvePairVariables(AllEquations,usedvars[0],usedvars[1],self.freejointvars,maxcomplexity=50)
jointtrees=[]
raweqns=[eq for eq in AllEquations if not eq.has(tvar)]
if len(raweqns) > 1:
halfanglesolution = self.SolvePairVariablesHalfAngle(raweqns=raweqns,var0=usedvars[0],var1=usedvars[1],othersolvedvars=self.freejointvars)[0]
halfanglevar = usedvars[0] if halfanglesolution.jointname==usedvars[0].name else usedvars[1]
unknownvar = usedvars[1] if halfanglesolution.jointname==usedvars[0].name else usedvars[0]
nexttree = self.SolveAllEquations(raweqns,curvars=[unknownvar],othersolvedvars=self.freejointvars+[halfanglevar],solsubs=self.freevarsubs+self.Variable(halfanglevar).subs,endbranchtree=[AST.SolverSequence([jointtrees])])
#finalsolution = self.solveSingleVariable(AllEquations,usedvars[2],othersolvedvars=self.freejointvars+usedvars[0:2],maxsolutions=4,maxdegree=4)
try:
finaltree = self.SolveAllEquations(AllEquations,curvars=usedvars[2:],othersolvedvars=self.freejointvars+usedvars[0:2],solsubs=self.freevarsubs+self.Variable(usedvars[0]).subs+self.Variable(usedvars[1]).subs,endbranchtree=endbranchtree)
jointtrees += finaltree
return preprocesssolutiontree+[halfanglesolution]+nexttree,usedvars
except self.CannotSolveError,e:
log.debug('failed to solve for final variable %s, so returning just two: %s'%(usedvars[2],str(usedvars[0:2])))
jointtrees += endbranchtree
# sometimes the last variable cannot be solved, so return the already solved variables and let the calling function take care of it
return preprocesssolutiontree+[halfanglesolution]+nexttree,usedvars[0:2]
except self.CannotSolveError,e:
log.debug(u'failed solving first two variables pairwise: %s', e)
log.info('reducing equations')
newreducedeqs = []
hassinglevariable = False
for eq in reducedeqs:
complexity = self.codeComplexity(eq)
if complexity > 4000:
raise self.CannotSolveError('equation way too complex (%d), looking for another solution'%complexity)
if complexity > 4000:
# NOTE: currently unreachable because of the raise above; kept as a sketch for breaking a huge equation into smaller polynomial chunks
log.info('equation way too complex (%d), so try breaking it down', complexity)
# don't support this yet...
eq2 = eq.expand()
assert(eq2.is_Add)
log.info('equation has %d additions', len(eq2.args))
indices = list(range(0, len(eq2.args), 100))
indices[-1] = len(eq2.args)
testpolyeqs = []
startvalue = 0
for nextvalue in indices[1:]:
log.info('computing up to %d', nextvalue)
testadd = S.Zero
for i in range(startvalue,nextvalue):
testadd += eq2.args[i]
testpolyeqs.append(Poly(testadd,*othersymbols))
startvalue = nextvalue
# convert each poly's coefficients to symbols
peq = Poly(S.Zero, *othersymbols)
for itest, testpolyeq in enumerate(testpolyeqs):
log.info('adding equation %d', itest)
newpeq = Poly(S.Zero, *othersymbols)
for monom, coeff in testpolyeq.terms():
sym = self.gsymbolgen.next()
dictequations.append((sym,coeff))
if sympy_smaller_073:
newpeq += sym*Monomial(*monom).as_expr(*othersymbols)
else:
newpeq += sym*Monomial(monom).as_expr(*othersymbols)
peq += newpeq
else:
peq = Poly(eq,*othersymbols)
maxdenom = [0]*len(htvarcossinoffsets)
for monoms in peq.monoms():
for i,ioffset in enumerate(htvarcossinoffsets):
maxdenom[i] = max(maxdenom[i],monoms[ioffset]+monoms[ioffset+1])
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i,ioffset in enumerate(htvarcossinoffsets):
# for cos
num, denom = fraction(htvarsubs[2*i][1])
term *= num**monoms[ioffset]
# for sin
num, denom = fraction(htvarsubs[2*i+1][1])
term *= num**monoms[ioffset+1]
# the denoms for sin/cos of the same joint variable are the same
for i,ioffset in enumerate(htvarcossinoffsets):
denom = fraction(htvarsubs[2*i][1])[1]
term *= denom**(maxdenom[i]-monoms[ioffset]-monoms[ioffset+1])
# multiply the rest of the monoms
for imonom, monom in enumerate(monoms):
if not imonom in htvarcossinoffsets and not imonom-1 in htvarcossinoffsets:
# non-sin/cos variables are multiplied through unchanged
term *= othersymbols[imonom]**monom
eqnew += term
newpeq = Poly(eqnew,htvars+nonhtvars)
newreducedeqs.append(newpeq)
hassinglevariable |= any([all([__builtin__.sum(monom)==monom[i] for monom in newpeq.monoms()]) for i in range(3)])
if hassinglevariable:
log.info('hassinglevariable, trying with raw equations')
AllEquations = []
for eq in reducedeqs:
peq = Poly(eq,tvar)
if sum(peq.degree_list()) == 0:
AllEquations.append(peq.TC().subs(self.invsubs).expand())
elif sum(peq.degree_list()) == 1 and peq.TC() == S.Zero:
AllEquations.append(peq.LC().subs(self.invsubs).expand())
else:
# two substitutions: sin/(1+cos), (1-cos)/sin
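# both come from tan(x/2) = sin(x)/(1+cos(x)) = (1-cos(x))/sin(x); generating both forms
# avoids the singularity each individual form has (at cos(x) = -1 and at sin(x) = 0 respectively)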
neweq0 = S.Zero
neweq1 = S.Zero
for monoms,c in peq.terms():
neweq0 += c*(svar**monoms[0])*((1+cvar)**(peq.degree(0)-monoms[0]))
neweq1 += c*((1-cvar)**monoms[0])*(svar**(peq.degree(0)-monoms[0]))
AllEquations.append(neweq0.subs(self.invsubs).expand())
AllEquations.append(neweq1.subs(self.invsubs).expand())
unusedvars = [solvejointvar for solvejointvar in solvejointvars if not solvejointvar in usedvars]
for eq in AllEquationsExtra:
if eq.has(*usedvars) and not eq.has(*unusedvars):
AllEquations.append(eq)
self.sortComplexity(AllEquations)
# first try to solve all the variables at once
try:
solutiontree = self.SolveAllEquations(AllEquations,curvars=usedvars,othersolvedvars=self.freejointvars[:],solsubs=self.freevarsubs[:], unknownvars=unusedvars, endbranchtree=endbranchtree)
return solutiontree, usedvars
except self.CannotSolveError, e:
log.debug(u'failed solving all variables: %s', e)
for ivar in range(3):
try:
unknownvars = usedvars[:]
unknownvars.pop(ivar)
endbranchtree2 = []
if 1:
solutiontree = self.SolveAllEquations(AllEquations,curvars=[usedvars[ivar]],othersolvedvars=self.freejointvars[:],solsubs=self.freevarsubs[:],endbranchtree=[AST.SolverSequence([endbranchtree2])],unknownvars=unknownvars+unusedvars)
endbranchtree2 += self.SolveAllEquations(AllEquations,curvars=unknownvars[0:2],othersolvedvars=self.freejointvars[:]+[usedvars[ivar]],solsubs=self.freevarsubs[:]+self.Variable(usedvars[ivar]).subs, unknownvars=unusedvars, endbranchtree=endbranchtree)
return preprocesssolutiontree+solutiontree, usedvars#+unusedvars#[unknownvars[1], usedvars[ivar]]#
except self.CannotSolveError, e:
log.debug(u'single variable %s failed: %s', usedvars[ivar], e)
# try:
# testvars = [Symbol(othersymbols[0].name[1:]),Symbol(othersymbols[2].name[1:]),Symbol(varname)]
# AllEquations = [(peq[0].as_expr()-peq[1].as_expr()).expand() for peq in polyeqs if not peq[0].has(*symbols)]
# coupledsolutions = self.SolveAllEquations(AllEquations,curvars=testvars,othersolvedvars=self.freejointvars[:],solsubs=self.freevarsubs[:],endbranchtree=endbranchtree)
# return coupledsolutions,testvars
# except self.CannotSolveError:
# pass
#
exportcoeffeqs = None
# only support ileftvar=0 for now
for ileftvar in [0]:#range(len(htvars)):
# always take the equations 4 at a time
for dialyticeqs in combinations(newreducedeqs,4):
try:
exportcoeffeqs,exportmonoms = self.solveDialytically(dialyticeqs,ileftvar,getsubs=getsubs)
break
except self.CannotSolveError,e:
log.warn('failed with leftvar %s: %s',newreducedeqs[0].gens[ileftvar],e)
if exportcoeffeqs is not None:
break
if exportcoeffeqs is None:
if len(nonhtvars) > 0:
log.info('try to solve one variable in terms of the others')
usedvar0solution = solve(newreducedeqs[0],nonhtvars[0])[0]
num,denom = fraction(usedvar0solution)
igenoffset = len(htvars)
# substitute all instances of the variable
processedequations = []
for peq in newreducedeqs[1:]:
maxdegree = peq.degree(igenoffset)
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
term *= denom**(maxdegree-monoms[igenoffset])
term *= num**(monoms[igenoffset])
for imonom, monom in enumerate(monoms):
if imonom != igenoffset:
term *= htvars[imonom]**monom
eqnew += term
try:
newpeq = Poly(eqnew,htvars)
except PolynomialError, e:
# most likely usedvar0solution was bad
raise self.CannotSolveError('equation %s cannot be represented as a polynomial'%eqnew)
if newpeq != S.Zero:
processedequations.append(newpeq)
# check if any variables have degree <= 1 for all equations
for ihtvar,htvar in enumerate(htvars):
leftoverhtvars = list(htvars)
leftoverhtvars.pop(ihtvar)
freeequations = []
linearequations = []
higherequations = []
for peq in processedequations:
if peq.degree(ihtvar) == 0:
freeequations.append(peq)
elif peq.degree(ihtvar) == 1:
linearequations.append(peq)
else:
higherequations.append(peq)
if len(freeequations) > 0:
log.info('found a way to solve this! still need to implement it though...')
elif len(linearequations) > 0 and len(leftoverhtvars) == 1:
# try substituting one into the other equations Ax = B
A = S.Zero
B = S.Zero
for monoms,c in linearequations[0].terms():
term = c
for imonom, monom in enumerate(monoms):
if imonom != ihtvar:
term *= htvars[imonom]**monom
if monoms[ihtvar] > 0:
A += term
else:
B -= term
Apoly = Poly(A,leftoverhtvars)
Bpoly = Poly(B,leftoverhtvars)
singlepolyequations = []
useequations = linearequations[1:]
if len(useequations) == 0:
useequations += higherequations
for peq in useequations:
peqnew = Poly(S.Zero,leftoverhtvars)
maxhtvardegree = peq.degree(ihtvar)
for monoms,c in peq.terms():
term = c
for imonom, monom in enumerate(monoms):
if imonom != ihtvar:
term *= htvars[imonom]**monom
termpoly = Poly(term,leftoverhtvars)
peqnew += termpoly * (Bpoly**(monoms[ihtvar]) * Apoly**(maxhtvardegree-monoms[ihtvar]))
singlepolyequations.append(peqnew)
if len(singlepolyequations) > 0:
jointsol = 2*atan(leftoverhtvars[0])
jointname = leftoverhtvars[0].name[2:]
firstsolution = AST.SolverPolynomialRoots(jointname=jointname,poly=singlepolyequations[0],jointeval=[jointsol],isHinge=self.IsHinge(jointname))
firstsolution.checkforzeros = []
firstsolution.postcheckforzeros = []
firstsolution.postcheckfornonzeros = []
firstsolution.postcheckforrange = []
# in Ax=B, if A is 0 and B is non-zero, then equation is invalid
# however if both A and B evaluate to 0, then equation is still valid
# therefore equation is invalid only if A==0&&B!=0
firstsolution.postcheckforNumDenom = [(A.as_expr(), B.as_expr())]
firstsolution.AddHalfTanValue = True
# actually both A and B can evaluate to zero, in which case we have to use a different method to solve them
AllEquations = []
for eq in reducedeqs:
if self.codeComplexity(eq) > 500:
continue
peq = Poly(eq, tvar)
if sum(peq.degree_list()) == 0:
AllEquations.append(peq.TC().subs(self.invsubs).expand())
elif sum(peq.degree_list()) == 1 and peq.TC() == S.Zero:
AllEquations.append(peq.LC().subs(self.invsubs).expand())
else:
# two substitutions: sin/(1+cos), (1-cos)/sin
neweq0 = S.Zero
neweq1 = S.Zero
for monoms,c in peq.terms():
neweq0 += c*(svar**monoms[0])*((1+cvar)**(peq.degree(0)-monoms[0]))
neweq1 += c*((1-cvar)**monoms[0])*(svar**(peq.degree(0)-monoms[0]))
if self.codeComplexity(neweq0) > 1000 or self.codeComplexity(neweq1) > 1000:
break
AllEquations.append(neweq0.subs(self.invsubs).expand())
AllEquations.append(neweq1.subs(self.invsubs).expand())
#oldmaxcasedepth = self.maxcasedepth
try:
#self.maxcasedepth = min(self.maxcasedepth, 2)
solvevar = Symbol(jointname)
curvars = list(usedvars)
curvars.remove(solvevar)
unusedvars = [solvejointvar for solvejointvar in solvejointvars if not solvejointvar in usedvars]
solutiontree = self.SolveAllEquations(AllEquations+AllEquationsExtra,curvars=curvars+unusedvars,othersolvedvars=self.freejointvars[:]+[solvevar],solsubs=self.freevarsubs[:]+self.Variable(solvevar).subs,endbranchtree=endbranchtree)
#secondSolutionComplexity = self.codeComplexity(B) + self.codeComplexity(A)
#if secondSolutionComplexity > 500:
# log.info('solution for %s is too complex, so delaying its solving')
#solutiontree = self.SolveAllEquations(AllEquations,curvars=curvars,othersolvedvars=self.freejointvars[:]+[solvevar],solsubs=self.freevarsubs[:]+self.Variable(solvevar).subs,endbranchtree=endbranchtree)
return preprocesssolutiontree+[firstsolution]+solutiontree,usedvars+unusedvars
except self.CannotSolveError, e:
log.debug('could not solve full variables from scratch, so use existing solution: %s', e)
secondsolution = AST.SolverSolution(htvar.name[2:], isHinge=self.IsHinge(htvar.name[2:]))
secondsolution.jointeval = [2*atan2(B.as_expr(), A.as_expr())]
secondsolution.AddHalfTanValue = True
thirdsolution = AST.SolverSolution(nonhtvars[0].name, isHinge=self.IsHinge(nonhtvars[0].name))
thirdsolution.jointeval = [usedvar0solution]
return preprocesssolutiontree+[firstsolution, secondsolution, thirdsolution]+endbranchtree, usedvars
# finally:
# self.maxcasedepth = oldmaxcasedepth
# try to factor the equations manually
if newreducedeqs[0].degree(2) == 1:
# try to solve one variable in terms of the others
usedvar0solution = solve(newreducedeqs[0],htvars[2])[0]
num,denom = fraction(usedvar0solution)
igenoffset = 2
# substitute all instances of the variable
processedequations = []
for peq in newreducedeqs[1:]:
newpeq = S.Zero
if peq.degree(2) > 1:
# ignore higher powers
continue
elif peq.degree(2) == 0:
newpeq = Poly(peq,htvars[0],htvars[1])
else:
maxdegree = peq.degree(igenoffset)
eqnew = S.Zero
for monoms,c in peq.terms():
term = c*denom**(maxdegree-monoms[igenoffset])
term *= num**(monoms[igenoffset])
for imonom, monom in enumerate(monoms):
if imonom != igenoffset:
term *= htvars[imonom]**monom
eqnew += term.expand()
try:
newpeq = Poly(eqnew,htvars[0],htvars[1])
except PolynomialError, e:
# most likely usedvar0solution was bad
raise self.CannotSolveError('equation %s cannot be represented as a polynomial'%eqnew)
if newpeq != S.Zero:
# normalize by the greatest coefficient in LC, otherwise the determinant computation will never succeed
LC=newpeq.LC()
highestcoeff = None
if LC.is_Add:
for arg in LC.args:
coeff = None
if arg.is_Mul:
coeff = S.One
for subarg in arg.args:
if subarg.is_number:
coeff *= abs(subarg)
elif arg.is_number:
coeff = abs(arg)
if coeff is not None:
if coeff > S.One:
# round to the nearest integer
coeff = int(round(coeff.evalf()))
if highestcoeff is None or coeff > highestcoeff:
highestcoeff = coeff
if highestcoeff == oo:
log.warn('an equation has infinity?!')
else:
processedequations.append(newpeq*(S.One/highestcoeff))
else:
log.info('equation is zero, so ignoring')
for dialyticeqs in combinations(processedequations,3):
Mall = None
leftvar = None
for ileftvar in range(2):
# TODO, sometimes this works and sometimes this doesn't
try:
Mall, allmonoms = self.solveDialytically(dialyticeqs,ileftvar,returnmatrix=True)
if Mall is not None:
leftvar=processedequations[0].gens[ileftvar]
break
except self.CannotSolveError, e:
log.debug(e)
if Mall is None:
continue
log.info('success in solving sub-coeff matrix!')
shape=Mall[0].shape
Malltemp = [None]*len(Mall)
M = zeros(shape)
dictequations2 = list(dictequations)
for idegree in range(len(Mall)):
Malltemp[idegree] = zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
if Mall[idegree][i,j] != S.Zero:
sym = self.gsymbolgen.next()
Malltemp[idegree][i,j] = sym
dictequations2.append((sym,Mall[idegree][i,j]))
M += Malltemp[idegree]*leftvar**idegree
tempsymbols = [self.gsymbolgen.next() for i in range(len(M))]
tempsubs = []
for i in range(len(tempsymbols)):
if M[i] != S.Zero:
tempsubs.append((tempsymbols[i],Poly(M[i],leftvar)))
else:
tempsymbols[i] = S.Zero
Mtemp = Matrix(M.shape[0],M.shape[1],tempsymbols)
dettemp=Mtemp.det()
log.info('multiplying all determinant coefficients for solving %s',leftvar)
eqadds = []
for arg in dettemp.args:
eqmuls = [Poly(arg2.subs(tempsubs),leftvar) for arg2 in arg.args]
if sum(eqmuls[0].degree_list()) == 0:
eq = eqmuls.pop(0)
eqmuls[0] = eqmuls[0]*eq
while len(eqmuls) > 1:
ioffset = 0
eqmuls2 = []
while ioffset < len(eqmuls)-1:
eqmuls2.append(eqmuls[ioffset]*eqmuls[ioffset+1])
ioffset += 2
eqmuls = eqmuls2
eqadds.append(eqmuls[0])
det = Poly(S.Zero,leftvar)
for eq in eqadds:
det += eq
jointsol = 2*atan(leftvar)
firstsolution = AST.SolverPolynomialRoots(jointname=usedvars[ileftvar].name,poly=det,jointeval=[jointsol],isHinge=self.IsHinge(usedvars[ileftvar].name))
firstsolution.checkforzeros = []
firstsolution.postcheckforzeros = []
firstsolution.postcheckfornonzeros = []
firstsolution.postcheckforrange = []
firstsolution.dictequations = dictequations2
firstsolution.AddHalfTanValue = True
# just solve the lowest degree one
complexity = [(eq.degree(1-ileftvar)*100000+self.codeComplexity(eq.as_expr()),eq) for eq in processedequations if eq.degree(1-ileftvar) > 0]
complexity.sort(key=itemgetter(0))
orderedequations = [peq for c,peq in complexity]
jointsol = 2*atan(htvars[1-ileftvar])
secondsolution = AST.SolverPolynomialRoots(jointname=usedvars[1-ileftvar].name,poly=Poly(orderedequations[0],htvars[1-ileftvar]),jointeval=[jointsol],isHinge=self.IsHinge(usedvars[1-ileftvar].name))
secondsolution.checkforzeros = []
secondsolution.postcheckforzeros = []
secondsolution.postcheckfornonzeros = []
secondsolution.postcheckforrange = []
secondsolution.AddHalfTanValue = True
thirdsolution = AST.SolverSolution(usedvars[2].name, isHinge=self.IsHinge(usedvars[2].name))
thirdsolution.jointeval = [usedvar0solution]
return preprocesssolutiontree+[firstsolution, secondsolution, thirdsolution]+endbranchtree, usedvars
raise self.CannotSolveError('failed to solve dialytically')
if ileftvar > 0:
raise self.CannotSolveError('solving equations dialytically succeeded with var index %d, unfortunately code generation supports only index 0'%ileftvar)
exportvar = [htvars[ileftvar].name]
exportvar += [v.name for i,v in enumerate(htvars) if i != ileftvar]
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in htvarsubs2],jointevalcos=[htvarsubs[2*i][1] for i in range(len(htvars))],jointevalsin=[htvarsubs[2*i+1][1] for i in range(len(htvars))],isHinges=[self.IsHinge(v.name) for v in usedvars],exportvar=exportvar,exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly8qep',rootmaxdim=16)
coupledsolution.presetcheckforzeros = checkforzeros
coupledsolution.dictequations = dictequations
solutiontree.append(coupledsolution)
self.usinglapack = True
return preprocesssolutiontree+solutiontree+endbranchtree,usedvars
def ConvertSinCosEquationToHalfTan(self, eq, convertvars):
"""converts all the sin/cos of variables to half-tangents. Returns two equations (poly, denominator)
"""
cossinvars = []
htvarsubs = []
htvars = []
htvarsubsinv = []
for varsym in convertvars:
var = self.Variable(varsym)
cossinvars.append(var.cvar)
cossinvars.append(var.svar)
htvar = Symbol('ht%s'%varsym.name)
htvarsubs += [(var.cvar,(1-htvar**2)/(1+htvar**2)),(var.svar,2*htvar/(1+htvar**2))]
htvarsubsinv.append((htvar, (1-var.cvar)/var.svar))
htvars.append(htvar)
peq = Poly(eq,*cossinvars)
maxdenom = [0]*len(convertvars)
for monoms in peq.monoms():
for i in range(len(convertvars)):
maxdenom[i] = max(maxdenom[i],monoms[2*i]+monoms[2*i+1])
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i in range(len(convertvars)):
# for cos
num, denom = fraction(htvarsubs[2*i][1])
term *= num**monoms[2*i]
# for sin
num, denom = fraction(htvarsubs[2*i+1][1])
term *= num**monoms[2*i+1]
# the denoms for sin/cos of the same joint variable are the same
for i in range(len(convertvars)):
denom = fraction(htvarsubs[2*i][1])[1]
exp = maxdenom[i] - monoms[2*i] - monoms[2*i+1]
if exp > 0:
term *= denom**exp
eqnew += term
#newpeq = Poly(eqnew,htvars)
othereq = S.One
for i in range(len(convertvars)):
othereq *= (1+htvars[i]**2)**maxdenom[i]
return eqnew, othereq, htvarsubsinv
def ConvertHalfTanEquationToSinCos(self, eq, convertvars):
"""converts all the sin/cos of variables to half-tangents. Returns two equations (poly, denominator)
"""
assert(0)
cossinvars = []
htvarsubs = []
htvars = []
htvarsubsinv = []
for varsym in convertvars:
var = self.Variable(varsym)
cossinvars.append(var.cvar)
cossinvars.append(var.svar)
htvar = Symbol('ht%s'%varsym.name)
htvarsubs += [(var.cvar,(1-htvar**2)/(1+htvar**2)),(var.svar,2*htvar/(1+htvar**2))]
htvarsubsinv.append((htvar, (1-var.cvar)/var.svar))
htvars.append(htvar)
peq = Poly(eq,*cossinvars)
maxdenom = [0]*len(convertvars)
for monoms in peq.monoms():
for i in range(len(convertvars)):
maxdenom[i] = max(maxdenom[i],monoms[2*i]+monoms[2*i+1])
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i in range(len(convertvars)):
# for cos
num, denom = fraction(htvarsubs[2*i][1])
term *= num**monoms[2*i]
# for sin
num, denom = fraction(htvarsubs[2*i+1][1])
term *= num**monoms[2*i+1]
# the denoms for sin/cos of the same joint variable are the same
for i in range(len(convertvars)):
denom = fraction(htvarsubs[2*i][1])[1]
exp = maxdenom[i] - monoms[2*i] - monoms[2*i+1]
if exp > 0:
term *= denom**exp
eqnew += term
#newpeq = Poly(eqnew,htvars)
othereq = S.One
for i in range(len(convertvars)):
othereq *= (1+htvars[i]**2)**maxdenom[i]
return eqnew, othereq, htvarsubsinv
def solveKohliOsvatic(self,rawpolyeqs,solvejointvars,endbranchtree, AllEquationsExtra=None):
"""Find a 16x16 matrix where the entries are linear with respect to the tan half-angle of one of the variables [Kohli1993]_. Takes in the 14 raghavan/roth equations.
.. [Kohli1993] Dilip Kohli and M. Osvatic, "Inverse Kinematics of General 6R and 5R,P Serial Manipulators", Journal of Mechanical Design, Volume 115, Issue 4, Dec 1993.
"""
log.info('attempting kohli/osvatic general ik method')
if len(rawpolyeqs[0][0].gens) < len(rawpolyeqs[0][1].gens):
for peq in rawpolyeqs:
peq[0],peq[1] = peq[1],peq[0]
symbols = list(rawpolyeqs[0][0].gens)
othersymbols = list(rawpolyeqs[0][1].gens)
othersymbolsnames = []
for s in othersymbols:
testeq = s.subs(self.invsubs)
for solvejointvar in solvejointvars:
if testeq.has(solvejointvar):
othersymbolsnames.append(solvejointvar)
break
assert(len(othersymbols)==len(othersymbolsnames))
symbolsubs = [(symbols[i].subs(self.invsubs),symbols[i]) for i in range(len(symbols))]
if len(symbols) != 6:
raise self.CannotSolveError('Kohli/Osvatic method requires 3 unknown variables (6 cos/sin symbols)')
# choose which leftvar can determine the singularity of the following equations!
for i in range(0,6,2):
eqs = [peq for peq in rawpolyeqs if peq[0].has(symbols[i],symbols[i+1])]
if len(eqs) <= 8:
break
if len(eqs) > 8:
raise self.CannotSolveError('need 8 or less equations of one variable, currently have %d'%len(eqs))
cvar = symbols[i]
svar = symbols[i+1]
tvar = Symbol('t'+cvar.name[1:])
symbols.remove(cvar)
symbols.remove(svar)
othereqs = [peq for peq in rawpolyeqs if not peq[0].has(cvar,svar)]
polyeqs = [[eq[0].as_expr(),eq[1]] for eq in eqs]
if len(polyeqs) < 8:
raise self.CannotSolveError('solveKohliOsvatic: need 8 or more polyeqs')
# solve the othereqs for symbols without the standalone symbols[2] and symbols[3]
reducedeqs = []
othersymbolsnamesunique = list(set(othersymbolsnames)) # get the unique names
for jother in range(len(othersymbolsnamesunique)):
if not self.IsHinge(othersymbolsnamesunique[jother].name):
continue
othervar=self.Variable(othersymbolsnamesunique[jother])
cosmonom = [0]*len(othersymbols)
cosmonom[othersymbols.index(othervar.cvar)] = 1
cosmonom = tuple(cosmonom)
sinmonom = [0]*len(othersymbols)
sinmonom[othersymbols.index(othervar.svar)] = 1
sinmonom = tuple(sinmonom)
leftsideeqs = []
rightsideeqs = []
finaleqsymbols = symbols + [othervar.cvar,othervar.svar]
for eq0,eq1 in othereqs:
leftsideeq = Poly(eq1,*othersymbols)
leftsideeqdict = leftsideeq.as_dict()
rightsideeq = Poly(eq0,*finaleqsymbols)
coscoeff = leftsideeqdict.get(cosmonom,S.Zero)
if coscoeff != S.Zero:
rightsideeq = rightsideeq - othervar.cvar*coscoeff
leftsideeq = leftsideeq - othervar.cvar*coscoeff
sincoeff = leftsideeqdict.get(sinmonom,S.Zero)
if sincoeff != S.Zero:
rightsideeq = rightsideeq - othervar.svar*sincoeff
leftsideeq = leftsideeq - othervar.svar*sincoeff
const = leftsideeq.TC()
if const != S.Zero:
rightsideeq = rightsideeq - const
leftsideeq = leftsideeq - const
# check that leftsideeq doesn't hold any terms with cosmonom and sinmonom?
rightsideeqs.append(rightsideeq)
leftsideeqs.append(leftsideeq)
# number of symbols for kawada-hiro robot is 16
if len(othersymbols) > 2:
reducedeqs = self.reduceBothSidesSymbolically(leftsideeqs,rightsideeqs,usesymbols=False,maxsymbols=18)
for peq in reducedeqs:
peq[0] = Poly(peq[0],*othersymbols)
else:
reducedeqs = [[left,right] for left,right in izip(leftsideeqs,rightsideeqs)]
if len(reducedeqs) > 0:
break
if len(reducedeqs) == 0:
raise self.CannotSolveError('KohliOsvatic method: could not reduce the equations')
finaleqs = []
for peq0,eq1 in reducedeqs:
if peq0 == S.Zero:
finaleqs.append(Poly(eq1,*finaleqsymbols))
if len(finaleqs) >= 2:
# perhaps can solve finaleqs as is?
# transfer othersymbols[2*jother:(2+2*jother)] to the leftside
try:
leftsideeqs = []
rightsideeqs = []
for finaleq in finaleqs:
peq=Poly(finaleq,*othersymbols[2*jother:(2+2*jother)])
leftsideeqs.append(peq.sub(peq.TC()))
rightsideeqs.append(-peq.TC())
reducedeqs2 = self.reduceBothSidesSymbolically(leftsideeqs,rightsideeqs,usesymbols=False,maxsymbols=18)
# find all the equations with left side = to zero
usedvars = set()
for symbol in symbols:
usedvars.add(Symbol(symbol.name[1:]))
AllEquations = []
for eq0, eq1 in reducedeqs2:
if eq0 == S.Zero:
AllEquations.append(eq1.subs(self.invsubs))
if len(AllEquations) > 0:
otherjointtrees = []
tree = self.SolveAllEquations(AllEquations,curvars=list(usedvars),othersolvedvars=[],solsubs=self.freevarsubs,endbranchtree=[AST.SolverSequence([otherjointtrees])])
log.info('first SolveAllEquations successful: %s',usedvars)
# try:
# # although things can be solved at this point, it yields a less optimal solution than if all variables were considered...
# solsubs=list(self.freevarsubs)
# for usedvar in usedvars:
# solsubs += self.Variable(usedvar).subs
# # solved, so substitute back into reducedeqs and see if anything new can be solved
# otherusedvars = set()
# for symbol in othersymbols:
# otherusedvars.add(Symbol(symbol.name[1:]))
# OtherAllEquations = []
# for peq0,eq1 in reducedeqs:
# OtherAllEquations.append((peq0.as_expr()-eq1).subs(self.invsubs).expand())
# otherjointtrees += self.SolveAllEquations(OtherAllEquations,curvars=list(otherusedvars),othersolvedvars=list(usedvars),solsubs=solsubs,endbranchtree=endbranchtree)
# return tree, list(usedvars)+list(otherusedvars)
# except self.CannotSolveError:
# still have the initial solution
otherjointtrees += endbranchtree
return tree, list(usedvars)
except self.CannotSolveError,e:
pass
log.info('build final equations for symbols: %s',finaleqsymbols)
neweqs=[]
for i in range(0,8,2):
p0 = Poly(polyeqs[i][0],cvar,svar)
p0dict = p0.as_dict()
p1 = Poly(polyeqs[i+1][0],cvar,svar)
p1dict = p1.as_dict()
r0 = polyeqs[i][1].as_expr()
r1 = polyeqs[i+1][1].as_expr()
if self.equal(p0dict.get((1,0),S.Zero),-p1dict.get((0,1),S.Zero)) and self.equal(p0dict.get((0,1),S.Zero),p1dict.get((1,0),S.Zero)):
p0,p1 = p1,p0
p0dict,p1dict=p1dict,p0dict
r0,r1 = r1,r0
if self.equal(p0dict.get((1,0),S.Zero),p1dict.get((0,1),S.Zero)) and self.equal(p0dict.get((0,1),S.Zero),-p1dict.get((1,0),S.Zero)):
# p0+tvar*p1, p1-tvar*p0
# subs: tvar*svar + cvar = 1, svar-tvar*cvar=tvar
neweqs.append([Poly(p0dict.get((1,0),S.Zero) + p0dict.get((0,1),S.Zero)*tvar + p0.TC() + tvar*p1.TC(),*symbols), Poly(r0+tvar*r1,*othersymbols)])
neweqs.append([Poly(p0dict.get((1,0),S.Zero)*tvar - p0dict.get((0,1),S.Zero) - p0.TC()*tvar + p1.TC(),*symbols), Poly(r1-tvar*r0,*othersymbols)])
if len(neweqs) != 8:
raise self.CannotSolveError('coefficients of equations need to match! only got %d reduced equations'%len(neweqs))
for eq0,eq1 in neweqs:
commondenom = Poly(S.One,*self.pvars)
hasunknown = False
for m,c in eq1.terms():
foundreq = [req[1] for req in reducedeqs if req[0].monoms()[0] == m]
if len(foundreq) > 0:
n,d = fraction(foundreq[0])
commondenom = Poly(lcm(commondenom,d),*self.pvars)
else:
if m[2*(1-jother)] > 0 or m[2*(1-jother)+1] > 0:
# perhaps there's a way to combine what's in reducedeqs?
log.warn('unknown %s',m)
hasunknown = True
if hasunknown:
continue
commondenom = self.removecommonexprs(commondenom.as_expr(),onlygcd=True,onlynumbers=True)
finaleq = eq0.as_expr()*commondenom
for m,c in eq1.terms():
foundreq = [req[1] for req in reducedeqs if req[0].monoms()[0] == m]
if len(foundreq) > 0:
finaleq = finaleq - c*simplify(foundreq[0]*commondenom)
else:
finaleq = finaleq - Poly.from_dict({m:c*commondenom},*eq1.gens).as_expr()
finaleqs.append(Poly(finaleq.expand(),*finaleqsymbols))
# finally do the half angle substitution with symbols
# set:
# j=othersymbols[2]*(1+dummys[0]**2)*(1+dummys[1]**2)
# k=othersymbols[3]*(1+dummys[0]**2)*(1+dummys[1]**2)
dummys = []
dummysubs = []
dummysubs2 = []
dummyvars = []
usedvars = []
dummys.append(tvar)
dummyvars.append((tvar,tan(0.5*Symbol(tvar.name[1:]))))
usedvars.append(Symbol(cvar.name[1:]))
dummysubs2.append((usedvars[-1],2*atan(tvar)))
dummysubs += [(cvar,(1-tvar**2)/(1+tvar**2)),(svar,2*tvar/(1+tvar**2))]
for i in range(0,len(symbols),2):
dummy = Symbol('ht%s'%symbols[i].name[1:])
# [0] - cos, [1] - sin
dummys.append(dummy)
dummysubs += [(symbols[i],(1-dummy**2)/(1+dummy**2)),(symbols[i+1],2*dummy/(1+dummy**2))]
var = symbols[i].subs(self.invsubs).args[0]
dummyvars.append((dummy,tan(0.5*var)))
dummysubs2.append((var,2*atan(dummy)))
if not var in usedvars:
usedvars.append(var)
commonmult = (1+dummys[1]**2)*(1+dummys[2]**2)
usedvars.append(Symbol(othersymbols[2*jother].name[1:]))
dummyj = Symbol('dummyj')
dummyk = Symbol('dummyk')
dummyjk = Symbol('dummyjk')
dummys.append(dummyj)
dummyvars.append((dummyj,othersymbols[2*jother]*(1+dummyvars[1][1]**2)*(1+dummyvars[2][1]**2)))
dummysubs.append((othersymbols[2*jother],cos(dummyjk)))
dummys.append(dummyk)
dummyvars.append((dummyk,othersymbols[1+2*jother]*(1+dummyvars[1][1]**2)*(1+dummyvars[2][1]**2)))
dummysubs.append((othersymbols[1+2*jother],sin(dummyjk)))
dummysubs2.append((usedvars[-1],dummyjk))
newreducedeqs = []
for peq in finaleqs:
eqnew = S.Zero
for monoms,c in peq.terms():
term = S.One
for i in range(4):
term *= dummysubs[i+2][1]**monoms[i]
if monoms[4] == 1:
eqnew += c * dummyj
elif monoms[5] == 1:
eqnew += c * dummyk
else:
eqnew += c*simplify(term*commonmult)
newreducedeqs.append(Poly(eqnew,*dummys))
exportcoeffeqs = None
for ileftvar in range(len(dummys)):
leftvar = dummys[ileftvar]
try:
exportcoeffeqs,exportmonoms = self.solveDialytically(newreducedeqs,ileftvar,getsubs=None)
break
except self.CannotSolveError,e:
log.warn('failed with leftvar %s: %s',leftvar,e)
if exportcoeffeqs is None:
raise self.CannotSolveError('failed to solve dialytically')
if ileftvar > 0:
raise self.CannotSolveError('solving equations dialytically succeeded with var index %d, unfortunately code generation supports only index 0'%ileftvar)
coupledsolution = AST.SolverCoeffFunction(jointnames=[v.name for v in usedvars],jointeval=[v[1] for v in dummysubs2],jointevalcos=[dummysubs[2*i][1] for i in range(len(usedvars))],jointevalsin=[dummysubs[2*i+1][1] for i in range(len(usedvars))],isHinges=[self.IsHinge(v.name) for v in usedvars],exportvar=dummys[0:3]+[dummyjk],exportcoeffeqs=exportcoeffeqs,exportfnname='solvedialyticpoly16lep',rootmaxdim=16)
self.usinglapack = True
return [coupledsolution]+endbranchtree,usedvars
def solveDialytically(self,dialyticeqs,ileftvar,returnmatrix=False,getsubs=None):
""" Return the coefficients to solve equations dialytically (Salmon 1885) leaving out variable index ileftvar.
Extract the coefficients of 1, leftvar**1, leftvar**2, ... of every equation
every len(dialyticeqs)*len(monoms) coefficients specify one degree of all the equations (order of monoms is specified in the returned exportmonoms)
there should be len(dialyticeqs)*len(monoms)*maxdegree coefficients
Method also checks if the equations are linearly dependent
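In essence this is Sylvester's dialytic method: every equation is also multiplied by the first
remaining variable to double the row count, each distinct monomial is treated as an independent
unknown, and roots of det(Mall[0] + Mall[1]*leftvar + ... + Mall[maxdegree]*leftvar**maxdegree) = 0
are candidate values for leftvar.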
"""
if len(dialyticeqs) == 0:
raise self.CannotSolveError('solveDialytically given zero equations')
allmonoms = set()
origmonoms = set()
maxdegree = 0
leftvar = dialyticeqs[0].gens[ileftvar]
for peq in dialyticeqs:
if sum(peq.degree_list()) == 0:
log.warn('solveDialytically: polynomial %s degree is 0',peq)
continue
for m in peq.monoms():
mlist = list(m)
maxdegree=max(maxdegree,mlist.pop(ileftvar))
allmonoms.add(tuple(mlist))
origmonoms.add(tuple(mlist))
mlist[0] += 1
allmonoms.add(tuple(mlist))
allmonoms = list(allmonoms)
allmonoms.sort()
origmonoms = list(origmonoms)
origmonoms.sort()
if len(allmonoms)<2*len(dialyticeqs):
log.warn('solveDialytically equations %d > %d, should be equal...', 2*len(dialyticeqs),len(allmonoms))
dialyticeqs = dialyticeqs[0:(len(allmonoms)/2)]
if len(allmonoms) == 0 or len(allmonoms)>2*len(dialyticeqs):
raise self.CannotSolveError('solveDialytically: more unknowns than equations %d>%d'%(len(allmonoms), 2*len(dialyticeqs)))
Mall = [zeros((2*len(dialyticeqs),len(allmonoms))) for i in range(maxdegree+1)]
exportcoeffeqs = [S.Zero]*(len(dialyticeqs)*len(origmonoms)*(maxdegree+1))
for ipeq,peq in enumerate(dialyticeqs):
for m,c in peq.terms():
mlist = list(m)
degree=mlist.pop(ileftvar)
exportindex = degree*len(origmonoms)*len(dialyticeqs) + len(origmonoms)*ipeq+origmonoms.index(tuple(mlist))
exportcoeffeqs[exportindex] = c
Mall[degree][len(dialyticeqs)+ipeq,allmonoms.index(tuple(mlist))] = c
mlist[0] += 1
Mall[degree][ipeq,allmonoms.index(tuple(mlist))] = c
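# each equation fills two rows per degree matrix: row len(dialyticeqs)+ipeq holds the equation
# as-is and row ipeq holds it multiplied by the first remaining generator (the mlist[0] += 1
# shift); this doubling is the dialytic expansion step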
# have to check that the determinant is not zero for several values of ileftvar! It is very common that
# some equations are linearly dependent and not solvable through this method.
if self.testconsistentvalues is not None:
linearlyindependent = False
for itest,subs in enumerate(self.testconsistentvalues):
if getsubs is not None:
# have to explicitly evaluate since testsubs can be very complex
subsvals = [(s,v.evalf()) for s,v in subs]
subs = subsvals+getsubs(subsvals)
# have to sub at least twice with the global symbols
A = Mall[maxdegree].subs(subs).subs(self.globalsymbols).subs(subs).evalf()
eps = 10**-(self.precision-3)
Anumpy = numpy.array(numpy.array(A), numpy.float64)
if numpy.isnan(numpy.sum(Anumpy)):
break
eigenvals = numpy.linalg.eigvals(Anumpy)
if all([Abs(f) > eps for f in eigenvals]):
try:
Ainv = A.inv(method='LU')
except ValueError, e:
log.error('error when taking inverse: %s', e)
continue
B = Ainv*Mall[1].subs(subs).evalf()
C = Ainv*Mall[0].subs(subs).evalf()
A2 = zeros((B.shape[0],B.shape[0]*2))
for i in range(B.shape[0]):
A2[i,B.shape[0]+i] = S.One
A2=A2.col_join((-C).row_join(-B))
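# for maxdegree == 2, A2 = [[0, I], [-C, -B]] is the companion linearization of the quadratic
# eigenproblem (C + B*x + x**2*I)*v = 0, so the eigenvalues of A2 are candidate leftvar values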
eigenvals2,eigenvecs2 = numpy.linalg.eig(numpy.array(numpy.array(A2),numpy.float64))
# check if solutions can actually be extracted
# find all the real eigenvalues (candidate roots)
roots = []
numrepeating = 0
for ieig,eigenvalue in enumerate(eigenvals2):
if abs(numpy.imag(eigenvalue)) < 1e-12:
if abs(numpy.real(eigenvalue)) > 1:
ev = eigenvecs2[A.shape[0]:,ieig]
else:
ev = eigenvecs2[:A.shape[0],ieig]
if abs(ev[0]) < 1e-14:
continue
br = ev[1:] / ev[0]
dists = abs(numpy.array(roots) - numpy.real(eigenvalue))
if any(dists<1e-7):
numrepeating += 1
roots.append(numpy.real(eigenvalue))
if numrepeating > 0:
log.info('found %d repeating roots in solveDialytically matrix: %s',numrepeating,roots)
continue
Atotal = None
for idegree in range(maxdegree+1):
Adegree = Mall[idegree].subs(subs).subs(self.globalsymbols).evalf()
if Atotal is None:
Atotal = Adegree
else:
Atotal += Adegree*leftvar**idegree
# make sure the determinant of Atotal is non-zero for at least several test values
leftvarvalue = leftvar.subs(subs).evalf()
hasnonzerodet = False
for testvalue in [-10*S.One, -S.One,-0.5*S.One, 0.5*S.One, S.One, 10*S.One]:
detvalue = Atotal.subs(leftvar,leftvarvalue+testvalue).evalf().det()
if abs(detvalue) > 1e-10:
hasnonzerodet = True
if not hasnonzerodet:
log.warn('has zero det, so failed')
else:
linearlyindependent = True
break
if not linearlyindependent:
raise self.CannotSolveError('equations are not linearly independent')
if returnmatrix:
return Mall,allmonoms
return exportcoeffeqs,origmonoms
def SubstituteGinacEquations(self,dictequations, valuesubs, localsymbolmap):
gvaluesubs = []
for var, value in valuesubs:
if value != oo:
if var.name in localsymbolmap:
gvaluesubs.append(localsymbolmap[var.name] == GinacUtils.ConvertToGinac(value,localsymbolmap))
retvalues = []
for var, value in dictequations:
newvalue = value.subs(gvaluesubs).evalf()
if var.name in localsymbolmap:
gvaluesubs.append(localsymbolmap[var.name]==newvalue)
else:
log.warn('%s not in map',var)
retvalues.append((var,newvalue))
return retvalues
def SimplifyTransformPoly(self,peq):
"""simplifies the coefficients of the polynomial with simplifyTransform and returns the new polynomial
"""
return peq.termwise(lambda m,c: self.SimplifyTransform(c))
def SimplifyTransform(self,eq,othervars=None):
"""Attemps to simplify an equation given that variables from a rotation matrix have been used. There are 12 constraints that are tested:
- lengths of rows and colums are 1
- dot products of combinations of rows/columns are 0
- cross products of combinations of rows/columns yield the left over row/column
:param othervars: optional list of the unknown variables inside the equations. Help simplify depending on the terms of these variables
"""
if self._iktype != 'transform6d':
return eq
if othervars is not None:
peq = Poly(eq,*othervars)
if peq == S.Zero:
return S.Zero
peqnew = peq.termwise(lambda m,c: self.SimplifyTransform(c))
return peqnew.as_expr()
origeq = eq
# first simplify just rotations since they don't add any new variables
changed = True
while changed and eq.has(*self._rotsymbols):
changed = False
neweq = self._SimplifyRotationNorm(eq, self._rotnormgroups)
if neweq is not None:
eq = neweq
changed = True
neweq = self._SimplifyRotationDot(eq, self._rotsymbols, self._rotdotgroups)
if neweq is not None:
eq = neweq
changed = True
neweq = self._SimplifyRotationCross(eq, self._rotsymbols, self._rotcrossgroups)
if neweq is not None:
eq = neweq
changed = True
# check if full 3D position is available
if self.pp is not None:
changed = True
while changed and eq.has(*self._rotpossymbols):
changed = False
neweq = self._SimplifyRotationNorm(eq, self._rotposnormgroups)
if neweq is not None:
eq = neweq
changed = True
neweq = self._SimplifyRotationDot(eq, self._rotpossymbols, self._rotposdotgroups)
if neweq is not None:
eq = neweq
changed = True
neweq = self._SimplifyRotationCross(eq, self._rotpossymbols, self._rotposcrossgroups)
if neweq is not None:
eq = neweq
changed = True
if isinstance(eq, Poly):
eq = eq.as_expr()
#log.info("simplify eq:\n%r\n->new eq:\n%r", origeq, eq)
return eq
def _SimplifyRotationNorm(self, eq, groups):
"""simplify equation using self._rotnormgroups
"""
neweq = None
for group in groups:
try:
p = Poly(eq,group[0],group[1],group[2])
except PolynomialError:
continue
changed = False
if len(p.terms()) == 1:
continue
for (m0,c0),(m1,c1) in combinations(p.terms(),2):
if self.equal(c0,c1):
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
if ((m0[i] == 2 and m1[j] == 2) or (m0[j]==2 and m1[i]==2)) and m0[k]==m1[k]:
p = p + c0*(group[3]-group[0]**2-group[1]**2-group[2]**2)*group[k]**(m0[k])
neweq = p#.as_expr()
eq = neweq
changed = True
break
if changed:
break
return neweq
def _SimplifyRotationDot(self, eq, symbols, groups):
"""check for dot products between rows and columns
"""
try:
p = Poly(eq,*symbols)
except PolynomialError:
return None
changed = False
for dg in groups:
for i,j,k in [(0,1,2),(0,2,1),(1,2,0)]:
for comb in combinations(p.terms(),2):
if self.equal(comb[0][1],comb[1][1]):
for (m0,c0),(m1,c1) in [comb,comb[::-1]]:
if m0[dg[i][0]] == 1 and m0[dg[i][1]] == 1 and m1[dg[j][0]] == 1 and m1[dg[j][1]] == 1:
# make sure the left over terms are also the same
m0l = list(m0); m0l[dg[i][0]] = 0; m0l[dg[i][1]] = 0
m1l = list(m1); m1l[dg[j][0]] = 0; m1l[dg[j][1]] = 0
if tuple(m0l) == tuple(m1l):
m2 = list(m0l); m2[dg[k][0]] += 1; m2[dg[k][1]] += 1
# there is a bug in sympy v0.6.7 polynomial adding here!
p = p.sub(Poly.from_dict({m0:c0},*p.gens)).sub(Poly.from_dict({m1:c1},*p.gens)).sub(Poly.from_dict({tuple(m2):c0},*p.gens))
if dg[3] != S.Zero:
p = p.add(Poly(dg[3],*p.gens)*Poly.from_dict({tuple(m0l):c0},*p.gens))
changed = True
break
if changed:
break
return p if changed else None
def _SimplifyRotationCross(self, eq, symbols, groups):
"""simplify rotations using cross products
"""
changed = False
try:
p = Poly(eq,*symbols)
except PolynomialError:
return None
for cg in groups:
for comb in combinations(p.terms(),2):
if self.equal(comb[0][1],-comb[1][1]):
for (m0,c0),(m1,c1) in [comb,comb[::-1]]:
if m0[cg[0][0]] == 1 and m0[cg[0][1]] == 1 and m1[cg[1][0]] == 1 and m1[cg[1][1]] == 1:
# make sure the left over terms are also the same
m0l = list(m0); m0l[cg[0][0]] = 0; m0l[cg[0][1]] = 0
m1l = list(m1); m1l[cg[1][0]] = 0; m1l[cg[1][1]] = 0
if tuple(m0l) == tuple(m1l):
m2 = m0l; m2[cg[2]] += 1
# there is a bug in sympy polynomial caching here! (0.6.7)
p = p.sub(Poly.from_dict({m0:c0},*p.gens)).sub(Poly.from_dict({m1:c1},*p.gens)).add(Poly.from_dict({tuple(m2):c0},*p.gens))
changed = True
break
if changed:
break
return p if changed else None
def CheckExpressionUnique(self, exprs, expr, checknegative=True):
"""checks if expr is inside exprs.
:param checknegative: if True, then also check if -expr is inside exprs
"""
for exprtest in exprs:
if self.equal(expr,exprtest):
return False
if checknegative:
for exprtest in exprs:
if self.equal(-expr,exprtest):
return False
return True
def getCommonExpression(self, exprs, expr):
for i,exprtest in enumerate(exprs):
if self.equal(expr,exprtest):
return i
return None
def verifyAllEquations(self,AllEquations,unsolvedvars, solsubs, tree=None):
extrazerochecks=[]
for i in range(len(AllEquations)):
expr = AllEquations[i]
if not self.isValidSolution(expr):
raise self.CannotSolveError('verifyAllEquations: equation is not valid: %s'%(str(expr)))
if not expr.has(*unsolvedvars) and self.CheckExpressionUnique(extrazerochecks,expr):
extrazerochecks.append(self.removecommonexprs(expr.subs(solsubs).evalf(),onlygcd=False,onlynumbers=True))
if len(extrazerochecks) > 0:
return [AST.SolverCheckZeros(None,extrazerochecks,tree,[AST.SolverBreak('verifyAllEquations')],anycondition=False)]
return tree
def PropagateSolvedConstants(self, AllEquations, othersolvedvars, unknownvars, constantSymbols=None):
"""
        Sometimes equations can be like "npz" or "pp-1", which means npz=0 and pp=1. Check for these constraints and apply them to the rest of the equations.
        Returns a new set of equations.
        :param constantSymbols: the variables to try to propagate; if None, self.pvars is used
"""
if constantSymbols is not None:
constantSymbols = list(constantSymbols)
else:
constantSymbols = list(self.pvars)
for othersolvedvar in othersolvedvars:
constantSymbols.append(othersolvedvar)
if self.IsHinge(othersolvedvar.name):
constantSymbols.append(cos(othersolvedvar))
constantSymbols.append(sin(othersolvedvar))
newsubsdict = {}
for eq in AllEquations:
if not eq.has(*unknownvars) and eq.has(*constantSymbols):
try:
reducedeq = self.SimplifyTransform(eq)
for constantSymbol in constantSymbols:
if eq.has(constantSymbol):
try:
peq = Poly(eq,constantSymbol)
if peq.degree(0) == 1:
# equation is only degree 1 in the variable, and doesn't have any solvevars multiplied with it
newsolution = solve(peq,constantSymbol)[0]
if constantSymbol in newsubsdict:
if self.codeComplexity(newsolution) < self.codeComplexity(newsubsdict[constantSymbol]):
newsubsdict[constantSymbol] = newsolution
else:
newsubsdict[constantSymbol] = newsolution
except PolynomialError:
pass
except PolynomialError, e:
                    # expected from SimplifyTransform if eq is too complex
pass
# first substitute everything that doesn't have othersolvedvar or unknownvars
numberSubstitutions = []
otherSubstitutions = []
for var, value in newsubsdict.iteritems():
if not value.has(*constantSymbols):
numberSubstitutions.append((var,value))
else:
otherSubstitutions.append((var,value))
NewEquations = []
for ieq, eq in enumerate(AllEquations):
if eq.has(*unknownvars):
neweq = eq.subs(numberSubstitutions).expand()
if neweq != S.Zero:
# don't expand here since otherSubstitutions could make it very complicated
neweq2 = neweq.subs(otherSubstitutions)
if self.codeComplexity(neweq2) < self.codeComplexity(neweq)*2:
neweq2 = neweq2.expand()
if self.codeComplexity(neweq2) < self.codeComplexity(neweq) and neweq2 != S.Zero:
NewEquations.append(neweq2)
else:
NewEquations.append(neweq)
else:
NewEquations.append(neweq)
return NewEquations
def SolveAllEquations(self,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=None,unknownvars=None, currentcasesubs=None):
if len(curvars) == 0:
return endbranchtree
if unknownvars is None:
unknownvars = []
self._scopecounter+=1
scopecounter = int(self._scopecounter)
log.info('c=%d, %s %s',self._scopecounter, othersolvedvars,curvars)
solsubs = solsubs[:]
freevarinvsubs = [(f[1],f[0]) for f in self.freevarsubs]
solinvsubs = [(f[1],f[0]) for f in solsubs]
# single variable solutions
solutions = []
for curvar in curvars:
othervars = unknownvars+[var for var in curvars if var != curvar]
curvarsym = self.Variable(curvar)
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has(*othervars)) and e.has(curvar,curvarsym.htvar,curvarsym.cvar,curvarsym.svar):
eq = e.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(raweqns,eq):
raweqns.append(eq)
if len(raweqns) > 0:
try:
rawsolutions=self.solveSingleVariable(self.sortComplexity(raweqns),curvar,othersolvedvars, unknownvars=curvars+unknownvars)
for solution in rawsolutions:
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
        # only return here if a solution was found that perfectly determines the unknown;
        # otherwise, the pairwise solver could come up with something...
        # There is still a problem with this (bertold robot):
        # sometimes an equation like atan2(y,x) evaluates to atan2(0,0) during runtime.
        # This cannot be known at compile time, so the equation is selected and any other possibilities are rejected.
        # In the bertold robot case, the next possibility is a pair-wise solution involving two variables.
if any([s[0].numsolutions()==1 for s in solutions]):
return self.AddSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
curvarsubssol = []
for var0,var1 in combinations(curvars,2):
othervars = unknownvars+[var for var in curvars if var != var0 and var != var1]
raweqns = []
complexity = 0
for e in AllEquations:
if (len(othervars) == 0 or not e.has(*othervars)) and e.has(var0,var1):
eq = e.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(raweqns,eq):
raweqns.append(eq)
complexity += self.codeComplexity(eq)
if len(raweqns) > 1:
curvarsubssol.append((var0,var1,raweqns,complexity))
curvarsubssol.sort(lambda x, y: x[3]-y[3])
if len(curvars) == 2 and self.IsHinge(curvars[0].name) and self.IsHinge(curvars[1].name) and len(curvarsubssol) > 0:
            # only two variables are left, so it might be the case that the axes are aligned and the two variables depend on each other
            # note that the axes' anchors also have to be along the direction!
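            # rough idea (a sketch of the mechanism): substituting var0 = dummy - var1
            # means dummy = var0 + var1; if every equation then depends only on dummy
            # (and already-solved variables), only the sum of the two joint angles is
            # observable, i.e. the axes are parallel and one variable can be chosen freely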
var0,var1,raweqns,complexity = curvarsubssol[0]
dummyvar = Symbol('dummy')
dummyvalue = var0 + var1
NewEquations = []
hasExtraConstraints = False
for eq in raweqns:
neweq = self.trigsimp(eq.subs(var0,dummyvar-var1).expand(trig=True),curvars)
if neweq.has(dummyvar):
if neweq.has(*(othervars+curvars)):
hasExtraConstraints = True
break
else:
eq = neweq.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(NewEquations,eq):
NewEquations.append(eq)
if len(NewEquations) == 0 or hasExtraConstraints:
                # try subtracting: dummy = var0 - var1, i.e. var0 = dummy + var1
                hasExtraConstraints = False
                dummyvalue = var0 - var1
                NewEquations = []
                for eq in raweqns:
                    neweq = self.trigsimp(eq.subs(var0,dummyvar+var1).expand(trig=True),curvars)
if neweq.has(dummyvar):
if neweq.has(*(othervars+curvars)):
hasExtraConstraints = True
break
else:
eq = neweq.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(NewEquations,eq):
NewEquations.append(eq)
if not hasExtraConstraints and len(NewEquations) > 0:
dummysolutions = []
try:
rawsolutions=self.solveSingleVariable(NewEquations,dummyvar,othersolvedvars, unknownvars=curvars+unknownvars)
for solution in rawsolutions:
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
dummysolutions.append(solution)
except self.CannotSolveError:
pass
if any([s.numsolutions()==1 for s in dummysolutions]):
# two axes are aligning, so modify the solutions to reflect the original variables and add a free variable
log.info('found two aligning axes %s: %r',dummyvalue, NewEquations)
solutions = []
for dummysolution in dummysolutions:
if dummysolution.jointevalsin is not None or dummysolution.jointevalcos is not None:
log.warn('dummy solution should not have sin/cos parts!')
solution=AST.SolverSolution(curvars[0].name, isHinge=self.IsHinge(curvars[0].name))
solution.jointeval = [dummysolution.jointeval[0] - dummyvalue + curvars[0]]
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,curvars[0]))
tree = self.AddSolution(solutions,raweqns,curvars[0:1],othersolvedvars+curvars[1:2],solsubs+self.Variable(curvars[1]).subs,endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
if tree is None:
return None
return [AST.SolverFreeParameter(curvars[1].name, tree)]
else:
log.warn('almost found two axes but num solutions was: %r', [s.numsolutions()==1 for s in dummysolutions])
for var0,var1,raweqns,complexity in curvarsubssol:
try:
rawsolutions=self.SolvePrismaticHingePairVariables(raweqns,var0,var1,othersolvedvars,unknownvars=curvars+unknownvars)
for solution in rawsolutions:
#solution.subs(freevarinvsubs)
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,Symbol(solution.jointname)))
if len(rawsolutions) > 0: # solving a pair is rare, so any solution will do
break
except self.CannotSolveError:
pass
for var0,var1,raweqns,complexity in curvarsubssol:
try:
rawsolutions=self.SolvePairVariables(raweqns,var0,var1,othersolvedvars,unknownvars=curvars+unknownvars)
except self.CannotSolveError, e:
log.debug(e)
# try:
# rawsolutions=self.SolvePrismaticHingePairVariables(raweqns,var0,var1,othersolvedvars,unknownvars=curvars+unknownvars)
# except self.CannotSolveError, e:
# log.debug(e)
rawsolutions = []
for solution in rawsolutions:
#solution.subs(freevarinvsubs)
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,Symbol(solution.jointname)))
if len(rawsolutions) > 0: # solving a pair is rare, so any solution will do
break
# take the least complex solution and go on
if len(solutions) > 0:
return self.AddSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
# test with higher degrees, necessary?
for curvar in curvars:
othervars = unknownvars+[var for var in curvars if var != curvar]
raweqns = []
for e in AllEquations:
if (len(othervars) == 0 or not e.has(*othervars)) and e.has(curvar):
eq = e.subs(self.freevarsubs+solsubs)
if self.CheckExpressionUnique(raweqns,eq):
raweqns.append(eq)
for raweqn in raweqns:
try:
log.info('testing with higher degrees')
solution=self.solveHighDegreeEquationsHalfAngle([raweqn],self.Variable(curvar))
self.ComputeSolutionComplexity(solution,othersolvedvars,curvars)
solutions.append((solution,curvar))
except self.CannotSolveError:
pass
if len(solutions) > 0:
return self.AddSolution(solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
# solve with all 3 variables together?
# perhaps there's a degree of freedom that is not trivial to compute?
# take the highest hinge variable and set it
return self.GuessValuesAndSolveEquations(AllEquations, curvars, othersolvedvars, solsubs, endbranchtree, currentcases, unknownvars, currentcasesubs)
# # have got this far, so perhaps two axes are aligned?
# raise self.CannotSolveError('SolveAllEquations failed to find a variable to solve')
def _SubstituteGlobalSymbols(self, eq, globalsymbols=None):
if globalsymbols is None:
globalsymbols = self.globalsymbols
preveq = eq
neweq = preveq.subs(globalsymbols)
while preveq != neweq:
if not self.isValidSolution(neweq):
raise self.CannotSolveError('equation %r is not valid'%neweq)
preveq = neweq
neweq = preveq.subs(globalsymbols)
return neweq
def _AddToGlobalSymbols(self, var, eq):
"""adds to the global symbols, returns True if replaced with an existing entry
"""
for iglobal, gvarexpr in enumerate(self.globalsymbols):
if var == gvarexpr[0]:
self.globalsymbols[iglobal] = (var, eq)
return True
self.globalsymbols.append((var, eq))
return False
def AddSolution(self,solutions,AllEquations,curvars,othersolvedvars,solsubs,endbranchtree, currentcases=None, currentcasesubs=None, unknownvars=None):
"""Take the least complex solution of a set of solutions and resume solving
"""
self._scopecounter += 1
scopecounter = int(self._scopecounter)
solutions = [s for s in solutions if s[0].score < oo and s[0].checkValidSolution()] # remove infinite scores
if len(solutions) == 0:
raise self.CannotSolveError('no valid solutions')
if unknownvars is None:
unknownvars = []
solutions.sort(lambda x, y: x[0].score-y[0].score)
hasonesolution = False
for solution in solutions:
checkforzeros = solution[0].checkforzeros
hasonesolution |= solution[0].numsolutions() == 1
if len(checkforzeros) == 0 and solution[0].numsolutions() == 1:
# did find a good solution, so take it. Make sure to check any zero branches
var = solution[1]
newvars=curvars[:]
newvars.remove(var)
return [solution[0].subs(solsubs)]+self.SolveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree, currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
if not hasonesolution:
# check again except without the number of solutions requirement
for solution in solutions:
checkforzeros = solution[0].checkforzeros
if len(checkforzeros) == 0:
# did find a good solution, so take it. Make sure to check any zero branches
var = solution[1]
newvars=curvars[:]
newvars.remove(var)
return [solution[0].subs(solsubs)]+self.SolveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
originalGlobalSymbols = self.globalsymbols
# all solutions have check for zero equations
# choose the variable with the shortest solution and compute (this is a conservative approach)
usedsolutions = []
# remove any solutions with similar checkforzero constraints (because they are essentially the same)
for solution,var in solutions:
solution.subs(solsubs)
if len(usedsolutions) == 0:
usedsolutions.append((solution,var))
else:
match = False
for usedsolution,usedvar in usedsolutions:
if len(solution.checkforzeros) == len(usedsolution.checkforzeros):
if not any([self.CheckExpressionUnique(usedsolution.checkforzeros,eq) for eq in solution.checkforzeros]):
match = True
break
if not match:
usedsolutions.append((solution,var))
if len(usedsolutions) >= 3:
# don't need more than three alternatives (used to be two, but then lookat barrettwam4 proved that wrong)
break
nextsolutions = dict()
allvars = []
for v in curvars:
allvars += self.Variable(v).vars
allothersolvedvars = []
for v in othersolvedvars:
allothersolvedvars += self.Variable(v).vars
lastbranch = []
prevbranch=lastbranch
if currentcases is None:
currentcases = set()
if currentcasesubs is None:
currentcasesubs = list()
if self.degeneratecases is None:
self.degeneratecases = self.DegenerateCases()
handledconds = self.degeneratecases.gethandledconds(currentcases)
        # one-to-one correspondence between usedsolutions and the SolverCheckZeros hierarchies (used for the cross product of equations later on)
zerosubstitutioneqs = []
# zerosubstitutioneqs equations flattened for easier checking
flatzerosubstitutioneqs = []
hascheckzeros = False
# iterate in reverse order and put the most recently processed solution at the front.
# There is a problem with this algorithm transferring the degenerate cases correctly.
# Although the zeros of the first equation are checked, they are not added as conditions
        # to the later equations, so the later equations will still treat those variables as unknowns
        # (even though they are determined to be specific constants). This is most apparent in rotations.
for solution,var in usedsolutions[::-1]:
# there are divide by zeros, so check if they can be explicitly solved for joint variables
checkforzeros = []
localsubstitutioneqs = []
for checkzero in solution.checkforzeros:
if checkzero.has(*allvars):
log.info('ignoring special check for zero since it has symbols %s: %s',str(allvars),str(checkzero))
continue
                # don't bother trying to extract anything if the expression is too complex (takes a lot of computation time to check and most likely nothing will be extracted); 120 is an arbitrary threshold
checkzeroComplexity = self.codeComplexity(checkzero)
if checkzeroComplexity > 120:
log.warn('checkforzero too big (%d): %s', checkzeroComplexity, checkzero)
# don't even add it if it is too big
if checkzeroComplexity < 500:
checkforzeros.append(checkzero)#self.removecommonexprs(checkzero.evalf(),onlygcd=False,onlynumbers=True))
else:
checkzero = self._SubstituteGlobalSymbols(checkzero)
# fractions could get big, so evaluate directly
checkzeroeval = checkzero.evalf()
if checkzeroComplexity < self.codeComplexity(checkzeroeval):
checkforzeros.append(checkzero)
else:
checkforzeros.append(checkzero.evalf())#self.removecommonexprs(checkzero.evalf(),onlygcd=False,onlynumbers=True)
checksimplezeroexprs = [checkzero]
if not checkzero.has(*allothersolvedvars):
sumsquaresexprs = self._GetSumSquares(checkzero)
if sumsquaresexprs is not None:
checksimplezeroexprs += sumsquaresexprs
sumsquaresexprstozero = []
for sumsquaresexpr in sumsquaresexprs:
if sumsquaresexpr.is_Symbol:
sumsquaresexprstozero.append(sumsquaresexpr)
elif sumsquaresexpr.is_Mul:
for arg in sumsquaresexpr.args:
if arg.is_Symbol:
sumsquaresexprstozero.append(arg)
if len(sumsquaresexprstozero) > 0:
localsubstitutioneqs.append([sumsquaresexprstozero,checkzero,[(sumsquaresexpr,S.Zero) for sumsquaresexpr in sumsquaresexprstozero], []])
for checksimplezeroexpr in checksimplezeroexprs:
#if checksimplezeroexpr.has(*othersolvedvars): # cannot do this check since sjX,cjX might be used
for othervar in othersolvedvars:
sothervar = self.Variable(othervar).svar
cothervar = self.Variable(othervar).cvar
if checksimplezeroexpr.has(othervar,sothervar,cothervar):
# the easiest thing to check first is if the equation evaluates to zero on boundaries 0,pi/2,pi,-pi/2
s = AST.SolverSolution(othervar.name,jointeval=[],isHinge=self.IsHinge(othervar.name))
for value in [S.Zero,pi/2,pi,-pi/2]:
try:
checkzerosub=checksimplezeroexpr.subs([(othervar,value),(sothervar,sin(value).evalf(n=30)),(cothervar,cos(value).evalf(n=30))])
if self.isValidSolution(checkzerosub) and checkzerosub.evalf(n=30) == S.Zero:
if s.jointeval is None:
s.jointeval = []
s.jointeval.append(S.One*value)
except AssertionError,e:
log.warn('othervar %s=%f: %s',str(othervar),value,e)
if s.jointeval is not None and len(s.jointeval) > 0:
ss = [s]
else:
ss = []
try:
# checksimplezeroexpr can be simple like -cj4*r21 - r20*sj4
# in which case the solutions would be [-atan2(-r21, -r20), -atan2(-r21, -r20) + 3.14159265358979]
ss += self.solveSingleVariable([checksimplezeroexpr.subs([(sothervar,sin(othervar)),(cothervar,cos(othervar))])],othervar,othersolvedvars)
except PolynomialError:
# checksimplezeroexpr was too complex
pass
except self.CannotSolveError,e:
# this is actually a little tricky, sometimes really good solutions can have a divide that looks like:
                                # ((0.405 + 0.331*cj2)**2 + 0.109561*sj2**2) (manusarm_left)
                                # This will never be 0, but the equation cannot be solved for the variable. Instead of rejecting the solution, add a condition to check if checksimplezeroexpr itself is 0 or not
pass
for s in ss:
# can actually simplify Positions and possibly get a new solution!
if s.jointeval is not None:
for eq in s.jointeval:
eq = self._SubstituteGlobalSymbols(eq)
                                        # why check for just a number? is it ok to check that the solution doesn't contain any other variables?
# if the equation is non-numerical, make sure it isn't deep in the degenerate cases
if eq.is_number or (len(currentcases) <= 1 and not eq.has(*allothersolvedvars) and self.codeComplexity(eq) < 100):
isimaginary = self.AreAllImaginaryByEval(eq)
# TODO should use the fact that eq is imaginary
if isimaginary:
log.warn('eq %s is imaginary, but currently do not support this', eq)
continue
dictequations = []
if not eq.is_number and not eq.has(*allothersolvedvars):
# not dependent on variables, so it could be in the form of atan(px,py), so convert to a global symbol since it never changes
sym = self.gsymbolgen.next()
dictequations.append((sym,eq))
#eq = sym
sineq = self.gsymbolgen.next()
dictequations.append((sineq,self.SimplifyAtan2(sin(eq))))
coseq = self.gsymbolgen.next()
dictequations.append((coseq,self.SimplifyAtan2(cos(eq))))
else:
sineq = sin(eq).evalf(n=30)
coseq = cos(eq).evalf(n=30)
cond=Abs(othervar-eq.evalf(n=30))
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs+localsubstitutioneqs])),cond):
if self.IsHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,sineq),(sin(othervar),sineq),(cothervar,coseq),(cos(othervar),coseq),(othervar,eq)], dictequations])
elif s.jointevalsin is not None:
for eq in s.jointevalsin:
eq = self.SimplifyAtan2(self._SubstituteGlobalSymbols(eq))
if eq.is_number or (len(currentcases) <= 1 and not eq.has(*allothersolvedvars) and self.codeComplexity(eq) < 100):
dictequations = []
# test when cos(othervar) > 0
# don't use asin(eq)!! since eq = (-pz**2/py**2)**(1/2), which would produce imaginary numbers
#cond=othervar-asin(eq).evalf(n=30)
# test if eq is imaginary, if yes, then only solution is when sothervar==0 and eq==0
isimaginary = self.AreAllImaginaryByEval(eq)
if isimaginary:
cond = abs(sothervar) + abs((eq**2).evalf(n=30)) + abs(sign(cothervar)-1)
else:
if not eq.is_number and not eq.has(*allothersolvedvars):
# not dependent on variables, so it could be in the form of atan(px,py), so convert to a global symbol since it never changes
sym = self.gsymbolgen.next()
dictequations.append((sym,eq))
#eq = sym
cond=abs(sothervar-eq.evalf(n=30)) + abs(sign(cothervar)-1)
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs+localsubstitutioneqs])),cond):
if self.IsHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
if isimaginary:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,S.Zero),(sin(othervar),S.Zero),(cothervar,S.One),(cos(othervar),S.One),(othervar,S.One)], dictequations])
else:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,eq),(sin(othervar),eq),(cothervar,sqrt(1-eq*eq).evalf(n=30)),(cos(othervar),sqrt(1-eq*eq).evalf(n=30)),(othervar,asin(eq).evalf(n=30))], dictequations])
# test when cos(othervar) < 0
if isimaginary:
cond = abs(sothervar) + abs((eq**2).evalf(n=30)) + abs(sign(cothervar)+1)
else:
cond=abs(sothervar-eq.evalf(n=30))+abs(sign(cothervar)+1)
#cond=othervar-(pi-asin(eq).evalf(n=30))
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs+localsubstitutioneqs])),cond):
if self.IsHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
if isimaginary:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,S.Zero),(sin(othervar),S.Zero),(cothervar,-S.One),(cos(othervar),-S.One),(othervar,pi.evalf(n=30))], dictequations])
else:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,eq),(sin(othervar),eq),(cothervar,-sqrt(1-eq*eq).evalf(n=30)),(cos(othervar),-sqrt(1-eq*eq).evalf(n=30)),(othervar,(pi-asin(eq)).evalf(n=30))], dictequations])
elif s.jointevalcos is not None:
for eq in s.jointevalcos:
eq = self.SimplifyAtan2(self._SubstituteGlobalSymbols(eq))
if eq.is_number or (len(currentcases) <= 1 and not eq.has(*allothersolvedvars) and self.codeComplexity(eq) < 100):
dictequations = []
# test when sin(othervar) > 0
# don't use acos(eq)!! since eq = (-pz**2/px**2)**(1/2), which would produce imaginary numbers
#cond=othervar-acos(eq).evalf(n=30)
isimaginary = self.AreAllImaginaryByEval(eq)
if isimaginary:
cond=abs(cothervar)+abs((eq**2).evalf(n=30)) + abs(sign(sothervar)-1)
else:
if not eq.is_number and not eq.has(*allothersolvedvars):
# not dependent on variables, so it could be in the form of atan(px,py), so convert to a global symbol since it never changes
sym = self.gsymbolgen.next()
originalGlobalSymbols.append((sym,eq))
eq = sym
cond=abs(cothervar-eq.evalf(n=30)) + abs(sign(sothervar)-1)
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs+localsubstitutioneqs])),cond):
if self.IsHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
if isimaginary:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,S.One),(sin(othervar),S.One),(cothervar,S.Zero),(cos(othervar),S.Zero),(othervar,(pi/2).evalf(n=30))], dictequations])
else:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,sqrt(1-eq*eq).evalf(n=30)),(sin(othervar),sqrt(1-eq*eq).evalf(n=30)),(cothervar,eq),(cos(othervar),eq),(othervar,acos(eq).evalf(n=30))], dictequations])
#cond=othervar+acos(eq).evalf(n=30)
if isimaginary:
cond=abs(cothervar)+abs((eq**2).evalf(n=30)) + abs(sign(sothervar)+1)
else:
cond=abs(cothervar-eq.evalf(n=30)) + abs(sign(sothervar)+1)
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs+localsubstitutioneqs])),cond):
if self.IsHinge(othervar.name):
evalcond=fmod(cond+pi,2*pi)-pi
else:
evalcond=cond
if isimaginary:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,-S.One),(sin(othervar),-S.One),(cothervar,S.Zero),(cos(othervar),S.Zero),(othervar,(-pi/2).evalf(n=30))], dictequations])
else:
localsubstitutioneqs.append([[cond],evalcond,[(sothervar,-sqrt(1-eq*eq).evalf(n=30)),(sin(othervar),-sqrt(1-eq*eq).evalf(n=30)),(cothervar,eq),(cos(othervar),eq),(othervar,-acos(eq).evalf(n=30))], dictequations])
flatzerosubstitutioneqs += localsubstitutioneqs
zerosubstitutioneqs.append(localsubstitutioneqs)
if not var in nextsolutions:
try:
newvars=curvars[:]
newvars.remove(var)
olddegeneratecases = self.degeneratecases
self.degeneratecases = olddegeneratecases.clone()
nextsolutions[var] = self.SolveAllEquations(AllEquations,curvars=newvars,othersolvedvars=othersolvedvars+[var],solsubs=solsubs+self.Variable(var).subs,endbranchtree=endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
finally:
self.degeneratecases = olddegeneratecases
if len(checkforzeros) > 0:
hascheckzeros = True
solvercheckzeros = AST.SolverCheckZeros(jointname=var.name,jointcheckeqs=checkforzeros,nonzerobranch=[solution]+nextsolutions[var],zerobranch=prevbranch,anycondition=True,thresh=solution.GetZeroThreshold())
# have to transfer the dictionary!
solvercheckzeros.dictequations = originalGlobalSymbols + solution.dictequations
solvercheckzeros.equationsused = AllEquations
solution.dictequations = []
prevbranch=[solvercheckzeros]
else:
prevbranch = [solution]+nextsolutions[var]
if len(prevbranch) == 0:
raise self.CannotSolveError('failed to add solution!')
if len(currentcases) >= self.maxcasedepth:
log.warn('c=%d, %d levels deep in checking degenerate cases, skipping...: %r', scopecounter, self.maxcasedepth, AllEquations)
lastbranch.append(AST.SolverBreak('%d cases reached'%self.maxcasedepth, [(var,self.SimplifyAtan2(self._SubstituteGlobalSymbols(eq))) for var, eq in currentcasesubs], othersolvedvars, solsubs, originalGlobalSymbols, endbranchtree))
return prevbranch
# fill the last branch with all the zero conditions
if hascheckzeros:
# count the number of rotation symbols seen in the current cases
numRotSymbolsInCases = 0
if self._iktype == 'transform6d' or self._iktype == 'rotation3d':
rotsymbols = set(self.Tee[:3,:3]).union([Symbol('new_r00'), Symbol('new_r01'), Symbol('new_r02'), Symbol('new_r10'), Symbol('new_r11'), Symbol('new_r12'), Symbol('new_r20'), Symbol('new_r21'), Symbol('new_r22')])
for var, eq in currentcasesubs:
if var in rotsymbols:
numRotSymbolsInCases += 1
else:
rotsymbols = []
            # if no equations are found, try setting two variables at once
# also try setting px, py, or pz to 0 (barrettwam4 lookat)
# sometimes can get the following: cj3**2*sj4**2 + cj4**2
for isolution,(solution,var) in enumerate(usedsolutions[::-1]):
localsubstitutioneqs = []
for checkzero in solution.checkforzeros:
if checkzero.has(*allvars):
log.info('ignoring special check for zero 2 since it has symbols %s: %s',str(allvars), str(checkzero))
continue
# don't bother trying to extract something if too complex (takes a lot of computation time to check and most likely nothing will be extracted). 120 is an arbitrary value
if self.codeComplexity(checkzero) > 120:
continue
possiblesubs = []
ishinge = []
for preal in self.Tee[:3,3]:
if checkzero.has(preal):
possiblesubs.append([(preal,S.Zero)])
ishinge.append(False)
# have to be very careful with the rotations since they are dependent on each other. For example if r00 and r01 are both 0, then r02 and r20 can never be 0 and either r10 or r11 has to be non-zero.
if numRotSymbolsInCases < 2:
for preal in rotsymbols:
if checkzero.has(preal):
possiblesubs.append([(preal,S.Zero)])
ishinge.append(False)
for othervar in othersolvedvars:
othervarobj = self.Variable(othervar)
if checkzero.has(*othervarobj.vars):
if not self.IsHinge(othervar.name):
possiblesubs.append([(othervar,S.Zero)])
ishinge.append(False)
continue
else:
sothervar = othervarobj.svar
cothervar = othervarobj.cvar
for value in [S.Zero,pi/2,pi,-pi/2]:
possiblesubs.append([(othervar,value),(sothervar,sin(value).evalf(n=30)),(sin(othervar),sin(value).evalf(n=30)), (cothervar,cos(value).evalf(n=30)), (cos(othervar),cos(value).evalf(n=30))])
ishinge.append(True)
# all possiblesubs are present in checkzero
for ipossiblesub, possiblesub in enumerate(possiblesubs):
eq = checkzero.subs(possiblesub).evalf(n=30)
if not self.isValidSolution(eq):
continue
# only take the first index
possiblevar,possiblevalue = possiblesub[0]
cond = Abs(possiblevar-possiblevalue.evalf(n=30))
if ishinge[ipossiblesub]:
evalcond = Abs(fmod(possiblevar-possiblevalue+pi,2*pi)-pi)
else:
evalcond = cond
if eq == S.Zero:
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs])),cond):
log.info('c=%d, adding case %s=%s in %s', scopecounter, possiblevar, possiblevalue,checkzero)
# if the variable is 1 and part of the rotation matrix, can deduce other variables
if possiblevar in rotsymbols and (possiblevalue == S.One or possiblevalue == -S.One):
row1 = int(possiblevar.name[-2])
col1 = int(possiblevar.name[-1])
possiblesub.append((Symbol('%s%d%d'%(possiblevar.name[:-2], row1, (col1+1)%3)), S.Zero))
possiblesub.append((Symbol('%s%d%d'%(possiblevar.name[:-2], row1, (col1+2)%3)), S.Zero))
possiblesub.append((Symbol('%s%d%d'%(possiblevar.name[:-2], (row1+1)%3, col1)), S.Zero))
possiblesub.append((Symbol('%s%d%d'%(possiblevar.name[:-2], (row1+2)%3, col1)), S.Zero))
checkexpr = [[cond],evalcond,possiblesub, []]
flatzerosubstitutioneqs.append(checkexpr)
localsubstitutioneqs.append(checkexpr)
continue
# try another possiblesub
for ipossiblesub2, possiblesub2 in enumerate(possiblesubs[ipossiblesub+1:]):
eq2 = eq.subs(possiblesub2).evalf(n=30)
if not self.isValidSolution(eq2):
continue
if eq2 == S.Zero:
possiblevar2,possiblevalue2 = possiblesub2[0]
cond2 = Abs(possiblevar2-possiblevalue2.evalf(n=30))
if ishinge[ipossiblesub+ipossiblesub2+1]:
evalcond2 = Abs(fmod(possiblevar2-possiblevalue2+pi,2*pi)-pi) + evalcond
else:
evalcond2 = cond2 + evalcond
cond2 += cond
if self.CheckExpressionUnique(handledconds+list(chain.from_iterable([tempeq[0] for tempeq in flatzerosubstitutioneqs])),cond2):
checkexpr = [[cond2],evalcond2,possiblesub+possiblesub2, []]
flatzerosubstitutioneqs.append(checkexpr)
localsubstitutioneqs.append(checkexpr)
# if the variables are both part of the rotation matrix and both zeros, can deduce other rotation variables
if self._iktype == 'transform6d' and possiblevar in rotsymbols and possiblevalue == S.Zero and possiblevar2 in rotsymbols and possiblevalue2 == S.Zero:
row1 = int(possiblevar.name[-2])
col1 = int(possiblevar.name[-1])
row2 = int(possiblevar2.name[-2])
col2 = int(possiblevar2.name[-1])
row3 = 3 - row1 - row2
col3 = 3 - col1 - col2
if row1 == row2:
# (row1, col3) is either 1 or -1, but don't know which.
# know that (row1+1,col3) and (row1+2,col3) are zero though...
checkexpr[2].append((Symbol('%s%d%d'%(possiblevar.name[:-2], (row2+1)%3, col3)), S.Zero))
checkexpr[2].append((Symbol('%s%d%d'%(possiblevar.name[:-2], (row1+2)%3, col3)), S.Zero))
                                            # furthermore can infer that the leftover 4 values are [cos(ang), sin(ang), -sin(ang), cos(ang)] = [ra, rb, rc, rd]
if row1 == 1:
minrow = 0
maxrow = 2
else:
minrow = (row1+1)%3
maxrow = (row1+2)%3
ra = Symbol('%s%d%d'%(possiblevar.name[:-2], minrow, col1))
rb = Symbol('%s%d%d'%(possiblevar.name[:-2], minrow, col2))
rc = Symbol('%s%d%d'%(possiblevar.name[:-2], maxrow, col1))
rd = Symbol('%s%d%d'%(possiblevar.name[:-2], maxrow, col2))
checkexpr[2].append((rb**2, S.One-ra**2))
checkexpr[2].append((rb**3, rb-rb*ra**2)) # need 3rd power since sympy cannot divide out the square
checkexpr[2].append((rc**2, S.One-ra**2))
checkexpr[2].append((rc, -rb))
checkexpr[2].append((rd, ra))
elif col1 == col2:
# (row3, col1) is either 1 or -1, but don't know which.
# know that (row3,col1+1) and (row3,col1+2) are zero though...
checkexpr[2].append((Symbol('%s%d%d'%(possiblevar.name[:-2], row3, (col1+1)%3)), S.Zero))
checkexpr[2].append((Symbol('%s%d%d'%(possiblevar.name[:-2], row3, (col1+2)%3)), S.Zero))
                                            # furthermore can infer that the leftover 4 values are [cos(ang), sin(ang), -sin(ang), cos(ang)] = [ra, rb, rc, rd]
if col1 == 1:
mincol = 0
maxcol = 2
else:
mincol = (col1+1)%3
maxcol = (col1+2)%3
ra = Symbol('%s%d%d'%(possiblevar.name[:-2], row1, mincol))
rb = Symbol('%s%d%d'%(possiblevar.name[:-2], row2, mincol))
rc = Symbol('%s%d%d'%(possiblevar.name[:-2], row1, maxcol))
rd = Symbol('%s%d%d'%(possiblevar.name[:-2], row2, maxcol))
checkexpr[2].append((rb**2, S.One-ra**2))
checkexpr[2].append((rb**3, rb-rb*ra**2)) # need 3rd power since sympy cannot divide out the square
checkexpr[2].append((rc**2, S.One-ra**2))
checkexpr[2].append((rc, -rb))
checkexpr[2].append((rd, ra))
log.info('dual constraint %r in %s', checkexpr[2],checkzero)
zerosubstitutioneqs.append(localsubstitutioneqs)
# test the solutions
        # have to take the cross product of all the zerosubstitutioneqs in order to form stronger constraints on the equations, because the following condition will be executed only if all SolverCheckZeros evaluate to 0
zerobranches = []
accumequations = []
# since sequence_cross_product requires all lists to be non-empty, insert None for empty lists
for conditioneqs in zerosubstitutioneqs:
if len(conditioneqs) == 0:
conditioneqs.append(None)
for conditioneqs in self.sequence_cross_product(*zerosubstitutioneqs):
validconditioneqs = [c for c in conditioneqs if c is not None]
if len(validconditioneqs) > 1:
# merge the equations, be careful not to merge equations constraining the same variable
cond = []
evalcond = S.Zero
othervarsubs = []
dictequations = []
duplicatesub = False
for subcond, subevalcond, subothervarsubs, subdictequations in validconditioneqs:
cond += subcond
evalcond += abs(subevalcond)
for subothervarsub in subothervarsubs:
if subothervarsub[0] in [sym for sym,value in othervarsubs]:
# variable is duplicated
duplicatesub = True
break
othervarsubs.append(subothervarsub)
if duplicatesub:
break
dictequations += subdictequations
if not duplicatesub:
flatzerosubstitutioneqs.append([cond,evalcond,othervarsubs,dictequations])
if self._iktype == 'transform6d' or self._iktype == 'rotation3d':
trysubstitutions = self.ppsubs+self.npxyzsubs+self.rxpsubs
else:
trysubstitutions = self.ppsubs
for cond, evalcond, othervarsubs, dictequations in flatzerosubstitutioneqs:
# have to convert to fractions before substituting!
if not all([self.isValidSolution(v) for s,v in othervarsubs]):
continue
othervarsubs = [(s,self.ConvertRealToRationalEquation(v)) for s,v in othervarsubs]
NewEquations = [eq.subs(othervarsubs) for eq in AllEquations]
NewEquationsClean = self.PropagateSolvedConstants(NewEquations, othersolvedvars, curvars)
try:
# forcing a value, so have to check if all equations in NewEquations that do not contain
# unknown variables are really 0
extrazerochecks=[]
for i in range(len(NewEquations)):
expr = NewEquations[i]
if not self.isValidSolution(expr):
log.warn('not valid: %s',expr)
extrazerochecks=None
break
if not expr.has(*allvars) and self.CheckExpressionUnique(extrazerochecks,expr):
extrazerochecks.append(expr.subs(solsubs).evalf(n=30))
if extrazerochecks is not None:
newcases = set(currentcases)
for singlecond in cond:
newcases.add(singlecond)
if not self.degeneratecases.hascases(newcases):
log.info('c=%d, starting newcases: %r', scopecounter, newcases)
if len(NewEquationsClean) > 0:
newcasesubs = currentcasesubs+othervarsubs
self.globalsymbols = []
for casesub in newcasesubs:
self._AddToGlobalSymbols(casesub[0], casesub[1])
extradictequations = []
for s,v in trysubstitutions:
neweq = v.subs(newcasesubs)
if neweq != v:
# should we make sure we're not adding it a second time?
newcasesubs.append((s, neweq))
extradictequations.append((s, neweq))
self._AddToGlobalSymbols(s, neweq)
for var, eq in chain(originalGlobalSymbols, dictequations):
neweq = eq.subs(othervarsubs)
if not self.isValidSolution(neweq):
raise self.CannotSolveError('equation %s is invalid because of the following substitutions: %s'%(eq, othervarsubs))
if neweq == S.Zero:
extradictequations.append((var, S.Zero))
self._AddToGlobalSymbols(var, neweq)
if len(extradictequations) > 0:
# have to re-substitute since some equations evaluated to zero
NewEquationsClean = [eq.subs(extradictequations) for eq in NewEquationsClean]
newtree = self.SolveAllEquations(NewEquationsClean,curvars,othersolvedvars,solsubs,endbranchtree,currentcases=newcases, currentcasesubs=newcasesubs, unknownvars=unknownvars)
accumequations.append(NewEquationsClean) # store the equations for debugging purposes
else:
log.info('there are no new equations, so most likely the following variables can be freely determined: %r', curvars)
# unfortunately cannot add as a FreeVariable since all the left over variables will have complex dependencies
# therefore, iterate a couple of jointevals
newtree = []
for curvar in curvars:
newtree.append(AST.SolverSolution(curvar.name, jointeval=[S.Zero,pi/2,pi,-pi/2], isHinge=self.IsHinge(curvar.name)))
newtree += endbranchtree
zerobranches.append(([evalcond]+extrazerochecks,newtree,dictequations))
log.info('c=%d, adding newcases: %r', scopecounter, newcases)
self.degeneratecases.addcases(newcases)
else:
log.warn('already has handled cases %r', newcases)
except self.CannotSolveError, e:
log.debug(e)
continue
finally:
# restore the global symbols
self.globalsymbols = originalGlobalSymbols
if len(zerobranches) > 0:
branchconds = AST.SolverBranchConds(zerobranches+[(None,[AST.SolverBreak('branch miss %r'%curvars, [(var,self._SubstituteGlobalSymbols(eq)) for var, eq in currentcasesubs], othersolvedvars, solsubs, originalGlobalSymbols, endbranchtree)],[])])
branchconds.accumequations = accumequations
lastbranch.append(branchconds)
else:
# add GuessValuesAndSolveEquations?
lastbranch.append(AST.SolverBreak('no branches %r'%curvars, [(var,self.SimplifyAtan2(self._SubstituteGlobalSymbols(eq))) for var, eq in currentcasesubs], othersolvedvars, solsubs, originalGlobalSymbols, endbranchtree))
return prevbranch
def GuessValuesAndSolveEquations(self, AllEquations, curvars, othersolvedvars, solsubs, endbranchtree, currentcases=None, unknownvars=None, currentcasesubs=None):
# perhaps there's a degree of freedom that is not trivial to compute?
# take the highest hinge variable and set it
scopecounter = int(self._scopecounter)
hingevariables = [curvar for curvar in sorted(curvars,reverse=True) if self.IsHinge(curvar.name)]
if len(hingevariables) > 0 and len(curvars) >= 2:
curvar = hingevariables[0]
leftovervars = list(curvars)
leftovervars.remove(curvar)
newtree = [AST.SolverConditionedSolution([])]
zerovalues = []
for jointeval in [S.Zero,pi/2,pi,-pi/2]:
checkzeroequations = []
NewEquations = []
for eq in AllEquations:
neweq = eq.subs(curvar, jointeval)
neweqeval = neweq.evalf()
if neweq.is_number:
# if zero, then can ignore
if neweq == S.Zero:
continue
                        # if not zero, then a contradiction, so jointeval is bad
NewEquations = None
break
if neweq.has(*leftovervars):
NewEquations.append(neweq)
else:
checkzeroequations.append(neweq)
if NewEquations is None:
continue
# check to make sure all leftover vars are in scope
cansolve = True
for leftovervar in leftovervars:
if not any([eq.has(leftovervar) for eq in NewEquations]):
cansolve = False
break
if not cansolve:
continue
if len(checkzeroequations) > 0:
solution = AST.SolverSolution(curvar.name, jointeval=[jointeval], isHinge=self.IsHinge(curvar.name))
solution.checkforzeros = checkzeroequations
solution.FeasibleIsZeros = True
newtree[0].solversolutions.append(solution)
else:
# one value is enough
zerovalues.append(jointeval)
if len(zerovalues) > 0:
# prioritize these solutions since they don't come with any extra checks
solution = AST.SolverSolution(curvar.name, jointeval=zerovalues, isHinge=self.IsHinge(curvar.name))
solution.FeasibleIsZeros = True
newtree = [solution]
elif len(newtree[0].solversolutions) == 0:
# nothing found so remove the condition node
newtree = []
if len(newtree) > 0:
log.warn('c=%d, think there is a free variable, but cannot solve relationship, so setting variable %s', scopecounter, curvar)
newtree += self.SolveAllEquations(AllEquations, leftovervars, othersolvedvars+[curvar], solsubs+self.Variable(curvar).subs, endbranchtree,currentcases=currentcases, currentcasesubs=currentcasesubs, unknownvars=unknownvars)
return newtree
raise self.CannotSolveError('cannot find a good variable')
def SolvePairVariablesHalfAngle(self,raweqns,var0,var1,othersolvedvars,subs=None):
"""solves equations of two variables in sin and cos
"""
varsym0 = self.Variable(var0)
varsym1 = self.Variable(var1)
varsyms = [varsym0,varsym1]
unknownvars=[varsym0.cvar,varsym0.svar,varsym1.cvar,varsym1.svar]
varsubs=varsym0.subs+varsym1.subs
varsubsinv = varsym0.subsinv+varsym1.subsinv
halftansubs = []
for varsym in varsyms:
halftansubs += [(varsym.cvar,(1-varsym.htvar**2)/(1+varsym.htvar**2)),(varsym.svar,2*varsym.htvar/(1+varsym.htvar**2))]
dummyvars = []
for othervar in othersolvedvars:
v = self.Variable(othervar)
dummyvars += [v.cvar,v.svar,v.var,v.htvar]
polyeqs = []
for eq in raweqns:
trigsubs = [(varsym0.svar**2,1-varsym0.cvar**2), (varsym0.svar**3,varsym0.svar*(1-varsym0.cvar**2)), (varsym1.svar**2,1-varsym1.cvar**2), (varsym1.svar**3,varsym1.svar*(1-varsym1.cvar**2))]
peq = Poly(eq.subs(varsubs).subs(trigsubs).expand().subs(trigsubs),*unknownvars)
if peq.has(varsym0.var) or peq.has(varsym1.var):
raise self.CannotSolveError('expecting only sin and cos! %s'%peq)
maxmonoms = [0,0,0,0]
maxdenom = [0,0]
for monoms in peq.monoms():
for i in range(4):
maxmonoms[i] = max(maxmonoms[i],monoms[i])
maxdenom[0] = max(maxdenom[0],monoms[0]+monoms[1])
maxdenom[1] = max(maxdenom[1],monoms[2]+monoms[3])
eqnew = S.Zero
for monoms,c in peq.terms():
term = c
for i in range(4):
num,denom = fraction(halftansubs[i][1])
term *= num**monoms[i]
# the denoms for 0,1 and 2,3 are the same
for i in [0,2]:
denom = fraction(halftansubs[i][1])[1]
term *= denom**(maxdenom[i/2]-monoms[i]-monoms[i+1])
complexityvalue = self.codeComplexity(term.expand())
if complexityvalue < 1000:
eqnew += simplify(term)
else:
# too big, so don't simplify?
eqnew += term
polyeq = Poly(eqnew,varsym0.htvar,varsym1.htvar)
if polyeq.TC() == S.Zero:
# might be able to divide out variables?
minmonoms = None
for monom in polyeq.monoms():
if minmonoms is None:
minmonoms = list(monom)
else:
for i in range(len(minmonoms)):
minmonoms[i] = min(minmonoms[i],monom[i])
newpolyeq = Poly(S.Zero,*polyeq.gens)
for m,c in polyeq.terms():
newm = list(m)
for i in range(len(minmonoms)):
newm[i] -= minmonoms[i]
newpolyeq = newpolyeq.add(Poly.from_dict({tuple(newm):c},*newpolyeq.gens))
log.warn('converting polyeq "%s" to "%s"'%(polyeq,newpolyeq))
# check if any equations are only in one variable
polyeq = newpolyeq
polyeqs.append(polyeq)
try:
return self.solveSingleVariable(self.sortComplexity([e.as_expr() for e in polyeqs if not e.has(varsym1.htvar)]),varsym0.var,othersolvedvars,unknownvars=[])
except self.CannotSolveError:
pass
try:
return self.solveSingleVariable(self.sortComplexity([e.as_expr() for e in polyeqs if not e.has(varsym0.htvar)]),varsym1.var,othersolvedvars,unknownvars=[])
except self.CannotSolveError:
pass
complexity = [(self.codeComplexity(peq.as_expr()),peq) for peq in polyeqs]
complexity.sort(key=itemgetter(0))
polyeqs = [peq[1] for peq in complexity]
solutions = [None,None]
linearsolution = None
for ileftvar in range(2):
if linearsolution is not None:
break
leftvar = varsyms[ileftvar].htvar
newpolyeqs = [Poly(eq,varsyms[1-ileftvar].htvar) for eq in polyeqs]
mindegree = __builtin__.min([max(peq.degree_list()) for peq in newpolyeqs])
maxdegree = __builtin__.max([max(peq.degree_list()) for peq in newpolyeqs])
for peq in newpolyeqs:
if len(peq.monoms()) == 1:
possiblefinaleq = self.checkFinalEquation(Poly(peq.LC(),leftvar),subs)
if possiblefinaleq is not None:
solutions[ileftvar] = [possiblefinaleq]
break
for degree in range(mindegree,maxdegree+1):
if solutions[ileftvar] is not None or linearsolution is not None:
break
newpolyeqs2 = [peq for peq in newpolyeqs if max(peq.degree_list()) <= degree]
if degree+1 <= len(newpolyeqs2):
# in order to avoid wrong solutions, have to get resultants for all equations
possibilities = []
unusedindices = range(len(newpolyeqs2))
for eqsindices in combinations(range(len(newpolyeqs2)),degree+1):
Mall = zeros((degree+1,degree+1))
totalcomplexity = 0
for i,eqindex in enumerate(eqsindices):
eq = newpolyeqs2[eqindex]
for j,c in eq.terms():
totalcomplexity += self.codeComplexity(c.expand())
Mall[i,j[0]] = c
if degree >= 4 and totalcomplexity > 5000:
# the determinant will never finish otherwise
continue
# det_bareis freezes when there are huge fractions
#det=self.det_bareis(Mall,*(self.pvars+dummyvars+[leftvar]))
# for i in range(Mall.shape[0]):
# for j in range(Mall.shape[1]):
# Mall[i,j] = Poly(Mall[i,j],leftvar)
Malldet = Mall.berkowitz_det()
possiblefinaleq = self.checkFinalEquation(Poly(Malldet,leftvar),subs)
if possiblefinaleq is not None:
# sometimes +- I are solutions, so remove them
q,r = div(possiblefinaleq,leftvar+I)
if r == S.Zero:
possiblefinaleq = Poly(q,leftvar)
q,r = div(possiblefinaleq,leftvar-I)
if r == S.Zero:
possiblefinaleq = Poly(q,leftvar)
possibilities.append(possiblefinaleq)
for eqindex in eqsindices:
if eqindex in unusedindices:
unusedindices.remove(eqindex)
if len(unusedindices) == 0:
break
if len(possibilities) > 0:
if len(possibilities) > 1:
try:
linearsolutions = self.solveVariablesLinearly(possibilities,othersolvedvars)
# if can solve for a unique solution linearly, then prioritize this over anything
prevsolution = AST.SolverBreak('SolvePairVariablesHalfAngle fail')
for divisor,linearsolution in linearsolutions:
assert(len(linearsolution)==1)
divisorsymbol = self.gsymbolgen.next()
solversolution = AST.SolverSolution(varsyms[ileftvar].name,jointeval=[2*atan(linearsolution[0]/divisorsymbol)],isHinge=self.IsHinge(varsyms[ileftvar].name))
prevsolution = AST.SolverCheckZeros(varsyms[ileftvar].name,[divisorsymbol],zerobranch=[prevsolution],nonzerobranch=[solversolution],thresh=1e-6)
prevsolution.dictequations = [(divisorsymbol,divisor)]
linearsolution = prevsolution
break
except self.CannotSolveError:
pass
# sort with respect to degree
equationdegrees = [(max(peq.degree_list())*100000+self.codeComplexity(peq.as_expr()),peq) for peq in possibilities]
equationdegrees.sort(key=itemgetter(0))
solutions[ileftvar] = [peq[1] for peq in equationdegrees]
break
if linearsolution is not None:
return [linearsolution]
# take the solution with the smallest degree
pfinals = None
ileftvar = None
if solutions[0] is not None:
if solutions[1] is not None:
if max(solutions[1][0].degree_list()) < max(solutions[0][0].degree_list()):
pfinals = solutions[1]
ileftvar = 1
elif max(solutions[1][0].degree_list()) == max(solutions[0][0].degree_list()) and self.codeComplexity(solutions[1][0].as_expr()) < self.codeComplexity(solutions[0][0].as_expr()):
pfinals = solutions[1]
ileftvar = 1
else:
pfinals = solutions[0]
ileftvar = 0
else:
pfinals = solutions[0]
ileftvar = 0
elif solutions[1] is not None:
pfinals = solutions[1]
ileftvar = 1
dictequations = []
if pfinals is None:
#simplifyfn = self._createSimplifyFn(self.freejointvars,self.freevarsubs,self.freevarsubsinv)
for newreducedeqs in combinations(polyeqs,2):
try:
Mall = None
for ileftvar in range(2):
# TODO, sometimes this works and sometimes this doesn't
try:
Mall, allmonoms = self.solveDialytically(newreducedeqs,ileftvar,returnmatrix=True)
if Mall is not None:
leftvar=polyeqs[0].gens[ileftvar]
break
except self.CannotSolveError, e:
log.debug(e)
if Mall is None:
continue
shape=Mall[0].shape
assert(shape[0] == 4 and shape[1] == 4)
Malltemp = [None]*len(Mall)
M = zeros(shape)
for idegree in range(len(Mall)):
Malltemp[idegree] = zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
if Mall[idegree][i,j] != S.Zero:
if self.codeComplexity(Mall[idegree][i,j])>5:
sym = self.gsymbolgen.next()
Malltemp[idegree][i,j] = sym
dictequations.append((sym,Mall[idegree][i,j]))
else:
Malltemp[idegree][i,j] = Mall[idegree][i,j]
M += Malltemp[idegree]*leftvar**idegree
tempsymbols = [self.gsymbolgen.next() for i in range(16)]
tempsubs = []
for i in range(16):
if M[i] != S.Zero:
tempsubs.append((tempsymbols[i],Poly(M[i],leftvar)))
else:
tempsymbols[i] = S.Zero
Mtemp = Matrix(4,4,tempsymbols)
dettemp=Mtemp.det()
log.info('multiplying all determinant coefficients for solving %s',leftvar)
eqadds = []
for arg in dettemp.args:
eqmuls = [Poly(arg2.subs(tempsubs),leftvar) for arg2 in arg.args]
if sum(eqmuls[0].degree_list()) == 0:
eq = eqmuls.pop(0)
eqmuls[0] = eqmuls[0]*eq
while len(eqmuls) > 1:
ioffset = 0
eqmuls2 = []
while ioffset < len(eqmuls)-1:
eqmuls2.append(eqmuls[ioffset]*eqmuls[ioffset+1])
ioffset += 2
eqmuls = eqmuls2
eqadds.append(eqmuls[0])
det = Poly(S.Zero,leftvar)
for eq in eqadds:
det += eq
if len(Mall) <= 3:
                        # need to simplify further since self.globalsymbols can contain important substitutions that can make the entire determinant zero
log.info('attempting to simplify determinant...')
newdet = Poly(S.Zero,leftvar)
for m,c in det.terms():
origComplexity = self.codeComplexity(c)
# 100 is a guess
if origComplexity < 100:
neweq = c.subs(dictequations)
if self.codeComplexity(neweq) < 100:
neweq = self._SubstituteGlobalSymbols(neweq).expand()
newComplexity = self.codeComplexity(neweq)
if newComplexity < origComplexity:
c = neweq
newdet += c*leftvar**m[0]
det = newdet
if det.degree(0) <= 0:
continue
pfinals = [det]
break
except self.CannotSolveError,e:
log.debug(e)
if pfinals is None:
raise self.CannotSolveError('SolvePairVariablesHalfAngle: failed to solve dialytically with %d equations'%(len(polyeqs)))
jointsol = 2*atan(varsyms[ileftvar].htvar)
solution = AST.SolverPolynomialRoots(jointname=varsyms[ileftvar].name,poly=pfinals[0],jointeval=[jointsol],isHinge=self.IsHinge(varsyms[ileftvar].name))
solution.checkforzeros = []
solution.postcheckforzeros = []
if len(pfinals) > 1:
# verify with at least one solution
solution.postcheckfornonzeros = [peq.as_expr() for peq in pfinals[1:2]]
solution.postcheckforrange = []
solution.dictequations = dictequations
solution.thresh = 1e-9 # depending on the degree, can expect small coefficients to be still valid
solution.AddHalfTanValue = True
return [solution]
def _createSimplifyFn(self,vars,varsubs,varsubsinv):
return lambda eq: self.trigsimp(eq.subs(varsubsinv),vars).subs(varsubs)
def solveVariablesLinearly(self,polyeqs,othersolvedvars,maxsolvabledegree=4):
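        """solves for the monomials of polyeqs, treating each monomial as an independent unknown.
        A best-effort summary of the code below: choose enough equations to form a
        square linear system with non-zero determinant and solve it via the adjugate
        matrix, returning [determinant, solution] pairs (at least two are generated
        in case the first determinant later evaluates to zero).
        """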
log.debug('solvevariables=%r, othersolvedvars=%r',polyeqs[0].gens,othersolvedvars)
nummonoms = [len(peq.monoms())-int(peq.TC()!=S.Zero) for peq in polyeqs]
mindegree = __builtin__.min(nummonoms)
maxdegree = min(__builtin__.max(nummonoms),len(polyeqs))
complexity = [(self.codeComplexity(peq.as_expr()),peq) for peq in polyeqs]
complexity.sort(key=itemgetter(0))
polyeqs = [peq[1] for peq in complexity]
trigsubs = []
trigsubsinv = []
for othervar in othersolvedvars:
v = self.Variable(othervar)
trigsubs += v.subs
trigsubsinv += v.subsinv
symbolscheck = []
for i,solvevar in enumerate(polyeqs[0].gens):
monom = [0]*len(polyeqs[0].gens)
monom[i] = 1
symbolscheck.append(tuple(monom))
solutions = []
for degree in range(mindegree,maxdegree+1):
allindices = [i for i,n in enumerate(nummonoms) if n <= degree]
if len(allindices) >= degree:
allmonoms = set()
for index in allindices:
allmonoms = allmonoms.union(set(polyeqs[index].monoms()))
allmonoms = list(allmonoms)
allmonoms.sort()
if __builtin__.sum(allmonoms[0]) == 0:
allmonoms.pop(0)
                # every solve variable has to appear as a standalone degree-1 monomial
if not all([check in allmonoms for check in symbolscheck]):
continue
if len(allmonoms) == degree:
if degree > maxsolvabledegree:
                        log.warn('cannot handle linear solving with degree > %d', maxsolvabledegree)
continue
systemequations = []
consts = []
for index in allindices:
pdict = polyeqs[index].as_dict()
systemequations.append([pdict.get(monom,S.Zero) for monom in allmonoms])
consts.append(-polyeqs[index].TC())
                    # generate at least two solutions in case the first one's determinant is 0
solutions = []
for startrow in range(len(systemequations)):
rows = [startrow]
M = Matrix(1,len(allmonoms),systemequations[rows[0]])
for i in range(startrow+1,len(systemequations)):
numequationsneeded = M.shape[1] - M.shape[0]
if i+numequationsneeded > len(systemequations):
# cannot do anything
break
mergedsystemequations = list(systemequations[i])
for j in range(1,numequationsneeded):
mergedsystemequations += systemequations[i+j]
M2 = M.col_join(Matrix(numequationsneeded,len(allmonoms),mergedsystemequations))
Mdet = M2.det()
if Mdet != S.Zero:
M = M2
for j in range(numequationsneeded):
rows.append(i+j)
break
if M.shape[0] == M.shape[1]:
Mdet = self.trigsimp(Mdet.subs(trigsubsinv),othersolvedvars).subs(trigsubs)
#Minv = M.inv()
B = Matrix(M.shape[0],1,[consts[i] for i in rows])
Madjugate = M.adjugate()
solution = []
for check in symbolscheck:
value = Madjugate[allmonoms.index(check),:]*B
solution.append(self.trigsimp(value[0].subs(trigsubsinv),othersolvedvars).subs(trigsubs))
solutions.append([Mdet,solution])
if len(solutions) >= 2:
break
if len(solutions) > 0:
break
if len(solutions) == 0:
raise self.CannotSolveError('solveVariablesLinearly failed')
return solutions
def solveSingleVariableLinearly(self,raweqns,solvevar,othervars,maxnumeqs=2,douniquecheck=True):
"""tries to linearly solve for one variable treating everything else as constant.
need at least 3 equations
"""
cvar = Symbol('c%s'%solvevar.name)
svar = Symbol('s%s'%solvevar.name)
varsubs = [(cos(solvevar),cvar),(sin(solvevar),svar)]
othervarsubs = [(sin(v)**2,1-cos(v)**2) for v in othervars]
eqpolys = [Poly(eq.subs(varsubs),cvar,svar) for eq in raweqns]
eqpolys = [eq for eq in eqpolys if sum(eq.degree_list()) == 1 and not eq.TC().has(solvevar)]
#eqpolys.sort(lambda x,y: iksolver.codeComplexity(x) - iksolver.codeComplexity(y))
partialsolutions = []
neweqs = []
for p0,p1 in combinations(eqpolys,2):
p0dict = p0.as_dict()
p1dict = p1.as_dict()
M = Matrix(2,3,[p0dict.get((1,0),S.Zero),p0dict.get((0,1),S.Zero),p0.TC(),p1dict.get((1,0),S.Zero),p1dict.get((0,1),S.Zero),p1.TC()])
M = M.subs(othervarsubs).expand()
partialsolution = [-M[1,1]*M[0,2]+M[0,1]*M[1,2],M[1,0]*M[0,2]-M[0,0]*M[1,2],M[0,0]*M[1,1]-M[0,1]*M[1,0]]
partialsolution = [eq.expand().subs(othervarsubs).expand() for eq in partialsolution]
rank = [self.codeComplexity(eq) for eq in partialsolution]
partialsolutions.append([rank,partialsolution])
# cos(A)**2 + sin(A)**2 - 1 = 0, useful equation but the squares introduce wrong solutions
#neweqs.append(partialsolution[0]**2+partialsolution[1]**2-partialsolution[2]**2)
# try to cross
partialsolutions.sort(lambda x, y: int(min(x[0])-min(y[0])))
for (rank0,ps0),(rank1,ps1) in combinations(partialsolutions,2):
if self.equal(ps0[0]*ps1[2]-ps1[0]*ps0[2],S.Zero):
continue
neweqs.append(ps0[0]*ps1[2]-ps1[0]*ps0[2])
neweqs.append(ps0[1]*ps1[2]-ps1[1]*ps0[2])
# probably a linear combination of the first two
#neweqs.append(ps0[0]*ps1[1]-ps1[0]*ps0[1])
# too long
#neweqs.append(ps0[0]*ps1[0]+ps0[1]*ps1[1]-ps0[2]*ps1[2])
if len(neweqs) >= maxnumeqs:
                break
neweqs2 = [eq.expand().subs(othervarsubs).expand() for eq in neweqs]
if douniquecheck:
reducedeqs = []
i = 0
while i < len(neweqs2):
reducedeq = self.removecommonexprs(neweqs2[i])
if neweqs2[i] != S.Zero and self.CheckExpressionUnique(reducedeqs,reducedeq):
reducedeqs.append(reducedeq)
i += 1
else:
eq=neweqs2.pop(i)
return neweqs2
def solveHighDegreeEquationsHalfAngle(self,lineareqs,varsym,subs=None):
"""solve a set of equations in one variable with half-angle substitution
"""
dummysubs = [(varsym.cvar,(1-varsym.htvar**2)/(1+varsym.htvar**2)),(varsym.svar,2*varsym.htvar/(1+varsym.htvar**2))]
polyeqs = []
for eq in lineareqs:
trigsubs = [(varsym.svar**2,1-varsym.cvar**2), (varsym.svar**3,varsym.svar*(1-varsym.cvar**2))]
try:
peq = Poly(eq.subs(varsym.subs).subs(trigsubs),varsym.cvar,varsym.svar)
except PolynomialError, e:
raise self.CannotSolveError('solveHighDegreeEquationsHalfAngle: poly error (%r)'%eq)
if peq.has(varsym.var):
raise self.CannotSolveError('solveHighDegreeEquationsHalfAngle: expecting only sin and cos! %s'%peq)
if sum(peq.degree_list()) == 0:
continue
# check if all terms are multiples of cos/sin
maxmonoms = [0,0]
maxdenom = 0
for monoms in peq.monoms():
for i in range(2):
maxmonoms[i] = max(maxmonoms[i],monoms[i])
maxdenom = max(maxdenom,monoms[0]+monoms[1])
eqnew = S.Zero
for monoms,c in peq.terms():
if c.evalf() != S.Zero: # big fractions might make this difficult to reduce to 0
term = c
for i in range(2):
num,denom = fraction(dummysubs[i][1])
term *= num**monoms[i]
# both substitutions share the same denominator (1+htvar**2)
denom = fraction(dummysubs[0][1])[1]
term *= denom**(maxdenom-monoms[0]-monoms[1])
eqnew += simplify(term)
polyeqs.append(Poly(eqnew,varsym.htvar))
for peq in polyeqs:
# do some type of resultants, for now just choose first polynomial
finaleq = simplify(peq.as_expr()).expand()
pfinal = Poly(self.removecommonexprs(finaleq,onlygcd=False,onlynumbers=True),varsym.htvar)
pfinal = self.checkFinalEquation(pfinal,subs)
if pfinal is not None and pfinal.degree(0) > 0:
jointsol = 2*atan(varsym.htvar)
solution = AST.SolverPolynomialRoots(jointname=varsym.name,poly=pfinal,jointeval=[jointsol],isHinge=self.IsHinge(varsym.name))
solution.AddHalfTanValue = True
solution.checkforzeros = []
solution.postcheckforzeros = []
solution.postcheckfornonzeros = []
solution.postcheckforrange = []
return solution
raise self.CannotSolveError('half-angle substitution for joint %s failed, %d equations examined'%(varsym.var,len(polyeqs)))
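# A standalone sketch of the half-angle (Weierstrass) substitution used above,
# assuming a plain sympy session; x and t are illustrative symbols, not solver
# state:
#
# from sympy import symbols, cos, sin, atan, together, fraction, solve
# x, t = symbols('x t')
# eq = 3*cos(x) + 4*sin(x) - 5
# # substitute cos(x) -> (1-t**2)/(1+t**2) and sin(x) -> 2*t/(1+t**2)
# rationaleq = eq.subs([(cos(x), (1-t**2)/(1+t**2)), (sin(x), 2*t/(1+t**2))])
# poly = fraction(together(rationaleq))[0]   # clear the common (1+t**2) denominator
# roots = solve(poly, t)                     # here: [1/2]
# solutions = [2*atan(r) for r in roots]     # invert t = tan(x/2)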
def checkFinalEquation(self,pfinal,subs=None):
"""check an equation in one variable for validity
"""
assert(len(pfinal.gens)==1)
if subs is None:
subs = []
htvar = pfinal.gens[0]
# factor out trivial roots at zero: divide by htvar while the trailing coefficient is zero
while sum(pfinal.degree_list()) > 0 and pfinal.TC() == S.Zero:
pfinalnew = Poly(S.Zero,htvar)
for m,c in pfinal.terms():
if m[0] > 0:
pfinalnew += c*htvar**(m[0]-1)
pfinal = pfinalnew
# check to see that LC is non-zero for at least one solution
if pfinal.LC().evalf() == S.Zero or all([pfinal.LC().subs(subs).subs(self.globalsymbols).subs(testconsistentvalue).evalf()==S.Zero for testconsistentvalue in self.testconsistentvalues]):
return None
# sanity check that the polynomial can produce a solution and that its coefficients are not actually very small values
found = False
LCnormalized, common = self.removecommonexprs(pfinal.LC(),returncommon=True,onlygcd=False,onlynumbers=True)
pfinaldict = pfinal.as_dict()
for testconsistentvalue in self.testconsistentvalues:
coeffs = []
globalsymbols = [(s,v.subs(self.globalsymbols).subs(testconsistentvalue).evalf()) for s,v in self.globalsymbols]
for degree in range(pfinal.degree(0),-1,-1):
coeffs.append(pfinaldict.get((degree,),S.Zero).subs(subs).subs(globalsymbols+testconsistentvalue).evalf()/common.evalf())
# since coeffs[0] is normalized with the LC constant, can compare for precision
if len(coeffs) == 1 and Abs(coeffs[0]) < 2*(10.0**-self.precision):
coeffs = None
break
if coeffs is None:
continue
if not all([c.is_number for c in coeffs]):
# cannot evaluate
log.warn('cannot evaluate: %s',coeffs)
found = True
break
realsolution = pfinal.gens[0].subs(subs).subs(self.globalsymbols).subs(testconsistentvalue).evalf()
# need to convert to float64 first, X.evalf() is still a sympy object
roots = mpmath.polyroots(numpy.array(numpy.array(coeffs),numpy.float64))
for root in roots:
if Abs(float(root.imag)) < 10.0**-self.precision and Abs(float(root.real)-realsolution) < 10.0**-(self.precision-2):
found = True
break
if found:
break
return pfinal if found else None
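# A minimal numeric sketch of the root validation above, assuming mpmath is
# available; the coefficients and the expected root are made-up test values,
# not solver state:
#
# import mpmath
# coeffs = [1.0, -3.0, 2.0]                  # t**2 - 3*t + 2
# expected = 2.0                             # known solution at the test values
# roots = mpmath.polyroots(coeffs)
# found = any(abs(float(r.imag)) < 1e-8 and abs(float(r.real) - expected) < 1e-6
#             for r in roots)
# # found is True here; checkFinalEquation rejects pfinal when no root matches.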
def solveSingleVariable(self,raweqns,var,othersolvedvars,maxsolutions=4,maxdegree=2,subs=None, unknownvars=None):
varsym = self.Variable(var)
vars = [varsym.cvar,varsym.svar,varsym.htvar,var]
othersubs = []
for othersolvedvar in othersolvedvars:
othersubs += self.Variable(othersolvedvar).subs
# eqns = []
# for eq in raweqns:
# if eq.has(*vars):
# # for equations that are very complex, make sure at least one set of values yields a non zero equation
# testeq = eq.subs(varsym.subs+othersubs)
# if any([testeq.subs(testconsistentvalue).evalf()!=S.Zero for testconsistentvalue in self.testconsistentvalues]):
# eqns.append(eq)
eqns = [eq.expand() for eq in raweqns if eq.has(*vars)]
if len(eqns) == 0:
raise self.CannotSolveError('not enough equations')
# prioritize finding a solution when var is alone
returnfirstsolutions = []
for eq in eqns:
symbolgen = cse_main.numbered_symbols('const')
eqnew, symbols = self.groupTerms(eq.subs(varsym.subs), vars, symbolgen)
try:
ps = Poly(eqnew,varsym.svar)
pc = Poly(eqnew,varsym.cvar)
if sum(ps.degree_list()) > 0 or sum(pc.degree_list()) > 0 or ps.TC() == S.Zero or pc.TC() == S.Zero:
continue
except PolynomialError:
continue
numvar = self.countVariables(eqnew,var)
if numvar >= 1 and numvar <= 2:
tempsolutions = solve(eqnew,var)
jointsolutions = [self.trigsimp(s.subs(symbols),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions)>0:
# check if any solutions don't have divide by zero problems
returnfirstsolutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.IsHinge(var.name)))
hasdividebyzero = any([len(self.checkForDivideByZero(self._SubstituteGlobalSymbols(s)))>0 for s in jointsolutions])
if not hasdividebyzero:
return returnfirstsolutions
numvar = self.countVariables(eqnew,varsym.htvar)
if Poly(eqnew,varsym.htvar).TC() != S.Zero and numvar >= 1 and numvar <= 2:
tempsolutions = solve(eqnew,varsym.htvar)
jointsolutions = [2*atan(self.trigsimp(s.subs(symbols),othersolvedvars)) for s in tempsolutions]
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions)>0:
returnfirstsolutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.IsHinge(var.name)))
hasdividebyzero = any([len(self.checkForDivideByZero(self._SubstituteGlobalSymbols(s)))>0 for s in jointsolutions])
if not hasdividebyzero:
return returnfirstsolutions
if len(returnfirstsolutions) > 0:
# already computed some solutions, so return them; note that this means all of the solutions have a divide-by-zero condition
return returnfirstsolutions
solutions = []
if len(eqns) > 1:
neweqns = []
listsymbols = []
symbolgen = cse_main.numbered_symbols('const')
for e in eqns:
enew, symbols = self.groupTerms(e.subs(varsym.subs),[varsym.cvar,varsym.svar,var], symbolgen)
try:
# remove coupled equations
if any([(m[0]>0)+(m[1]>0)+(m[2]>0)>1 for m in Poly(enew,varsym.cvar,varsym.svar,var).monoms()]):
continue
except PolynomialError:
continue
try:
# ignore any equations with degree 3 or more
if max(Poly(enew,varsym.svar).degree_list()) > maxdegree or max(Poly(enew,varsym.cvar).degree_list()) > maxdegree:
log.debug('ignoring equation: %s', enew)
continue
except PolynomialError:
continue
try:
if Poly(enew,varsym.svar).TC() == S.Zero or Poly(enew,varsym.cvar).TC() == S.Zero or Poly(enew,varsym.var).TC() == S.Zero:
log.debug('equation %s is allowing trivial solution for variable %s, ignoring ',e,varsym.name)
continue
except PolynomialError:
continue
rank = self.codeComplexity(enew)
for s in symbols:
rank += self.codeComplexity(s[1])
neweqns.append((rank,enew))
listsymbols += symbols
# since we are solving for two variables, we only need two equations at a time;
# try combinations starting from the least complicated equations until a solution is found
eqcombinations = []
for eqs in combinations(neweqns,2):
eqcombinations.append((eqs[0][0]+eqs[1][0],[Eq(e[1],0) for e in eqs]))
eqcombinations.sort(lambda x, y: x[0]-y[0])
hasgoodsolution = False
for icomb,comb in enumerate(eqcombinations):
# skip if too complex
if len(solutions) > 0 and comb[0] > 200:
break
# try to solve for both sin and cos terms
if not self.has(comb[1],varsym.svar) or not self.has(comb[1], varsym.cvar):
continue
try:
s = solve(comb[1],[varsym.svar,varsym.cvar])
except (PolynomialError,CoercionFailed), e:
log.debug('solveSingleVariable: failed: %s',e)
continue
if s is not None:
sollist = None
if hasattr(s,'has_key'):
if s.has_key(varsym.svar) and s.has_key(varsym.cvar):
sollist = [(s[varsym.svar],s[varsym.cvar])]
else:
sollist = []
else:
sollist = s
solversolution = AST.SolverSolution(var.name,jointeval=[],isHinge=self.IsHinge(var.name))
goodsolution = 0
for svarsol,cvarsol in sollist:
# solutions cannot be trivial
soldiff = (svarsol-cvarsol).subs(listsymbols)
soldiffComplexity = self.codeComplexity(soldiff)
if soldiffComplexity < 1000 and soldiff.expand() == S.Zero:
break
svarComplexity = self.codeComplexity(svarsol.subs(listsymbols))
cvarComplexity = self.codeComplexity(cvarsol.subs(listsymbols))
if (svarComplexity < 600 and svarsol.subs(listsymbols).expand() == S.Zero) and (cvarComplexity < 600 and Abs(cvarsol.subs(listsymbols).expand()) - S.One != S.Zero):
break
if (cvarComplexity < 600 and cvarsol.subs(listsymbols).expand() == S.Zero) and (svarComplexity < 600 and Abs(svarsol.subs(listsymbols).expand()) - S.One != S.Zero):
break
# check the numerator and denominator if solutions are the same or for possible divide by zeros
svarfrac=fraction(svarsol)
svarfrac = [svarfrac[0].subs(listsymbols), svarfrac[1].subs(listsymbols)]
cvarfrac=fraction(cvarsol)
cvarfrac = [cvarfrac[0].subs(listsymbols), cvarfrac[1].subs(listsymbols)]
if self.equal(svarfrac[0],cvarfrac[0]) and self.equal(svarfrac[1],cvarfrac[1]):
break
if not self.isValidSolution(svarfrac[0]) or not self.isValidSolution(svarfrac[1]) or not self.isValidSolution(cvarfrac[0]) or not self.isValidSolution(cvarfrac[1]):
continue
# check if there exists at least one test solution with non-zero denominators
if subs is None:
testeqs = [svarfrac[1].subs(othersubs),cvarfrac[1].subs(othersubs)]
else:
testeqs = [svarfrac[1].subs(subs).subs(othersubs),cvarfrac[1].subs(subs).subs(othersubs)]
testsuccess = False
for testconsistentvalue in self.testconsistentvalues:
if all([testeq.subs(self.globalsymbols).subs(testconsistentvalue).evalf()!=S.Zero for testeq in testeqs]):
testsuccess = True
break
if not testsuccess:
continue
scomplexity = self.codeComplexity(svarfrac[0])+self.codeComplexity(svarfrac[1])
ccomplexity = self.codeComplexity(cvarfrac[0])+self.codeComplexity(cvarfrac[1])
if scomplexity > 1200 or ccomplexity > 1200:
log.debug('equation too complex for single variable solution (%d,%d).... (probably wrong?)',scomplexity,ccomplexity)
break
if scomplexity < 500:
svarfrac[1] = simplify(svarfrac[1])
if self.chop(svarfrac[1])== 0:
break
if ccomplexity < 500:
cvarfrac[1] = simplify(cvarfrac[1])
if self.chop(cvarfrac[1])== 0:
break
# sometimes the returned simplest solution makes really gross approximations
svarfracsimp_denom = self.trigsimp(svarfrac[1],othersolvedvars)
cvarfracsimp_denom = self.trigsimp(cvarfrac[1],othersolvedvars)
# self.SimplifyTransform could help in reducing denoms further...
denomsequal = False
if self.equal(svarfracsimp_denom,cvarfracsimp_denom):
denomsequal = True
elif self.equal(svarfracsimp_denom,-cvarfracsimp_denom):
cvarfrac[0] = -cvarfrac[0]
cvarfracsimp_denom = -cvarfracsimp_denom
if self.equal(svarfracsimp_denom,cvarfracsimp_denom) and not svarfracsimp_denom.is_number:
log.debug('%s solution: denominator is equal %s, doing a global substitution',var.name,svarfracsimp_denom)
denom = self.gsymbolgen.next()
solversolution.dictequations.append((denom,sign(svarfracsimp_denom)))
svarsolsimp = self.trigsimp(svarfrac[0],othersolvedvars)*denom
cvarsolsimp = self.trigsimp(cvarfrac[0],othersolvedvars)*denom
solversolution.FeasibleIsZeros = False
solversolution.presetcheckforzeros.append(svarfracsimp_denom)
expandedsol = atan2(svarsolsimp,cvarsolsimp)
else:
svarfracsimp_num = self.trigsimp(svarfrac[0],othersolvedvars)
cvarfracsimp_num = self.trigsimp(cvarfrac[0],othersolvedvars)
svarsolsimp = svarfracsimp_num/svarfracsimp_denom
cvarsolsimp = cvarfracsimp_num/cvarfracsimp_denom
if svarsolsimp.is_number and cvarsolsimp.is_number:
if Abs(svarsolsimp**2+cvarsolsimp**2-S.One).evalf() > 1e-10:
log.debug('%s solution: atan2(%s,%s), sin/cos not on circle so ignoring',var.name,svarsolsimp,cvarsolsimp)
continue
expandedsol = atan2check(svarsolsimp,cvarsolsimp)
solversolution.FeasibleIsZeros = False
log.debug('%s solution: atan2 check for joint',var.name)
solversolution.jointeval.append(expandedsol)
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
solversolution.equationsused = [eq for eq in eqns if not eq.has(*unsolvedsymbols)]
else:
solversolution.equationsused = eqns
if len(solversolution.equationsused) > 0:
log.info('%s solution: equations used for atan2: %s',var.name, str(solversolution.equationsused))
if len(self.checkForDivideByZero(self._SubstituteGlobalSymbols(expandedsol.subs(solversolution.dictequations)))) == 0:
goodsolution += 1
if len(solversolution.jointeval) == len(sollist) and len(sollist) > 0:
solutions.append(solversolution)
if goodsolution > 0:
hasgoodsolution = True
if len(sollist) == goodsolution and goodsolution == 1 and len(solutions) >= 2:
break
if len(solutions) >= maxsolutions:
# probably more than enough already?
break
if len(solutions) > 0 or hasgoodsolution: # found a solution without any divides, necessary for pr2 head_torso lookat3d ik
return solutions
# solve one equation
for ieq,eq in enumerate(eqns):
symbolgen = cse_main.numbered_symbols('const')
eqnew, symbols = self.groupTerms(eq.subs(varsym.subs), [varsym.cvar,varsym.svar,varsym.var], symbolgen)
try:
# ignore any equations with degree 3 or more
ps = Poly(eqnew,varsym.svar)
pc = Poly(eqnew,varsym.cvar)
if max(ps.degree_list()) > maxdegree or max(pc.degree_list()) > maxdegree:
log.debug('cannot solve equation with high degree: %s',str(eqnew))
continue
if ps.TC() == S.Zero and len(ps.monoms()) > 0:
log.debug('equation %s has trivial solution, ignoring...', ps)
continue
if pc.TC() == S.Zero and len(pc.monoms()) > 0:
log.debug('equation %s has trivial solution, ignoring...', pc)
continue
except PolynomialError:
# might not be a polynomial, so ignore
continue
equationsused = None
if unknownvars is not None:
unsolvedsymbols = []
for unknownvar in unknownvars:
if unknownvar != var:
unsolvedsymbols += self.Variable(unknownvar).vars
if len(unsolvedsymbols) > 0:
equationsused = [eq2 for ieq2,eq2 in enumerate(eqns) if ieq2!=ieq and not eq2.has(*unsolvedsymbols)]
else:
equationsused = eqns[:]
equationsused.pop(ieq)
numcvar = self.countVariables(eqnew,varsym.cvar)
numsvar = self.countVariables(eqnew,varsym.svar)
if numcvar == 1 and numsvar == 1:
a = Wild('a',exclude=[varsym.svar,varsym.cvar])
b = Wild('b',exclude=[varsym.svar,varsym.cvar])
c = Wild('c',exclude=[varsym.svar,varsym.cvar])
m = eqnew.match(a*varsym.cvar+b*varsym.svar+c)
if m is not None:
symbols += [(varsym.svar,sin(var)),(varsym.cvar,cos(var))]
asinsol = trigsimp(asin(-m[c]/Abs(sqrt(m[a]*m[a]+m[b]*m[b]))).subs(symbols),deep=True)
constsol = self._SubstituteGlobalSymbols(-atan2(m[a],m[b]).subs(symbols)).evalf()
jointsolutions = [constsol+asinsol,constsol+pi.evalf()-asinsol]
if all([self.isValidSolution(s) for s in jointsolutions]):
#self.checkForDivideByZero(expandedsol)
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.IsHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
if numcvar > 0:
try:
# substitute cos
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.cvar) <= 2 and self.countVariables(eqnew,varsym.svar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.svar,sqrt(1-varsym.cvar**2)),varsym.cvar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalcos=jointsolutions,isHinge=self.IsHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numsvar > 0:
# substitute sin
try:
if self.countVariables(eqnew,varsym.svar) <= 1 or (self.countVariables(eqnew,varsym.svar) <= 2 and self.countVariables(eqnew,varsym.cvar) == 0): # anything more than 1 implies quartic equation
tempsolutions = solve(eqnew.subs(varsym.cvar,sqrt(1-varsym.svar**2)),varsym.svar)
jointsolutions = [self.trigsimp(s.subs(symbols+varsym.subsinv),othersolvedvars) for s in tempsolutions]
if all([self.isValidSolution(s) for s in jointsolutions]):
solutions.append(AST.SolverSolution(var.name,jointevalsin=jointsolutions,isHinge=self.IsHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
except self.CannotSolveError,e:
log.debug(e)
except NotImplementedError:
pass
if numcvar == 0 and numsvar == 0:
tempsolutions = solve(eqnew,var)
jointsolutions = []
for s in tempsolutions:
eqsub = s.subs(symbols)
if self.codeComplexity(eqsub) < 2000:
eqsub = self.trigsimp(eqsub,othersolvedvars)
jointsolutions.append(eqsub)
if all([self.isValidSolution(s) and s != S.Zero for s in jointsolutions]) and len(jointsolutions) > 0:
solutions.append(AST.SolverSolution(var.name,jointeval=jointsolutions,isHinge=self.IsHinge(var.name)))
solutions[-1].equationsused = equationsused
continue
try:
solution = self.solveHighDegreeEquationsHalfAngle([eqnew],varsym,symbols)
solutions.append(solution.subs(symbols))
solutions[-1].equationsused = equationsused
except self.CannotSolveError,e:
log.debug(e)
if len(solutions) > 0:
return solutions
return [self.solveHighDegreeEquationsHalfAngle(eqns,varsym)]
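# The phase-shift identity behind the a*cos(x) + b*sin(x) + c = 0 branch above,
# checked numerically; a, b, c are made-up test values, not solver state:
#
# import math
# a, b, c = 1.0, 1.0, -1.0
# r = math.sqrt(a*a + b*b)               # a*cos(x) + b*sin(x) == r*sin(x + phi)
# phi = math.atan2(a, b)                 # so sin(x + phi) == -c/r
# x0 = math.asin(-c/r) - phi             # first branch of asin
# x1 = math.pi - math.asin(-c/r) - phi   # second branch
# for x in (x0, x1):
#     assert abs(a*math.cos(x) + b*math.sin(x) + c) < 1e-9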
def SolvePrismaticHingePairVariables(self, raweqns, var0,var1,othersolvedvars,unknownvars=None):
"""solves one hinge and one prismatic variable together
"""
if self.IsPrismatic(var0.name) and self.IsHinge(var1.name):
prismaticSymbol = var0
hingeSymbol = var1
elif self.IsHinge(var0.name) and self.IsPrismatic(var1.name):
hingeSymbol = var0
prismaticSymbol = var1
else:
raise self.CannotSolveError('need to have one hinge and one prismatic variable')
prismaticVariable = self.Variable(prismaticSymbol)
hingeVariable = self.Variable(hingeSymbol)
chingeSymbol,shingeSymbol = hingeVariable.cvar, hingeVariable.svar
varsubs=prismaticVariable.subs+hingeVariable.subs
varsubsinv = prismaticVariable.subsinv+hingeVariable.subsinv
unknownvars=[chingeSymbol,shingeSymbol,prismaticSymbol]
reducesubs = [(shingeSymbol**2,1-chingeSymbol**2)]
polyeqs = [Poly(eq.subs(varsubs).subs(reducesubs).expand(),unknownvars) for eq in raweqns if eq.has(prismaticSymbol,hingeSymbol)]
if len(polyeqs) <= 1:
raise self.CannotSolveError('not enough equations')
# try to solve one variable in terms of the others
solvevariables = []
for polyeq in polyeqs:
if polyeq.degree(0) == 1 and polyeq.degree(1) == 0:
chingeSolutions = solve(polyeq,chingeSymbol)
solvevariables.append((prismaticSymbol,[(chingeSymbol,chingeSolutions[0])]))
elif polyeq.degree(0) == 0 and polyeq.degree(1) == 1:
shingeSolutions = solve(polyeq,shingeSymbol)
solvevariables.append((prismaticSymbol,[(shingeSymbol,shingeSolutions[0])]))
elif polyeq.degree(2) == 1:
prismaticSolutions = solve(polyeq,prismaticSymbol)
solvevariables.append((hingeSymbol,[(prismaticSymbol,prismaticSolutions[0])]))
# prioritize solving out the hingeSymbol first
for solveSymbol in [hingeSymbol,prismaticSymbol]:
for solveSymbol2, solvesubs in solvevariables:
if solveSymbol == solveSymbol2:
# have a solution for one variable, so substitute it in and see if the equations become solvable with one variable
reducedeqs = []
for polyeq2 in polyeqs:
eqnew = simplify(polyeq2.as_expr().subs(solvesubs))
if eqnew != S.Zero:
reducedeqs.append(eqnew)
self.sortComplexity(reducedeqs)
try:
rawsolutions = self.solveSingleVariable(reducedeqs,solveSymbol,othersolvedvars, unknownvars=unknownvars)
if len(rawsolutions) > 0:
return rawsolutions
except self.CannotSolveError:
pass
raise self.CannotSolveError(u'SolvePrismaticHingePairVariables: failed to find variable with degree 1')
def SolvePairVariables(self,raweqns,var0,var1,othersolvedvars,maxcomplexity=50,unknownvars=None):
"""solves two hinge variables together
"""
# make sure both variables are hinges
if not self.IsHinge(var0.name) or not self.IsHinge(var1.name):
raise self.CannotSolveError('pairwise variables only supports hinge joints')
varsym0 = self.Variable(var0)
varsym1 = self.Variable(var1)
cvar0,svar0 = varsym0.cvar, varsym0.svar
cvar1,svar1 = varsym1.cvar, varsym1.svar
varsubs=varsym0.subs+varsym1.subs
varsubsinv = varsym0.subsinv+varsym1.subsinv
unknownvars=[cvar0,svar0,cvar1,svar1]
reducesubs = [(svar0**2,1-cvar0**2),(svar1**2,1-cvar1**2)]
eqns = [eq.subs(varsubs).subs(reducesubs).expand() for eq in raweqns if eq.has(var0,var1)]
if len(eqns) <= 1:
raise self.CannotSolveError('not enough equations')
# group equations with single variables
symbolgen = cse_main.numbered_symbols('const')
orgeqns = []
allsymbols = []
for eq in eqns:
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
orgeqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns.sort(lambda x, y: x[0]-y[0])
neweqns = orgeqns[:]
pairwisesubs = [(svar0*cvar1,Symbol('s0c1')),(svar0*svar1,Symbol('s0s1')),(cvar0*cvar1,Symbol('c0c1')),(cvar0*svar1,Symbol('c0s1')),(cvar0*svar0,Symbol('s0c0')),(cvar1*svar1,Symbol('c1s1'))]
pairwiseinvsubs = [(f[1],f[0]) for f in pairwisesubs]
pairwisevars = [f[1] for f in pairwisesubs]
reduceeqns = [Poly(eq.as_expr().subs(pairwisesubs),*pairwisevars) for rank,eq in orgeqns if rank < 4*maxcomplexity]
for i,eq in enumerate(reduceeqns):
if eq.TC() != S.Zero and not eq.TC().is_Symbol:
n=symbolgen.next()
allsymbols.append((n,eq.TC().subs(allsymbols)))
reduceeqns[i] += n-eq.TC()
# try to subtract out as many of the paired variables as possible
eqcombs = [c for c in combinations(reduceeqns,2)]
while len(eqcombs) > 0 and len(neweqns) < 20:
eq0,eq1 = eqcombs.pop()
eq0dict = eq0.as_dict()
eq1dict = eq1.as_dict()
for i in range(6):
monom = [0,0,0,0,0,0]
monom[i] = 1
eq0value = eq0dict.get(tuple(monom),S.Zero)
eq1value = eq1dict.get(tuple(monom),S.Zero)
if eq0value != 0 and eq1value != 0:
tempeq = (eq0.as_expr()*eq1value-eq0value*eq1.as_expr()).subs(allsymbols+pairwiseinvsubs).expand()
if self.codeComplexity(tempeq) > 200:
continue
eq = simplify(tempeq)
if eq == S.Zero:
continue
peq = Poly(eq,*pairwisevars)
if max(peq.degree_list()) > 0 and self.codeComplexity(eq) > maxcomplexity:
# don't need such complex equations
continue
if not self.CheckExpressionUnique(eqns,eq):
continue
if eq.has(*unknownvars): # be a little strict about new candidates
eqns.append(eq)
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
neweqns.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
orgeqns = neweqns[:]
# try to solve for all pairwise variables
systemofequations = []
for i in range(len(reduceeqns)):
if reduceeqns[i].has(pairwisevars[4],pairwisevars[5]):
continue
if not all([__builtin__.sum(m) <= 1 for m in reduceeqns[i].monoms()]):
continue
arr = [S.Zero]*5
for m,c in reduceeqns[i].terms():
if __builtin__.sum(m) == 1:
arr[list(m).index(1)] = c
else:
arr[4] = c
systemofequations.append(arr)
if len(systemofequations) >= 4:
singleeqs = None
for eqs in combinations(systemofequations,4):
M = zeros((4,4))
B = zeros((4,1))
for i,arr in enumerate(eqs):
for j in range(4):
M[i,j] = arr[j]
B[i] = -arr[4]
det = self.det_bareis(M,*(self.pvars+unknownvars)).subs(allsymbols)
if det.evalf() != S.Zero:
X = M.adjugate()*B
singleeqs = []
for i in range(4):
eq = (pairwisesubs[i][0]*det - X[i]).subs(allsymbols)
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
singleeqs.append([self.codeComplexity(eq),Poly(eqnew,*unknownvars)])
break
if singleeqs is not None:
neweqns += singleeqs
neweqns.sort(lambda x, y: x[0]-y[0])
# check if any equations are at least degree 1 (if not, try to compute some)
for ivar in range(2):
polyunknown = []
for rank,eq in orgeqns:
p = Poly(eq,unknownvars[2*ivar],unknownvars[2*ivar+1])
if sum(p.degree_list()) == 1 and __builtin__.sum(p.LM()) == 1:
polyunknown.append((rank,p))
if len(polyunknown) > 0:
break
if len(polyunknown) == 0:
addedeqs = eqns[:]
polyeqs = []
for ivar in range(2):
polyunknown = []
for rank,eq in orgeqns:
p = Poly(eq,unknownvars[2*ivar],unknownvars[2*ivar+1])
polyunknown.append(Poly(p.subs(unknownvars[2*ivar+1]**2,1-unknownvars[2*ivar]**2),unknownvars[2*ivar],unknownvars[2*ivar+1]))
if len(polyunknown) >= 2:
monomtoremove = [[polyunknown,(2,0)],[polyunknown,(1,1)]]
for curiter in range(2):
# remove the square
polyunknown,monom = monomtoremove[curiter]
pbase = [p for p in polyunknown if p.as_dict().get(monom,S.Zero) != S.Zero]
if len(pbase) == 0:
continue
pbase = pbase[0]
pbasedict = pbase.as_dict()
for i in range(len(polyunknown)):
eq = (polyunknown[i]*pbasedict.get(monom,S.Zero)-pbase*polyunknown[i].as_dict().get(monom,S.Zero)).as_expr().subs(allsymbols).expand()
if eq != S.Zero and self.CheckExpressionUnique(addedeqs,eq):
eqnew, symbols = self.groupTerms(eq, unknownvars, symbolgen)
allsymbols += symbols
p = Poly(eqnew,*pbase.gens)
if p.as_dict().get((1,1),S.Zero) != S.Zero and curiter == 0:
monomtoremove[1][0].insert(0,p)
polyeqs.append([self.codeComplexity(eqnew),Poly(eqnew,*unknownvars)])
addedeqs.append(eq)
neweqns += polyeqs
neweqns.sort(lambda x,y: x[0]-y[0])
rawsolutions = []
# try single variable solution, only return if a single solution has been found
# returning multiple solutions when only one exists can lead to wrong results.
try:
rawsolutions += self.solveSingleVariable(self.sortComplexity([e.as_expr().subs(varsubsinv).expand() for score,e in neweqns if not e.has(cvar1,svar1,var1)]),var0,othersolvedvars,subs=allsymbols,unknownvars=unknownvars)
except self.CannotSolveError:
pass
try:
rawsolutions += self.solveSingleVariable(self.sortComplexity([e.as_expr().subs(varsubsinv).expand() for score,e in neweqns if not e.has(cvar0,svar0,var0)]),var1,othersolvedvars,subs=allsymbols,unknownvars=unknownvars)
except self.CannotSolveError:
pass
if len(rawsolutions) > 0:
solutions = []
for s in rawsolutions:
try:
solutions.append(s.subs(allsymbols))
except self.CannotSolveError:
pass
if len(solutions) > 0:
return solutions
groups=[]
for i,unknownvar in enumerate(unknownvars):
listeqs = []
listeqscmp = []
for rank,eq in neweqns:
# if variable ever appears, it should be alone
if all([m[i] == 0 or (__builtin__.sum(m) == m[i] and m[i]>0) for m in eq.monoms()]) and any([m[i] > 0 for m in eq.monoms()]):
# make sure there's only one monom that includes other variables
othervars = [__builtin__.sum(m) - m[i] > 0 for m in eq.monoms()]
if __builtin__.sum(othervars) <= 1:
eqcmp = self.removecommonexprs(eq.subs(allsymbols).as_expr(),onlynumbers=False,onlygcd=True)
if self.CheckExpressionUnique(listeqscmp,eqcmp):
listeqs.append(eq)
listeqscmp.append(eqcmp)
groups.append(listeqs)
# find a group that has two or more equations:
useconic=False
goodgroup = [(i,g) for i,g in enumerate(groups) if len(g) >= 2]
if len(goodgroup) == 0:
# might have a set of equations that can be solved with conics
# look for equations where the variable and its complement are alone
groups=[]
for i in [0,2]:
unknownvar = unknownvars[i]
complementvar = unknownvars[i+1]
listeqs = []
listeqscmp = []
for rank,eq in neweqns:
# if variable ever appears, it should be alone
addeq = False
if all([__builtin__.sum(m) == m[i]+m[i+1] for m in eq.monoms()]):
addeq = True
else:
# make sure there's only one monom that includes other variables
othervars = 0
for m in eq.monoms():
if __builtin__.sum(m) > m[i]+m[i+1]:
if m[i] == 0 and m[i+1]==0:
othervars += 1
else:
othervars = 10000
if othervars <= 1:
addeq = True
if addeq:
eqcmp = self.removecommonexprs(eq.subs(allsymbols).as_expr(),onlynumbers=False,onlygcd=True)
if self.CheckExpressionUnique(listeqscmp,eqcmp):
listeqs.append(eq)
listeqscmp.append(eqcmp)
groups.append(listeqs)
groups.append([]) # necessary to get indices correct
goodgroup = [(i,g) for i,g in enumerate(groups) if len(g) >= 2]
useconic=True
if len(goodgroup) == 0:
try:
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
except self.CannotSolveError,e:
log.warn('%s',e)
# try a separate approach where the two variables are divided on both sides
neweqs = []
for rank,eq in neweqns:
p = Poly(eq,unknownvars[0],unknownvars[1])
iscoupled = False
for m,c in p.terms():
if __builtin__.sum(m) > 0:
if c.has(unknownvars[2],unknownvars[3]):
iscoupled = True
break
if not iscoupled:
neweqs.append([p-p.TC(),Poly(-p.TC(),unknownvars[2],unknownvars[3])])
if len(neweqs) > 0:
for ivar in range(2):
lineareqs = [eq for eq in neweqs if __builtin__.sum(eq[ivar].LM())==1]
for paireq0,paireq1 in combinations(lineareqs,2):
log.info('solving separated equations with linear terms')
eq0 = paireq0[ivar]
eq0dict = eq0.as_dict()
eq1 = paireq1[ivar]
eq1dict = eq1.as_dict()
disc = (eq0dict.get((1,0),S.Zero)*eq1dict.get((0,1),S.Zero) - eq0dict.get((0,1),S.Zero)*eq1dict.get((1,0),S.Zero)).subs(allsymbols).expand()
if disc == S.Zero:
continue
othereq0 = paireq0[1-ivar].as_expr() - eq0.TC()
othereq1 = paireq1[1-ivar].as_expr() - eq1.TC()
csol = - eq1dict.get((0,1),S.Zero) * othereq0 + eq0dict.get((0,1),S.Zero) * othereq1
ssol = eq1dict.get((1,0),S.Zero) * othereq0 - eq0dict.get((1,0),S.Zero) * othereq1
polysymbols = paireq0[1-ivar].gens
totaleq = (csol**2+ssol**2-disc**2).subs(allsymbols).expand()
if self.codeComplexity(totaleq) < 4000:
log.info('simplifying final equation to %d',self.codeComplexity(totaleq))
totaleq = simplify(totaleq)
ptotal_cos = Poly(Poly(totaleq,*polysymbols).subs(polysymbols[0]**2,1-polysymbols[1]**2).subs(polysymbols[1]**2,1-polysymbols[0]**2),*polysymbols)
ptotal_sin = Poly(S.Zero,*polysymbols)
for m,c in ptotal_cos.terms():
if m[1] > 0:
assert(m[1] == 1)
ptotal_sin = ptotal_sin.sub(Poly.from_dict({(m[0],0):c},*ptotal_sin.gens))
ptotal_cos = ptotal_cos.sub(Poly.from_dict({m:c},*ptotal_cos.gens))
finaleq = (ptotal_cos.as_expr()**2 - (1-polysymbols[0]**2)*ptotal_sin.as_expr()**2).expand()
# sometimes denominators can accumulate
pfinal = Poly(self.removecommonexprs(finaleq,onlygcd=False,onlynumbers=True),polysymbols[0])
pfinal = self.checkFinalEquation(pfinal)
if pfinal is not None:
jointsol = atan2(ptotal_cos.as_expr()/ptotal_sin.as_expr(), polysymbols[0])
var = var1 if ivar == 0 else var0
solution = AST.SolverPolynomialRoots(jointname=var.name,poly=pfinal,jointeval=[jointsol],isHinge=self.IsHinge(var.name))
solution.postcheckforzeros = [ptotal_sin.as_expr()]
solution.postcheckfornonzeros = []
solution.postcheckforrange = []
return [solution]
# if maxnumeqs is any less, it will miss linearly independent equations
lineareqs = self.solveSingleVariableLinearly(raweqns,var0,[var1],maxnumeqs=len(raweqns))
if len(lineareqs) > 0:
try:
return [self.solveHighDegreeEquationsHalfAngle(lineareqs,varsym1)]
except self.CannotSolveError,e:
log.warn('%s',e)
raise self.CannotSolveError('cannot cleanly separate pair equations')
varindex=goodgroup[0][0]
var = var0 if varindex < 2 else var1
varsym = varsym0 if varindex < 2 else varsym1
unknownvar=unknownvars[goodgroup[0][0]]
eqs = goodgroup[0][1][0:2]
simpleterms = []
complexterms = []
domagicsquare = False
for i in range(2):
if useconic:
terms=[(c,m) for m,c in eqs[i].terms() if __builtin__.sum(m) - m[varindex] - m[varindex+1] > 0]
else:
terms=[(c,m) for m,c in eqs[i].terms() if __builtin__.sum(m) - m[varindex] > 0]
if len(terms) > 0:
simpleterms.append(eqs[i].sub(Poly.from_dict({terms[0][1]:terms[0][0]},*eqs[i].gens)).as_expr()/terms[0][0]) # divide by the coeff
complexterms.append(Poly({terms[0][1]:S.One},*unknownvars).as_expr())
domagicsquare = True
else:
simpleterms.append(eqs[i].as_expr())
complexterms.append(S.Zero)
finaleq = None
checkforzeros = []
if domagicsquare:
# here is the magic transformation:
finaleq = self.trigsimp(expand(((complexterms[0]**2+complexterms[1]**2) - simpleterms[0]**2 - simpleterms[1]**2).subs(varsubsinv)),othersolvedvars+[var0,var1]).subs(varsubs)
denoms = [fraction(simpleterms[0])[1], fraction(simpleterms[1])[1], fraction(complexterms[0])[1], fraction(complexterms[1])[1]]
lcmvars = self.pvars+unknownvars
for othersolvedvar in othersolvedvars:
lcmvars += self.Variable(othersolvedvar).vars
denomlcm = Poly(S.One,*lcmvars)
for denom in denoms:
if denom != S.One:
checkforzeros.append(self.removecommonexprs(denom,onlygcd=False,onlynumbers=True))
denomlcm = Poly(lcm(denomlcm,denom),*lcmvars)
finaleq = simplify(finaleq*denomlcm.as_expr()**2)
complementvarindex = varindex-(varindex%2)+((varindex+1)%2)
complementvar = unknownvars[complementvarindex]
finaleq = simplify(finaleq.subs(complementvar**2,1-unknownvar**2)).subs(allsymbols).expand()
else:
# try to reduce finaleq
p0 = Poly(simpleterms[0],unknownvars[varindex],unknownvars[varindex+1])
p1 = Poly(simpleterms[1],unknownvars[varindex],unknownvars[varindex+1])
if max(p0.degree_list()) > 1 and max(p1.degree_list()) > 1 and max(p0.degree_list()) == max(p1.degree_list()) and p0.LM() == p1.LM():
finaleq = (p0*p1.LC()-p1*p0.LC()).as_expr()
finaleq = expand(simplify(finaleq.subs(allsymbols)))
if finaleq == S.Zero:
finaleq = expand(p0.as_expr().subs(allsymbols))
if finaleq is None:
log.warn('SolvePairVariables: did not compute a final variable. This is a weird condition...')
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
if not self.isValidSolution(finaleq):
log.warn('failed to solve pairwise equation: %s'%str(finaleq))
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
newunknownvars = unknownvars[:]
newunknownvars.remove(unknownvar)
if finaleq.has(*newunknownvars):
log.warn('equation relies on unsolved variables(%s): %s',newunknownvars,finaleq)
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
if not finaleq.has(unknownvar):
# somehow removed all variables, so try the general method
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
try:
if self.codeComplexity(finaleq) > 100000:
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
except self.CannotSolveError:
pass
if useconic:
# conic roots solver not as robust as half-angle transform!
#return [SolverConicRoots(var.name,[finaleq],isHinge=self.IsHinge(var.name))]
solution = self.solveHighDegreeEquationsHalfAngle([finaleq],varsym)
solution.checkforzeros += checkforzeros
return [solution]
# now that everything is with respect to one variable, simplify and solve the equation
eqnew, symbols = self.groupTerms(finaleq, unknownvars, symbolgen)
allsymbols += symbols
solutions=solve(eqnew,unknownvar)
log.info('pair solution: %s, %s', eqnew,solutions)
if solutions:
solversolution=AST.SolverSolution(var.name, isHinge=self.IsHinge(var.name))
processedsolutions = []
for s in solutions:
processedsolution = s.subs(allsymbols+varsubsinv).subs(varsubs)
# trigsimp probably won't work on long solutions
if self.codeComplexity(processedsolution) < 5000:
processedsolution = self.trigsimp(processedsolution,othersolvedvars)
processedsolutions.append(processedsolution.subs(varsubs))
if (varindex%2)==0:
solversolution.jointevalcos=processedsolutions
else:
solversolution.jointevalsin=processedsolutions
return [solversolution]
return self.SolvePairVariablesHalfAngle(raweqns,var0,var1,othersolvedvars)
#raise self.CannotSolveError('cannot solve pair equation')
## SymPy helper routines
@staticmethod
def isValidSolution(expr):
"""return true if solution does not contain any nan or inf terms"""
if expr.is_number:
e=expr.evalf()
if e.has(I) or isinf(e) or isnan(e):
return False
return True
if expr.is_Mul:
# first multiply all numbers
number = S.One
for arg in expr.args:
if arg.is_number:
number *= arg
elif not IKFastSolver.isValidSolution(arg):
return False
# finally evaluate the multiplied form
return IKFastSolver.isValidSolution(number.evalf())
for arg in expr.args:
if not IKFastSolver.isValidSolution(arg):
return False
return True
@staticmethod
def _GetSumSquares(expr):
"""if expr is a sum of squares, returns the list of individual expressions that were squared. otherwise returns None
"""
values = []
if expr.is_Add:
for arg in expr.args:
if arg.is_Pow and arg.exp.is_number and arg.exp > 0 and (arg.exp%2) == 0:
values.append(arg.base)
else:
return []
elif expr.is_Mul:
values = IKFastSolver._GetSumSquares(expr.args[0])
for arg in expr.args[1:]:
values2 = IKFastSolver._GetSumSquares(arg)
if len(values2) > 0:
values = [x*y for x,y in product(values,values2)]
else:
values = [x*arg for x in values]
return values
else:
if expr.is_Pow and expr.exp.is_number and expr.exp > 0 and (expr.exp%2) == 0:
values.append(expr.base)
return values
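# Usage sketch (x and y are plain sympy symbols, not solver state):
#
# from sympy import symbols
# x, y = symbols('x y')
# IKFastSolver._GetSumSquares(x**2 + y**2)   # -> [x, y]
# IKFastSolver._GetSumSquares(x**2 + y)      # -> [] (not a sum of squares)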
@staticmethod
def recursiveFraction(expr):
"""return the numerator and denominator of th eexpression as if it was one fraction
"""
if expr.is_Add:
allpoly = []
finaldenom = S.One
for arg in expr.args:
n,d = IKFastSolver.recursiveFraction(arg)
finaldenom = finaldenom*d
allpoly.append([n,d])
finalnum = S.Zero
for n,d in allpoly:
finalnum += n*(finaldenom/d)
return finalnum,finaldenom
elif expr.is_Mul:
finalnum = S.One
finaldenom = S.One
for arg in expr.args:
n,d = IKFastSolver.recursiveFraction(arg)
finalnum = finalnum * n
finaldenom = finaldenom * d
return finalnum,finaldenom
elif expr.is_Pow and expr.exp.is_number:
n,d=IKFastSolver.recursiveFraction(expr.base)
if expr.exp < 0:
exponent = -expr.exp
n,d = d,n
else:
exponent = expr.exp
return n**exponent,d**exponent
else:
return fraction(expr)
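# Usage sketch (a, b, c are plain sympy symbols, not solver state):
#
# from sympy import symbols
# a, b, c = symbols('a b c')
# n, d = IKFastSolver.recursiveFraction(1/a + b/c)
# # n == c + a*b (up to term order) and d == a*c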
@staticmethod
def groupTerms(expr,vars,symbolgen = None):
"""Separates all terms that do have var in them"""
if symbolgen is None:
symbolgen = cse_main.numbered_symbols('const')
symbols = []
try:
p = Poly(expr,*vars)
except PolynomialError:
return expr, symbols
newexpr = S.Zero
for m,c in p.terms():
# make huge numbers into constants too
if (c.is_number and len(str(c)) > 40) or (not c.is_number and not c.is_Symbol):
# if it is a product of a symbol and a number, then ignore
if not c.is_Mul or not all([e.is_number or e.is_Symbol for e in c.args]):
sym = symbolgen.next()
symbols.append((sym,c))
c = sym
if __builtin__.sum(m) == 0:
newexpr += c
else:
for i,degree in enumerate(m):
c = c*vars[i]**degree
newexpr += c
return newexpr,symbols
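# Usage sketch: symbolic coefficients are hidden behind generated 'const'
# symbols so that later Poly manipulations stay small (x and q are plain sympy
# symbols, not solver state):
#
# from sympy import symbols
# x, q = symbols('x q')
# newexpr, syms = IKFastSolver.groupTerms((q**2 + 3*q + 1)*x + 2*x + 5, [x])
# # newexpr == const0*x + 5 and syms == [(const0, q**2 + 3*q + 3)],
# # since Poly first collects the coefficient of x into q**2 + 3*q + 3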
@staticmethod
def replaceNumbers(expr,symbolgen = None):
"""Replaces all numbers with symbols, this is to make gcd faster when fractions get too big"""
if symbolgen is None:
symbolgen = cse_main.numbered_symbols('const')
symbols = []
if expr.is_number:
result = symbolgen.next()
symbols.append((result,expr))
elif expr.is_Mul:
result = S.One
for arg in expr.args:
newresult, newsymbols = IKFastSolver.replaceNumbers(arg,symbolgen)
result *= newresult
symbols += newsymbols
elif expr.is_Add:
result = S.Zero
for arg in expr.args:
newresult, newsymbols = IKFastSolver.replaceNumbers(arg,symbolgen)
result += newresult
symbols += newsymbols
elif expr.is_Pow:
# don't replace the exponent
newresult, newsymbols = IKFastSolver.replaceNumbers(expr.base,symbolgen)
symbols += newsymbols
result = newresult**expr.exp
else:
result = expr
return result,symbols
@staticmethod
def frontnumbers(eq):
if eq.is_Number:
return [eq]
if eq.is_Mul:
n = []
for arg in eq.args:
n += IKFastSolver.frontnumbers(arg)
return n
return []
def IsAnyImaginaryByEval(self, eq):
"""checks if an equation ever evaluates to an imaginary number
"""
for testconsistentvalue in self.testconsistentvalues:
value = eq.subs(testconsistentvalue).evalf()
if value.is_complex and not value.is_real:
return True
return False
def AreAllImaginaryByEval(self, eq):
"""checks if an equation ever evaluates to an imaginary number
"""
for testconsistentvalue in self.testconsistentvalues:
value = eq.subs(testconsistentvalue).evalf()
if not (value.is_complex and not value.is_real):
return False
return True
def IsDeterminantNonZeroByEval(self, A):
"""checks if a determinant is non-zero by evaluating all the possible solutions.
:return: True if there exist values where det(A) is not zero
"""
N = A.shape[0]
thresh = 0.01**N
nummatrixsymbols = __builtin__.sum([1 for a in A if not a.is_number])
if nummatrixsymbols == 0:
return abs(A.det().evalf()) > thresh
for testconsistentvalue in self.testconsistentvalues:
detvalue = A.subs(testconsistentvalue).evalf().det()
if abs(detvalue) > thresh:
return True
return False
@staticmethod
def removecommonexprs(eq,returncommon=False,onlygcd=False,onlynumbers=True):
"""removes common expressions from a sum. Assumes all the coefficients are rationals. For example:
a*c_0 + a*c_1 + a*c_2 = 0
will result in
c_0 + c_1 + c_2 = 0
"""
eq = eq.expand() # doesn't work otherwise
if eq.is_Add:
exprs = eq.args
totaldenom = S.One
common = S.One
if onlynumbers:
for i in range(len(exprs)):
denom = S.One
for d in IKFastSolver.frontnumbers(fraction(exprs[i])[1]):
denom *= d
if denom != S.One:
exprs = [expr*denom for expr in exprs]
totaldenom *= denom
if onlygcd:
common = None
for i in range(len(exprs)):
coeff = S.One
for n in IKFastSolver.frontnumbers(exprs[i]):
coeff *= n
if common is None:
common = coeff
else:
common = igcd(common,coeff)
if common == S.One:
break
else:
for i in range(len(exprs)):
denom = fraction(exprs[i])[1]
if denom != S.One:
exprs = [expr*denom for expr in exprs]
totaldenom *= denom
# there are no fractions, so can start simplifying
common = exprs[0]/fraction(cancel(exprs[0]/exprs[1]))[0]
for i in range(2,len(exprs)):
common = common/fraction(cancel(common/exprs[i]))[0]
if common.is_number:
common=S.One
# find the smallest number and divide by it
if not onlygcd:
smallestnumber = None
for expr in exprs:
if expr.is_number:
if smallestnumber is None or smallestnumber > Abs(expr):
smallestnumber = Abs(expr)
elif expr.is_Mul:
n = S.One
for arg in expr.args:
if arg.is_number:
n *= arg
if smallestnumber is None or smallestnumber > Abs(n):
smallestnumber = Abs(n)
if smallestnumber is not None:
common = common*smallestnumber
eq = S.Zero
for expr in exprs:
eq += expr/common
if returncommon:
return eq,common/totaldenom
elif eq.is_Mul:
coeff = S.One
for d in IKFastSolver.frontnumbers(eq):
coeff *= d
if returncommon:
return eq/coeff,coeff
return eq/coeff
if returncommon:
return eq,S.One
return eq
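# Usage sketch matching the docstring example (a, c0, c1, c2 are plain sympy
# symbols, not solver state):
#
# from sympy import symbols
# a, c0, c1, c2 = symbols('a c0 c1 c2')
# IKFastSolver.removecommonexprs(a*c0 + a*c1 + a*c2, onlynumbers=False)
# # -> c0 + c1 + c2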
# def det_bareis(M,*vars,**kwargs):
# return M.det_bareis()
@staticmethod
def det_bareis(M,*vars,**kwargs):
"""Function from sympy with a couple of improvements.
Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
Function from sympy/matrices/matrices.py
"""
if not M.is_square:
raise NonSquareMatrixException()
n = M.rows
M = M[:,:] # make a copy
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of row swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
if len(vars) > 0 and D != S.Zero and not M[k-1, k-1].is_number:
try:
D,r = div(Poly(D,*vars),M[k-1, k-1])
except UnificationFailed:
log.warn('unification failed, trying direct division')
D /= M[k-1, k-1]
else:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
if len(vars) > 0:
M[i, j] = D
else:
M[i, j] = Poly.cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
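# A quick sanity check of the fraction-free determinant against sympy's own
# determinant (M is a made-up symbolic matrix):
#
# from sympy import symbols, Matrix, simplify
# a, b, c, d = symbols('a b c d')
# M = Matrix(2, 2, [a, b, c, d])
# assert simplify(IKFastSolver.det_bareis(M) - M.det()) == 0   # both a*d - b*c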
@staticmethod
def LUdecompositionFF(self,*vars):
"""
Compute a fraction-free LU decomposition.
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
If the elements of the matrix belong to some integral domain I, then all
elements of L, D and U are guaranteed to belong to I.
**Reference**
- W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
for LU and QR factors". Frontiers in Computer Science in China,
Vol 2, no. 1, pp. 67-80, 2008.
"""
n, m = self.rows, self.cols
U, L, P = self[:,:], eye(n), eye(n)
DD = zeros(n) # store it smarter since it's just diagonal
oldpivot = S.One
for k in range(n-1):
log.info('row=%d', k)
if U[k,k] == 0:
for kpivot in range(k+1, n):
if U[kpivot, k] != 0:
break
else:
raise ValueError("Matrix is not full rank")
U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
L[k,k] = Ukk = U[k,k]
DD[k,k] = oldpivot * Ukk
for i in range(k+1, n):
L[i,k] = Uik = U[i,k]
for j in range(k+1, m):
#U[i,j] = simplify((Ukk * U[i,j] - U[k,j]*Uik) / oldpivot)
D = Ukk * U[i,j] - U[k,j]*Uik
if len(vars) > 0 and D != S.Zero and not oldpivot.is_number:
try:
D,r = div(Poly(D,*vars),oldpivot)
except UnificationFailed:
log.warn('unification failed, trying direct division')
D /= oldpivot
else:
D /= oldpivot
# save
if D.is_Atom:
U[i,j] = D.as_expr()
else:
if len(vars) > 0:
U[i,j] = D.as_expr()
else:
U[i,j] = D.cancel()
U[i,k] = 0
oldpivot = Ukk
DD[n-1,n-1] = oldpivot
return P, L, DD, U
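# Verification sketch for the fraction-free LU factors, P*A == L*D**-1*U; note
# that the method is written to receive the matrix through 'self' (A below is a
# made-up full-rank integer matrix):
#
# from sympy import Matrix
# A = Matrix(3, 3, [2, 3, 1, 4, 1, 5, 6, 2, 3])
# P, L, D, U = IKFastSolver.LUdecompositionFF(A)
# assert P*A == L*D.inv()*U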
@staticmethod
def sequence_cross_product(*sequences):
"""iterates through the cross product of all items in the sequences"""
# visualize an odometer, with "wheels" displaying "digits"...:
wheels = map(iter, sequences)
digits = [it.next() for it in wheels]
while True:
yield tuple(digits)
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = wheels[i].next()
break
except StopIteration:
wheels[i] = iter(sequences[i])
digits[i] = wheels[i].next()
else:
break
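# Usage sketch:
#
# list(IKFastSolver.sequence_cross_product([1, 2], ['a', 'b']))
# # -> [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]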
@staticmethod
def tolatex(e):
s = printing.latex(e)
s1 = re.sub('\\\\operatorname\{(sin|cos)\}\\\\left\(j_\{(\d)\}\\\\right\)','\g<1>_\g<2>',s)
s2 = re.sub('1\.(0*)([^0-9])','1\g<2>',s1)
s3 = re.sub('1 \\\\(sin|cos)','\g<1>',s2)
s4 = re.sub('(\d*)\.([0-9]*[1-9])(0*)([^0-9])','\g<1>.\g<2>\g<4>',s3)
s5 = re.sub('sj_','s_',s4)
s5 = re.sub('cj_','c_',s5)
s5 = re.sub('sin','s',s5)
s5 = re.sub('cos','c',s5)
replacements = [('px','p_x'),('py','p_y'),('pz','p_z'),('r00','r_{00}'),('r01','r_{01}'),('r02','r_{02}'),('r10','r_{10}'),('r11','r_{11}'),('r12','r_{12}'),('r20','r_{20}'),('r21','r_{21}'),('r22','r_{22}')]
for old,new in replacements:
s5 = re.sub(old,new,s5)
return s5
@staticmethod
def GetSolvers():
"""Returns a dictionary of all the supported solvers and their official identifier names"""
return {'transform6d':IKFastSolver.solveFullIK_6D,
'rotation3d':IKFastSolver.solveFullIK_Rotation3D,
'translation3d':IKFastSolver.solveFullIK_Translation3D,
'direction3d':IKFastSolver.solveFullIK_Direction3D,
'ray4d':IKFastSolver.solveFullIK_Ray4D,
'lookat3d':IKFastSolver.solveFullIK_Lookat3D,
'translationdirection5d':IKFastSolver.solveFullIK_TranslationDirection5D,
'translationxy2d':IKFastSolver.solveFullIK_TranslationXY2D,
'translationxyorientation3d':IKFastSolver.solveFullIK_TranslationXYOrientation3D,
'translationxaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationyaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationzaxisangle4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationxaxisangleznorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationyaxisanglexnorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D,
'translationzaxisangleynorm4d':IKFastSolver.solveFullIK_TranslationAxisAngle4D
}
if __name__ == '__main__':
import openravepy
parser = OptionParser(description="""IKFast: The Robot Kinematics Compiler
Software License Agreement (Lesser GPL v3).
Copyright (C) 2009-2011 Rosen Diankov.
IKFast is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
IKFast is part of OpenRAVE. This program can be used with robots or kinbodies defined in OpenRAVE and is independent of the OpenRAVE databases.
Example usage for 7 DOF Barrett WAM where the 3rd joint is a free parameter:
python ikfast.py --robot=robots/barrettwam.robot.xml --baselink=0 --eelink=7 --savefile=ik.cpp --freeindex=2
""",version=__version__)
parser.add_option('--robot', action='store', type='string', dest='robot',default=None,
help='robot file (COLLADA or OpenRAVE XML)')
parser.add_option('--savefile', action='store', type='string', dest='savefile',default='ik.cpp',
help='filename where to store the generated c++ code')
parser.add_option('--baselink', action='store', type='int', dest='baselink',
help='base link index to start extraction of ik chain')
parser.add_option('--eelink', action='store', type='int', dest='eelink',
help='end effector link index to end extraction of ik chain')
parser.add_option('--freeindex', action='append', type='int', dest='freeindices',default=[],
help='Optional joint index specifying a free parameter of the manipulator. If not specified, assumes all joints not solving for are free parameters. Can be specified multiple times for multiple free parameters.')
parser.add_option('--iktype', action='store', dest='iktype',default='transform6d',
help='The iktype to generate the ik for. Possible values are: %s'%(', '.join(name for name,fn in IKFastSolver.GetSolvers().iteritems())))
parser.add_option('--lang', action='store',type='string',dest='lang',default='cpp',
help='The language to generate the code in (default=%default), available=('+','.join(name for name,value in CodeGenerators.iteritems())+')')
parser.add_option('--debug','-d', action='store', type='int',dest='debug',default=logging.INFO,
help='Debug level for python nose (smaller values allow more text).')
(options, args) = parser.parse_args()
if options.robot is None or options.baselink is None or options.eelink is None:
print('Error: Not all arguments specified')
sys.exit(1)
format = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(format)
log.addHandler(handler)
log.setLevel(options.debug)
solvefn=IKFastSolver.GetSolvers()[options.iktype]
if options.robot is not None:
try:
env=openravepy.Environment()
kinbody=env.ReadRobotXMLFile(options.robot)
env.Add(kinbody)
solver = IKFastSolver(kinbody,kinbody)
chaintree = solver.generateIkSolver(options.baselink,options.eelink,options.freeindices,solvefn=solvefn)
code=solver.writeIkSolver(chaintree,lang=options.lang)
finally:
openravepy.RaveDestroy()
if len(code) > 0:
open(options.savefile,'w').write(code)
'''
Individual stages of the pipeline implemented as functions from
input files to output files.
The run_stage function knows everything about submitting jobs and, given
the state parameter, has full access to the state of the pipeline, such
as config, options, DRMAA and the logger.
'''
from utils import safe_make_dir
from runner import run_stage
import os
PICARD_JAR = '$PICARD_HOME/picard.jar'
GATK_JAR = '$GATK_HOME/GenomeAnalysisTK.jar'
def java_command(jar_path, mem_in_gb, command_args):
'''Build a string for running a java command'''
# Leave a bit of room between Java's max heap size and what was requested;
# this allows for other Java memory usage, such as the stack.
java_mem = mem_in_gb - 2
return 'java -Xmx{mem}g -jar {jar_path} {command_args}'.format(
jar_path=jar_path, mem=java_mem, command_args=command_args)
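# Usage sketch: requesting 8 GB from the scheduler yields a 6 GB Java heap,
# keeping the JVM's total footprint under the allocation:
#
# java_command('$PICARD_HOME/picard.jar', 8, 'SortSam INPUT=in.bam OUTPUT=out.bam')
# # -> 'java -Xmx6g -jar $PICARD_HOME/picard.jar SortSam INPUT=in.bam OUTPUT=out.bam'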
def run_java(state, stage, jar_path, mem, args):
command = java_command(jar_path, mem, args)
run_stage(state, stage, command)
class Stages(object):
def __init__(self, state):
self.state = state
self.reference = self.get_options('ref_hg19')
self.dbsnp_hg19 = self.get_options('dbsnp_hg19')
self.mills_hg19 = self.get_options('mills_hg19')
self.one_k_g_snps = self.get_options('one_k_g_snps')
self.one_k_g_indels = self.get_options('one_k_g_indels')
self.one_k_g_highconf_snps = self.get_options('one_k_g_highconf_snps')
self.hapmap = self.get_options('hapmap')
self.exome_bed_hg19 = self.get_options('exome_bed_hg19')
self.CEU_mergeGvcf = self.get_options('CEU_mergeGvcf')
# self.GBR_mergeGvcf = self.get_options('GBR_mergeGvcf')
# self.FIN_mergeGvcf = self.get_options('FIN_mergeGvcf')
def run_picard(self, stage, args):
mem = int(self.state.config.get_stage_options(stage, 'mem'))
return run_java(self.state, stage, PICARD_JAR, mem, args)
def run_gatk(self, stage, args):
mem = int(self.state.config.get_stage_options(stage, 'mem'))
return run_java(self.state, stage, GATK_JAR, mem, args)
def get_stage_options(self, stage, *options):
return self.state.config.get_stage_options(stage, *options)
def get_options(self, *options):
return self.state.config.get_options(*options)
def original_fastqs(self, output):
'''Original fastq files'''
# print output
pass
def qc_fastqc(self, inputs, output, sample_id):
# def qc_fastqc(self, inputs, bam_out, sample_id):
'''Assess read quality data on the input FASTQ files'''
fastq_file = inputs
cores = self.get_stage_options('qc_fastqc', 'cores')
# safe_make_dir('fastqc/{sample}'.format(sample=sample_id))
# read_group = '"@RG\\tID:{readid}\\tSM:{sample}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \
# .format(readid=read_id, lib=lib, lane=lane, sample=sample_id)
command = 'fastqc --quiet -t {cores} -o {output_dir} {seq} ' \
.format(output_dir=output, cores=cores,
seq=fastq_file)
run_stage(self.state, 'qc_fastqc', command)
def align_bwa(self, inputs, bam_out, read_id, lib, lane, sample_id):
# def align_bwa(self, inputs, bam_out, sample_id):
'''Align the paired end fastq files to the reference genome using bwa'''
fastq_read1_in, fastq_read2_in = inputs
cores = self.get_stage_options('align_bwa', 'cores')
safe_make_dir('alignments/{sample}'.format(sample=sample_id))
read_group = '"@RG\\tID:{readid}\\tSM:{sample}\\tPU:{lib}\\tLN:{lane}\\tPL:Illumina"' \
.format(readid=read_id, lib=lib, lane=lane, sample=sample_id)
command = 'bwa mem -t {cores} -R {read_group} {reference} {fastq_read1} {fastq_read2} ' \
'| samtools view -b -h -o {bam} -' \
.format(cores=cores,
read_group=read_group,
fastq_read1=fastq_read1_in,
fastq_read2=fastq_read2_in,
reference=self.reference,
bam=bam_out)
run_stage(self.state, 'align_bwa', command)
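# For reference, a filled-in command string produced by this stage looks roughly
# like the following (paths, sample and read-group fields are made-up):
#
# bwa mem -t 8 -R "@RG\tID:r1\tSM:s1\tPU:lib1\tLN:lane1\tPL:Illumina" \
#     ref.fa s1_R1.fastq s1_R2.fastq | samtools view -b -h -o alignments/s1/s1.bam -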
def sort_bam_picard(self, bam_in, sorted_bam_out):
'''Sort the BAM file using Picard'''
picard_args = 'SortSam INPUT={bam_in} OUTPUT={sorted_bam_out} ' \
'VALIDATION_STRINGENCY=LENIENT SORT_ORDER=coordinate ' \
'MAX_RECORDS_IN_RAM=5000000 CREATE_INDEX=True'.format(
bam_in=bam_in, sorted_bam_out=sorted_bam_out)
self.run_picard('sort_bam_picard', picard_args)
def mark_duplicates_picard(self, bam_in, outputs):
'''Mark duplicate reads using Picard'''
dedup_bam_out, metrics_out = outputs
picard_args = 'MarkDuplicates INPUT={bam_in} OUTPUT={dedup_bam_out} ' \
'METRICS_FILE={metrics_out} VALIDATION_STRINGENCY=LENIENT ' \
'MAX_RECORDS_IN_RAM=5000000 ASSUME_SORTED=True ' \
'CREATE_INDEX=True'.format(bam_in=bam_in, dedup_bam_out=dedup_bam_out,
metrics_out=metrics_out)
self.run_picard('mark_duplicates_picard', picard_args)
def realigner_target_creator(self, inputs, intervals_out):
'''Generate chromosome intervals using GATK'''
bam_in, _metrics_dup = inputs
cores = self.get_stage_options('chrom_intervals_gatk', 'cores')
gatk_args = '-T RealignerTargetCreator -R {reference} -I {bam} ' \
'--num_threads {threads} --known {mills_hg19} ' \
'--known {one_k_g_indels} -L {exome_bed_hg19} ' \
'-o {out}'.format(reference=self.reference, bam=bam_in,
threads=cores, mills_hg19=self.mills_hg19,
one_k_g_indels=self.one_k_g_indels,
exome_bed_hg19=self.exome_bed_hg19,
out=intervals_out)
self.run_gatk('chrom_intervals_gatk', gatk_args)
def local_realignment_gatk(self, inputs, bam_out):
'''Local realign reads using GATK'''
target_intervals_in, bam_in = inputs
gatk_args = "-T IndelRealigner -R {reference} -I {bam} -L {exome_bed_hg19} " \
"-targetIntervals {target_intervals} -known {mills_hg19} " \
"-known {one_k_g_indels} " \
"-o {out}".format(reference=self.reference, bam=bam_in,
mills_hg19=self.mills_hg19,
one_k_g_indels=self.one_k_g_indels,
exome_bed_hg19=self.exome_bed_hg19,
target_intervals=target_intervals_in,
out=bam_out)
self.run_gatk('local_realignment_gatk', gatk_args)
# XXX I'm not sure that --num_cpu_threads_per_data_thread has any benefit
# here
def base_recalibration_gatk(self, bam_in, outputs):
'''Base recalibration using GATK'''
csv_out, log_out = outputs
gatk_args = "-T BaseRecalibrator -R {reference} -I {bam} " \
"--num_cpu_threads_per_data_thread 4 --knownSites {dbsnp_hg19} " \
"--knownSites {mills_hg19} --knownSites {one_k_g_indels} " \
"-L {exome_bed_hg19} -log {log} -o {out}".format(reference=self.reference, bam=bam_in,
mills_hg19=self.mills_hg19, dbsnp_hg19=self.dbsnp_hg19,
exome_bed_hg19=self.exome_bed_hg19,
one_k_g_indels=self.one_k_g_indels,
log=log_out, out=csv_out)
self.run_gatk('base_recalibration_gatk', gatk_args)
# XXX I'm not sure that --num_cpu_threads_per_data_thread has any benefit
# here
def print_reads_gatk(self, inputs, bam_out):
'''Print reads using GATK'''
[csv_in, _log], bam_in = inputs
gatk_args = "-T PrintReads -R {reference} -I {bam} --BQSR {recal_csv} " \
"-o {out} -L {exome_bed_hg19} --num_cpu_threads_per_data_thread 4".format(reference=self.reference,
bam=bam_in, recal_csv=csv_in, out=bam_out, exome_bed_hg19=self.exome_bed_hg19)
self.run_gatk('print_reads_gatk', gatk_args)
# Merge per lane bam into a single bam per sample
def merge_sample_bams(self, bam_files_in, bam_out):
'''Merge per lane bam into a merged bam file'''
bam_files = ' '.join(['INPUT=' + bam for bam in bam_files_in])
picard_args = 'MergeSamFiles {bams_in} OUTPUT={merged_bam_out} ' \
'VALIDATION_STRINGENCY=LENIENT ' \
'MAX_RECORDS_IN_RAM=5000000 ASSUME_SORTED=True ' \
'CREATE_INDEX=True'.format(
bams_in=bam_files, merged_bam_out=bam_out)
self.run_picard('merge_sample_bams', picard_args)
def call_haplotypecaller_gatk(self, bam_in, vcf_out):
'''Call variants using GATK'''
# safe_make_dir('variants}'.format(sample=sample_id))
gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 20 " \
"--emitRefConfidence GVCF " \
"-A AlleleBalance -A AlleleBalanceBySample " \
"-A ChromosomeCounts -A ClippingRankSumTest " \
"-A Coverage -A DepthPerAlleleBySample " \
"-A DepthPerSampleHC -A FisherStrand " \
"-A GCContent -A GenotypeSummaries " \
"-A HardyWeinberg -A HomopolymerRun " \
"-A LikelihoodRankSumTest -A LowMQ " \
"-A MappingQualityRankSumTest -A MappingQualityZero " \
"-A QualByDepth " \
"-A RMSMappingQuality -A ReadPosRankSumTest " \
"-A SampleList -A SpanningDeletions " \
"-A StrandBiasBySample -A StrandOddsRatio " \
"-A TandemRepeatAnnotator -A VariantType " \
"-I {bam} -L {exome_bed_hg19} -o {out}".format(reference=self.reference,
bam=bam_in, exome_bed_hg19=self.exome_hg19, out=vcf_out)
self.run_gatk('call_haplotypecaller_gatk', gatk_args)
def call_haplotypecaller_gatk_nct(self, bam_in, vcf_out):
'''Call variants using GATK'''
# safe_make_dir('variants}'.format(sample=sample_id))
gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 20 " \
"--standard_min_confidence_threshold_for_calling 30.0 " \
"--num_cpu_threads_per_data_thread 4 " \
"--variant_index_type LINEAR " \
"--standard_min_confidence_threshold_for_emitting 30.0 " \
"-I {bam} -L {exome_bed_hg19} -o {out}".format(reference=self.reference,
bam=bam_in, exome_bed_hg19=self.exome_hg19, out=vcf_out)
        self.run_gatk('call_haplotypecaller_gatk_nct', gatk_args)
def combine_gvcf_gatk(self, vcf_files_in, vcf_out):
'''Combine G.VCF files for all samples using GATK'''
g_vcf_files = ' '.join(['--variant ' + vcf for vcf in vcf_files_in])
gatk_args = "-T CombineGVCFs -R {reference} " \
"--disable_auto_index_creation_and_locking_when_reading_rods " \
"{g_vcf_files} -o {vcf_out}".format(reference=self.reference,
g_vcf_files=g_vcf_files, vcf_out=vcf_out)
# "{g_vcf_files} -o {vcf_out} --variant {CEU}".format(reference=self.reference,
# g_vcf_files=g_vcf_files, vcf_out=vcf_out, CEU=self.CEU_mergeGvcf)
self.run_gatk('combine_gvcf_gatk', gatk_args)
def genotype_gvcf_gatk(self, merged_vcf_in, vcf_out):
'''Genotype G.VCF files using GATK'''
cores = self.get_stage_options('genotype_gvcf_gatk', 'cores')
gatk_args = "-T GenotypeGVCFs -R {reference} " \
"--disable_auto_index_creation_and_locking_when_reading_rods " \
"-A AlleleBalance -A AlleleBalanceBySample " \
"-A ChromosomeCounts -A ClippingRankSumTest " \
"-A Coverage -A DepthPerAlleleBySample " \
"-A DepthPerSampleHC -A FisherStrand " \
"-A GCContent -A GenotypeSummaries " \
"-A HardyWeinberg -A HomopolymerRun " \
"-A LikelihoodRankSumTest " \
"-A MappingQualityRankSumTest -A MappingQualityZero " \
"-A QualByDepth " \
"-A RMSMappingQuality -A ReadPosRankSumTest " \
"-A SampleList -A SpanningDeletions " \
"-A StrandBiasBySample -A StrandOddsRatio " \
"-A TandemRepeatAnnotator -A VariantType " \
"--dbsnp {dbsnp} " \
"--num_threads {cores} --variant {merged_vcf} --out {vcf_out}" \
.format(reference=self.reference, dbsnp=self.dbsnp_hg19,
cores=cores, merged_vcf=merged_vcf_in, vcf_out=vcf_out)
self.run_gatk('genotype_gvcf_gatk', gatk_args)
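        # Workflow note: per-sample GVCFs from HaplotypeCaller are merged by
        # CombineGVCFs above, and GenotypeGVCFs then performs joint genotyping
        # across all samples to produce a single multi-sample VCF.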
# def genotype_gvcf_gatk(self, merged_vcf_in, vcf_out):
# '''Genotype G.VCF files using GATK'''
# cores = self.get_stage_options('genotype_gvcf_gatk', 'cores')
# gatk_args = "-T GenotypeGVCFs -R {reference} " \
# "--disable_auto_index_creation_and_locking_when_reading_rods " \
# "--num_threads {cores} --variant {merged_vcf} --out {vcf_out} " \
# "--variant {CEU_mergeGvcf} --variant {GBR_mergeGvcf} " \
# "--variant {FIN_mergeGvcf}".format(reference=self.reference,
# cores=cores, merged_vcf=merged_vcf_in, vcf_out=vcf_out,
# CEU_mergeGvcf=self.CEU_mergeGvcf, GBR_mergeGvcf=self.GBR_mergeGvcf,
# FIN_mergeGvcf=self.FIN_mergeGvcf)
# self.run_gatk('genotype_gvcf_gatk', gatk_args)
def snp_recalibrate_gatk(self, genotype_vcf_in, outputs):
'''SNP recalibration using GATK'''
recal_snp_out, tranches_snp_out, snp_plots_r_out = outputs
cores = self.get_stage_options('snp_recalibrate_gatk', 'cores')
gatk_args = "-T VariantRecalibrator --disable_auto_index_creation_and_locking_when_reading_rods " \
"-R {reference} --minNumBadVariants 5000 --num_threads {cores} " \
"-resource:hapmap,known=false,training=true,truth=true,prior=15.0 {hapmap} " \
"-resource:omni,known=false,training=true,truth=true,prior=12.0 {one_k_g_snps} " \
"-resource:1000G,known=false,training=true,truth=false,prior=10.0 {one_k_g_highconf_snps} " \
"-an DP -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR " \
"-input {genotype_vcf} --recal_file {recal_snp} --tranches_file {tranches_snp} " \
"-rscriptFile {snp_plots} -mode SNP".format(reference=self.reference,
cores=cores, hapmap=self.hapmap, one_k_g_snps=self.one_k_g_snps,
one_k_g_highconf_snps=self.one_k_g_highconf_snps, genotype_vcf=genotype_vcf_in,
recal_snp=recal_snp_out, tranches_snp=tranches_snp_out, snp_plots=snp_plots_r_out)
self.run_gatk('snp_recalibrate_gatk', gatk_args)
def indel_recalibrate_gatk(self, genotype_vcf_in, outputs):
'''INDEL recalibration using GATK'''
recal_indel_out, tranches_indel_out, indel_plots_r_out = outputs
cores = self.get_stage_options('indel_recalibrate_gatk', 'cores')
gatk_args = "-T VariantRecalibrator --disable_auto_index_creation_and_locking_when_reading_rods " \
"-R {reference} --minNumBadVariants 5000 --num_threads {cores} " \
"-resource:mills,known=false,training=true,truth=true,prior=12.0 {mills_hg19} " \
"-resource:1000G,known=false,training=true,truth=true,prior=10.0 {one_k_g_indels} " \
"-an DP -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR " \
"-input {genotype_vcf} -recalFile {recal_indel} " \
"-tranchesFile {tranches_indel} -rscriptFile {indel_plots} " \
" -mode INDEL --maxGaussians 4".format(reference=self.reference,
cores=cores, mills_hg19=self.mills_hg19, one_k_g_indels=self.one_k_g_indels,
genotype_vcf=genotype_vcf_in, recal_indel=recal_indel_out,
tranches_indel=tranches_indel_out, indel_plots=indel_plots_r_out)
self.run_gatk('indel_recalibrate_gatk', gatk_args)
def apply_snp_recalibrate_gatk(self, inputs, vcf_out):
'''Apply SNP recalibration using GATK'''
genotype_vcf_in, [recal_snp, tranches_snp] = inputs
cores = self.get_stage_options('apply_snp_recalibrate_gatk', 'cores')
gatk_args = "-T ApplyRecalibration --disable_auto_index_creation_and_locking_when_reading_rods " \
"-R {reference} --ts_filter_level 99.5 --excludeFiltered --num_threads {cores} " \
"-input {genotype_vcf} -recalFile {recal_snp} -tranchesFile {tranches_snp} " \
"-mode SNP -o {vcf_out}".format(reference=self.reference,
cores=cores, genotype_vcf=genotype_vcf_in, recal_snp=recal_snp,
tranches_snp=tranches_snp, vcf_out=vcf_out)
self.run_gatk('apply_snp_recalibrate_gatk', gatk_args)
def apply_indel_recalibrate_gatk(self, inputs, vcf_out):
'''Apply INDEL recalibration using GATK'''
genotype_vcf_in, [recal_indel, tranches_indel] = inputs
cores = self.get_stage_options('apply_indel_recalibrate_gatk', 'cores')
gatk_args = "-T ApplyRecalibration --disable_auto_index_creation_and_locking_when_reading_rods " \
"-R {reference} --ts_filter_level 99.0 --excludeFiltered --num_threads {cores} " \
"-input {genotype_vcf} -recalFile {recal_indel} -tranchesFile {tranches_indel} " \
"-mode INDEL -o {vcf_out}".format(reference=self.reference,
cores=cores, genotype_vcf=genotype_vcf_in, recal_indel=recal_indel,
tranches_indel=tranches_indel, vcf_out=vcf_out)
self.run_gatk('apply_indel_recalibrate_gatk', gatk_args)
def combine_variants_gatk(self, inputs, vcf_out):
'''Combine variants using GATK'''
recal_snp, [recal_indel] = inputs
cores = self.get_stage_options('combine_variants_gatk', 'cores')
gatk_args = "-T CombineVariants -R {reference} --disable_auto_index_creation_and_locking_when_reading_rods " \
"--num_threads {cores} --genotypemergeoption UNSORTED --variant {recal_snp} " \
"--variant {recal_indel} -o {vcf_out}".format(reference=self.reference,
cores=cores, recal_snp=recal_snp, recal_indel=recal_indel,
vcf_out=vcf_out)
self.run_gatk('combine_variants_gatk', gatk_args)
def select_variants_gatk(self, combined_vcf, vcf_out):
'''Select variants using GATK'''
gatk_args = "-T SelectVariants -R {reference} --disable_auto_index_creation_and_locking_when_reading_rods " \
"--variant {combined_vcf} -select 'DP > 100' -o {vcf_out}".format(reference=self.reference,
combined_vcf=combined_vcf, vcf_out=vcf_out)
self.run_gatk('select_variants_gatk', gatk_args)
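
# Example (hypothetical) of driving two of the stages above on one BAM; the
# enclosing Stages class, its state object and the run_picard/run_gatk
# helpers are assumed to be defined elsewhere in this pipeline:
#
#   stages = Stages(state)
#   stages.sort_bam_picard('sample.bam', 'sample.sorted.bam')
#   stages.mark_duplicates_picard('sample.sorted.bam',
#                                 ['sample.sorted.dedup.bam',
#                                  'sample.dedup.metrics'])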
| khalidm/thepipelinex | src/stages.py | Python | bsd-3-clause | 20,415 | ["BWA"] | 40cc44081a2754a4c15405f1bbfc8ff711cd19fcf37fc11059b9b5c089e7f42f |
# PD is a free, modular C++ library for biomolecular simulation with a
# flexible and scriptable Python interface.
# Copyright (C) 2003-2013 Mike Tyka and Jon Rea
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pd import *
cseed(4)
info()
timer()
ffps = FFParamSet()
## Use the AMBER ff03 forcefield (amber03aa.ff)
ffps.readLib( after("-ff","amber03aa.ff") )
## create a simulation system from the forcefield
sim = System( ffps )
## Create a protein helix using every amino acid
sim.add( NewProteinHelix(ffps,"*A-(CDEFGHIKLMNPQRSTVW)-Y*") )
# create workspace
wspace = WorkSpace( sim )
wspace.printPDB("rotamer_perturb_random_start.pdb")
tra = OutTra_BTF("rotamer_perturb_random_end",wspace)
wspace.addTra(tra)
rot = RotamerLibrary(ffps)
rot.convertLib("../../../param/rotlib/shetty/scl-B30-occ1.0-rmsd1.0-prop20.0",RotLibConvert_Shetty())
#2A
rot.writeLib("shetty.rotamer")
# }
# else
# {
#rot.readLib("rotlib/shetty.rotamer")
# }
timeMe = StatClock()
timeMe.Begin()
app = RandomRotamerApplicator ( wspace, rot, ApplyCartesian )
## if( _testSterics )
## app.addFilter_DefaultSteric()  ## Add the linear-time clash filter for all rotamer states
app.test(250)  ## 250 random rotamer applications over the entire PickedResidueList
app.addFilter_DefaultSteric()  ## Add the linear-time clash filter for all rotamer states
app.test(250)  ## 250 random rotamer applications over the entire PickedResidueList
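## The two test(250) calls above apply the same number of random rotamer
## perturbations, first without and then with the steric clash filter added;
## the StatClock around them times both passes together.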
timeMe.End()
timeMe.ReportMilliSeconds()
wspace.printPDB("rotamer_perturb_random_end.pdb")
| mtyka/pd | examples/rotamer/rotamer_application/run.py | Python | gpl-3.0 | 2,091 | ["CHARMM"] | ac5f20e84e0a063cb4871573c76a530b744160a6f3d8b1300d67caf7552e7959 |
# SYSTEM LIBS
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
plt.ion()
import matplotlib
import numpy as np
import pandas as pd
# from root_pandas import read_root
import sys
from itertools import islice
from scipy import signal # To find peaks, for edge-detecting the crystal
from scipy.optimize import curve_fit
import os
from sklearn import mixture
import random
# MY LIBS
import editable_input as ei # My script for editable text input
from bin_dataframe import bin2D_dataframe
import mie_utils as my
######################################
################# FUNCTIONS
def gaussian(x, mu, sig, c):
return c*matplotlib.mlab.normpdf(x, mu, sig)
def line(x, m, q):
return m*x + q
def fit_and_get_efficiency(input_data, lowest_percentage,
highest_percentage, low_data_threshold, dech_start, dech_end,
AM_means_init, CH_means_init, AM_sigma_init,
CH_sigma_init,fit_tolerance,max_iterations):
"""
Function to be applied on a groupby to get channeling efficiency.
input_data: input dataset.
return: channeling efficiency (0 < efficiency < 1), basically
channeling peak weight.
"""
clf = mixture.GaussianMixture(
n_components=2,
covariance_type='full',
verbose=0,
verbose_interval=10,
random_state=random.SystemRandom().randrange(0,2147483647), # 2**31-1
means_init=[[AM_means_init], [CH_means_init]],
# weights_init=[1 / 2, 1 / 2],
init_params="kmeans",
n_init = 2,
tol=fit_tolerance, # Typical 1e-6
precisions_init = [[[1/AM_sigma_init**2]],[[1/CH_sigma_init**2]]], # [murad^-2] 23 15
#warm_start=True,
max_iter=max_iterations) # Typical 200
################# GET THE DATA FROM THE DATAFRAME
# lowest_percentage = 5
# highest_percentage = 95
input_data = input_data.loc[(input_data < dech_start) | \
(input_data > dech_end)]
first_percentile = np.percentile(input_data, lowest_percentage)
last_percentile = np.percentile(input_data, highest_percentage)
data_reduced = input_data.values[(input_data.values>=first_percentile) & (input_data.values<=last_percentile)]
data = data_reduced.reshape(-1, 1)
#data = input_data.reshape(-1, 1)
################# FIT THE DATA
    # Check that we have enough data for a fit, otherwise just return NaN
    efficiency = np.NaN
    if data.size > low_data_threshold:
        clf.fit(data)
        if not clf.converged_:
            print("[LOG]: Fit did not converge in this bin, bin ignored")
            return np.NaN
r_m1, r_m2 = clf.means_
w1, w2 = clf.weights_
m1, m2 = r_m1[0], r_m2[0]
r_c1, r_c2 = clf.covariances_
c1, c2 = r_c1[0][0], r_c2[0][0]
# print("Means: ", clf.means_, "\n")
# print("Weights: ", clf.weights_, "\n")
# print("Precisions: ", 1/c1, " ", 1/c2, "\n")
# print("Covariances: ", c1, " ", c2, "\n")
# Save the weights in the right array
# Lower delta_thetax is the AM peak, higher CH
if (m1 < m2):
weights_AM = w1
weights_CH = w2
means_AM = m1
means_CH = m2
else:
weights_AM = w2
weights_CH = w1
means_AM = m2
means_CH = m1
efficiency = weights_CH
else:
print("[LOG]: Too few data for this bin, bin ignored")
efficiency = np.NaN
return efficiency
######################################
################# MAIN
################# GET CLI ARGUMENTS AND FIND THE CORRESPONDING CONF FILE
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy = sys.argv[5]
# Use a run specific params file, otherwise look for a crystal specific one,
# otherwise use the general one.
if os.path.isfile(run_number + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = run_number + '_analysis_configuration_params.csv'
elif os.path.isfile(crystal_name + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = crystal_name + '_analysis_configuration_params.csv'
else:
analysis_configuration_params_file = 'analysis_configuration_params.csv'
print("[LOG]: Reading crystal analysis parameters from ", analysis_configuration_params_file)
# Check if the run number is in the actual data file name, otherwise print a
# warning
if '_'+run_number+'_' not in file_name:
print("[WARNING]: '_{}_' not found in file name '{}', maybe check if "
"correct run number or correct file.".format(run_number, file_name))
#################
# if os.path.isfile('crystal_analysis_parameters.csv'):
# parameters_table = pd.read_csv("crystal_analysis_parameters.csv", sep="\t", index_col=0)
# else: #
# raise FileNotFoundError("[ERROR]: File crystal_analysis_parameters.csv not "
# "found. Create it with save_as_hdf.py")
#
# cut_left = float(parameters_table.loc['xmin'])
# cut_right = float(parameters_table.loc['xmax'])
# init_scan = float(parameters_table.loc['init_scan'])
# Read the parameters from the .csv
# .csv example:
#
# parameter_name value
# init_scan 1570674.0
# xmin 0.0
# xmax 0.475
# if os.path.isfile(crystal_name + '_crystal_analysis_parameters.csv'):
#
# crystal_analysis_parameters_file = crystal_name + '_crystal_analysis_parameters.csv'
# else if os.path.isfile(run_number + '_crystal_analysis_parameters.csv'):
# crystal_analysis_parameters_file = run_number + '_crystal_analysis_parameters.csv'
# print("[LOG]: Reading crystal analysis parameters from ", crystal_analysis_parameters_file)
cut_left, cut_right = my.get_from_csv("crystal_analysis_parameters.csv", "xmin", "xmax")
cut_y_low, cut_y_high = my.get_from_csv(analysis_configuration_params_file, "cut_y_low", "cut_y_high")
#################
################# READ THE DATA
# Read the data $chunksize lines at a time. evts=iterator on the groups of lines
# DATAFRAME COLUMNS:
# 'Time', 'Date', 'Event_run', 'Event_evtnum', 'Event_nuclear',
# 'Event_nuclearRaw', 'GonioPos_x', 'GonioPos_y', 'GonioPos_z',
# 'MultiHits_thetaIn_x', 'MultiHits_thetaIn_y',
# 'MultiHits_thetaInErr_x', 'MultiHits_thetaInErr_y',
# 'MultiHits_d0_x', 'MultiHits_d0_y', 'MultiHits_d0Err_x',
# 'MultiHits_d0Err_y', 'Tracks_thetaIn_x', 'Tracks_thetaIn_y',
# 'Tracks_thetaOut_x', 'Tracks_thetaOut_y', 'Tracks_thetaInErr_x',
# 'Tracks_thetaInErr_y', 'Tracks_thetaOutErr_x',
# 'Tracks_thetaOutErr_y', 'Tracks_d0_x', 'Tracks_d0_y',
# 'Tracks_d0Err_x', 'Tracks_d0Err_y', 'Tracks_chi2_x',
# 'Tracks_chi2_y', 'SingleTrack', 'MultiHit'
chunksize = 2000000
interesting_columns = ["Tracks_d0_y", "Tracks_thetaOut_x", "Tracks_thetaIn_x"]
# Important to remember that the columns need to be indexed with
# data_columns=[...] when .hdf is created, to be able to use "where" on them
cuts_and_selections = ["SingleTrack == 1", "Tracks_d0_x > cut_left",
"Tracks_d0_x < cut_right","Tracks_d0_y > cut_y_low",
"Tracks_d0_y < cut_y_high"]
evts = pd.read_hdf(file_name, chunksize = chunksize, columns=interesting_columns, where=cuts_and_selections)
loaded_rows = 0
events = pd.DataFrame(columns=interesting_columns)
print("[LOG]: Loading data...")
for df in evts:
loaded_rows = loaded_rows + df.shape[0]
print("\n[LOG] loaded ", loaded_rows, " rows\n")
df.info()
events = events.append(df,ignore_index=True) # join inner maybe unnecessary here
# break; # Uncomment to get only the first chunk
print("\n[LOG]: Loaded data!\n")
events.info()
# Change angular units to microradians
events["Delta_Theta_x"] = 1e6*(events.loc[:,'Tracks_thetaOut_x'].values -
events.loc[:,'Tracks_thetaIn_x'].values)
events["Tracks_thetaIn_x"] = 1e6*events["Tracks_thetaIn_x"]
events['Tracks_thetaOut_x'] = 1e6*events['Tracks_thetaOut_x']
#################
################# BIN THE DATA AND CREATE EFF PLOT
y_nbins, thetain_x_nbins = my.get_from_csv(analysis_configuration_params_file,
"torcorr_eff_y_nbins",
"torcorr_eff_thetain_x_nbins")
eff_range_y_low, eff_range_y_high = my.get_from_csv(
analysis_configuration_params_file,
"torcorr_eff_range_y_low",
"torcorr_eff_range_y_high")
eff_range_tx_low, eff_range_tx_high = my.get_from_csv(
analysis_configuration_params_file,
"torcorr_eff_range_tx_low",
"torcorr_eff_range_tx_high")
gruppi = bin2D_dataframe(events, "Tracks_d0_y", "Tracks_thetaIn_x",
# (-2,2),(-30e-5,30e-5),17*4,12*4)
(eff_range_y_low,eff_range_y_high),
(eff_range_tx_low,eff_range_tx_high),
y_nbins, thetain_x_nbins)
lowest_percentage, highest_percentage, \
low_data_threshold = my.get_from_csv(analysis_configuration_params_file,
"torcorr_eff_low_percentage",
"torcorr_eff_high_percentage",
"torcorr_eff_low_data_threshold")
dech_start, dech_stop = my.get_from_csv(analysis_configuration_params_file,
"dech_start",
"dech_end")
AM_means_init, CH_means_init, AM_sigma_init, \
CH_sigma_init, fit_tolerance = my.get_from_csv( \
analysis_configuration_params_file,
"torcorr_eff_AM_means_init",
"torcorr_eff_CH_means_init",
"torcorr_eff_AM_sigma_init",
"torcorr_eff_CH_sigma_init",
"torcorr_eff_fit_tolerance")
max_iterations = int(my.get_from_csv(analysis_configuration_params_file,
"torcorr_eff_max_iterations"))
robust_fit = lambda x: fit_and_get_efficiency(x, lowest_percentage,
highest_percentage, low_data_threshold, dech_start,
dech_stop, AM_means_init, CH_means_init, AM_sigma_init,
CH_sigma_init, fit_tolerance, max_iterations)
efficiencies = gruppi["Delta_Theta_x"].aggregate(robust_fit)
# Theta_x bin close to the middle, to avoid NaNs
center_angle = efficiencies.index[int(thetain_x_nbins//2)][1]
# FIT THE TORSION
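# For each y_in bin, the torsion is estimated in two ways: the
# efficiency-weighted mean of theta_x_in (avg_Delta_Theta_x) and the center
# of a robust Gaussian fit to the efficiency profile
# (avg_Delta_Theta_x_fit_noNaN); the Gaussian centers are then fitted with a
# straight line to extract the torsion slope m and offset q.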
avg_Delta_Theta_x = [np.average(efficiencies.dropna().xs(xx,level=0).index.values, \
weights=efficiencies.dropna().xs(xx,level=0).values) for xx \
in efficiencies.xs(center_angle,level=1).index.values]
avg_Delta_Theta_x_fit_noNaN = [curve_fit(gaussian,efficiencies.xs(xx,level=0).index.values,efficiencies.fillna(0).xs(xx,level=0).values, \
#sigma=1/np.sqrt(gruppi.apply(len).xs(xx,level=0).values),
method="dogbox",loss="cauchy", max_nfev=1000*thetain_x_nbins)[0][0] for xx \
in efficiencies.xs(center_angle,level=1).index.values]
# avg_Delta_Theta_x_fit_NaNzero = [curve_fit(gaussian,efficiencies.fillna(0).xs(xx,level=0).index.values,efficiencies.fillna(0).xs(xx,level=0).values,method="dogbox",loss="cauchy")[0][0] for xx \
# in efficiencies.fillna(0).xs(0.5,level=1).index.values]
#plt.plot(efficiencies.xs(0.5,level=1).index.get_values(),avg_Delta_Theta_x, "-", label="Avg")
#plt.plot(efficiencies.xs(0.5,level=1).index.get_values(),avg_Delta_Theta_x_fit_noNaN, "-", label="Filtered fit")
# plt.plot(avg_Delta_Theta_x_fit_NaNzero, "-", label="fit NaN zero")
line_par, line_par_cov = curve_fit(line,efficiencies.xs(center_angle,level=1).index.get_values(),avg_Delta_Theta_x_fit_noNaN, method="dogbox", loss="cauchy")
p, pc = curve_fit(line,efficiencies.xs(center_angle,level=1).index.get_values(),avg_Delta_Theta_x)
#
# # Plot as 2D array
plt.figure()
grid_for_histo=np.array([list(v) for v in efficiencies.index.values])
plt.hist2d(grid_for_histo[:,0],grid_for_histo[:,1], weights=efficiencies.values,
bins=[y_nbins, thetain_x_nbins], range=[[eff_range_y_low, eff_range_y_high],[eff_range_tx_low, eff_range_tx_high]]) # TODO
plt.suptitle(r"Crystal {}, run {} — {} {} GeV".format(crystal_name, run_number, particle_name, particle_energy),fontweight='bold')
plt.title(r"Efficiency as function of {}".format(r"$x_{in}$ and $\Delta \theta_{x}$"))
#plt.plot(efficiencies.xs(0.5,level=1).index.get_values(),avg_Delta_Theta_x, "-", label="Avg")
#plt.plot(efficiencies.xs(0.5,level=1).index.get_values(),avg_Delta_Theta_x_fit_noNaN, "-", label="Filtered fit")
plt.plot(efficiencies.xs(center_angle,level=1).index.get_values(),line(efficiencies.xs(center_angle,level=1).index.get_values(), *line_par), label="Torsion linear fit", color = 'r')
plt.xlabel(r'$y_{in}\ [mm]$')
plt.ylabel(r'$\theta_{x_{in}}\ [\mu rad]$')
# print(events)
plt.colorbar()
#plt.tight_layout()
plt.savefig("latex/img/efficiency_histo.pdf")
plt.show()
################# SAVE FIT PLOT TO FILE
plt.figure()
plt.plot(efficiencies.xs(center_angle,level=1).index.get_values(),avg_Delta_Theta_x, "-", label="Avg")
plt.plot(efficiencies.xs(center_angle,level=1).index.get_values(),avg_Delta_Theta_x_fit_noNaN, "-", label="Filtered fit")
plt.plot(efficiencies.xs(center_angle,level=1).index.get_values(),line(efficiencies.xs(center_angle,level=1).index.get_values(), *line_par), label="Torsion linear fit", color = 'r')
plt.suptitle(r"Crystal {}, run {} — {} {} GeV".format(crystal_name, run_number, particle_name, particle_energy),fontweight='bold')
plt.title(r"Torsion fit: {}".format(r"$y_{in}$ vs $\Delta \theta_{x}$"))
plt.xlabel(r'$y_{in}\ [mm]$')
plt.ylabel(r'$\theta_{x_{in}}\ [\mu rad]$')
#plt.tight_layout()
plt.legend()
plt.savefig("latex/img/torsion_fit.pdf")
plt.show()
#################
line_par_err = np.sqrt(np.diag(line_par_cov))
pe = np.sqrt(np.diag(pc))
print("m: {:.5} +- {:.5}\nq: {:.5} +- {:.5}".format(line_par[0], line_par_err[0],
line_par[1], line_par_err[1]))
################# SAVE PARAMETERS TO FILE
tor_m = line_par[0]
tor_q = line_par[1]
my.save_in_csv("crystal_analysis_parameters.csv",
torsion_m=line_par[0],
torsion_m_err=line_par_err[0],
torsion_q=line_par[1],
torsion_q_err=line_par_err[1],)
#################
################# UNCORRECTED DATA
plt.figure()
hist_tx_nbins, hist_dtx_nbins = my.get_from_csv(analysis_configuration_params_file,
"torcorr_hist_tx_nbins",
"torcorr_hist_dtx_nbins")
hist_range_tx_low, hist_range_tx_high = my.get_from_csv(
analysis_configuration_params_file,
"torcorr_hist_range_tx_low",
"torcorr_hist_range_tx_high")
hist_range_dtx_low, hist_range_dtx_high = my.get_from_csv(
analysis_configuration_params_file,
"torcorr_hist_range_dtx_low",
"torcorr_hist_range_dtx_high")
plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values ,events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values,\
bins=[hist_tx_nbins,hist_dtx_nbins],
norm=LogNorm(),
range=[[hist_range_tx_low, hist_range_tx_high],
[hist_range_dtx_low, hist_range_dtx_high]])
plt.suptitle(r"Crystal {}, run {} — {} {} GeV".format(crystal_name, run_number, particle_name, particle_energy),fontweight='bold')
plt.title(r"Histogram not corrected: {}".format(r"$\theta_{x}$ vs $\Delta \theta_{x}$"))
plt.xlabel(r'$\theta_{x_{in}}\ [\mu rad]$')
plt.ylabel(r'$\Delta \theta_{x}\ [\mu rad]$')
# print(events)
plt.colorbar()
#plt.tight_layout()
plt.savefig("latex/img/nocorr_histo.pdf")
plt.show()
#################
################# SAVE TO HDF FILE THE ORIGINAL DATA (BUT WITH CUTS)
events.to_hdf("nocorr_"+file_name+".hdf","simpleEvent",
format="table",
fletcher32=True, mode="a", complevel=9, append=False,
data_columns=['Tracks_thetaIn_x', 'Tracks_thetaOut_x'])
#################
################# CORRECT FOR TORSION AND SHOW THE PLOT
#events["Tracks_thetaIn_x"]
events["Tracks_thetaIn_x"] = (events["Tracks_thetaIn_x"] -
(tor_m*events["Tracks_d0_y"]+tor_q))# + init_scan
plt.figure()
plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values ,events.loc[:,'Delta_Theta_x'].values,\
bins=[hist_tx_nbins,hist_dtx_nbins],
norm=LogNorm(),
range=[[hist_range_tx_low, hist_range_tx_high],
[hist_range_dtx_low, hist_range_dtx_high]])
plt.suptitle(r"Crystal {}, run {} — {} {} GeV".format(crystal_name, run_number, particle_name, particle_energy),fontweight='bold')
plt.title(r"Histogram corrected for torsion: {}".format(r"$\theta_{x}$ vs $\Delta \theta_{x}$"))
plt.xlabel(r'$\theta_{x_{in}}\ [\mu rad]$')
plt.ylabel(r'$\Delta \theta_{x}\ [\mu rad]$')
# print(events)
plt.colorbar()
#plt.tight_layout()
plt.savefig("latex/img/corrected_histo.pdf")
plt.show()
#################
################# SAVE TO HDF FILE THE CORRECTED DATA
events.to_hdf("torsion_corrected_"+file_name+".hdf","simpleEvent",
format="table",
fletcher32=True, mode="a", complevel=9, append=False,
data_columns=['Tracks_thetaIn_x', 'Tracks_thetaOut_x','Delta_Theta_x'])
#################
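# Example (hypothetical) invocation; the five positional arguments are the
# input HDF5 file, crystal name, run number, particle name and beam energy:
#   python torsion_correction.py run_data.hdf STF99 5721 proton 400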
| f-forcher/crystal-channeling-analysis | torsion_correction.py | Python | gpl-3.0 | 18,175 | ["CRYSTAL", "Gaussian"] | 5d9e1a53021869517c719fb53db5a7c6b04e4e11e282fd73452ebc5a4d988e3f |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-21 03:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def create_card_set_seed_data(apps, schema_editor):
CardSet = apps.get_model("cards", "CardSet")
CardSet.objects.using(schema_editor.connection.alias).bulk_create([
CardSet(name="Core")
])
def delete_card_set_seed_data(apps, schema_editor):
CardSet = apps.get_model("cards", "CardSet")
db_alias = schema_editor.connection.alias
CardSet.objects.using(db_alias).filter(name="Core").delete()
def create_hero_team_seed_data(apps, schema_editor):
HeroicTeam = apps.get_model("cards", "HeroicTeam")
HeroicTeam.objects.using(schema_editor.connection.alias).bulk_create([
HeroicTeam(team_name="X-Men", description="Born as mutants, with strange superpowers that set them apart, the X-Men are sworn to protect a world that hates and fears them."),
HeroicTeam(team_name="Avengers", description="And there came a day unlike any other, when Earth's mightiest heroes and heroines found themselves united against a common threat. On that day, the Avengers were born."),
HeroicTeam(team_name="S.H.I.E.L.D.", description="The 'Strategic Hazard Intervention Espionage Logistics Directorate' is a clandestine military and espionage organization led by Director Nick Fury."),
HeroicTeam(team_name="Spider Friends", description="Spider-Man received his powers after being bit by a radioactive spider and along with his allies will protect New York from those who would threaten the innocent.")
])
def delete_hero_team_seed_data(apps, schema_editor):
HeroicTeam = apps.get_model("cards", "HeroicTeam")
db_alias = schema_editor.connection.alias
HeroicTeam.objects.using(db_alias).filter(team_name="X-Men").delete()
HeroicTeam.objects.using(db_alias).filter(team_name="Avengers").delete()
HeroicTeam.objects.using(db_alias).filter(team_name="S.H.I.E.L.D.").delete()
HeroicTeam.objects.using(db_alias).filter(team_name="Spider Friends").delete()
def create_hero_class_seed_data(apps, schema_editor):
HeroClass = apps.get_model("cards", "HeroClass")
HeroClass.objects.using(schema_editor.connection.alias).bulk_create([
HeroClass(class_name="Strength", description="Strength heroes include heroes with raw strength, but also heroes with strength of will, determination, and strong leadership."),
HeroClass(class_name="Instinct", description="Instinct heroes use savagery and quick reflexes to dominate combats. Some Instinct heroes use superhuman senses to get an edge on their opponents."),
HeroClass(class_name="Covert", description="Covert heroes include heroes using trickery and deception to outwit their foes. They also include heroes making clever battle plans and heroes using subtle superpowers to gain subtle advantages."),
HeroClass(class_name="Tech", description="Tech heroes include heroes using advanced weaponry, incredible gadgets, brilliant inventions, or next-generation science."),
HeroClass(class_name="Ranged", description="Ranged heroes like to blow things up. Some Ranged heroes use inherent superpowers to blast things, while others use energy beams, elemental powers, and mental assaults."),
HeroClass(class_name="Basic", description="Basic heroes include all the starting S.H.I.E.L.D. heroes and officers. They are heroes in their own way, but they don't quite get the job done as well as high-flying super heroes.")
])
def delete_hero_class_seed_data(apps, schema_editor):
HeroClass = apps.get_model("cards", "HeroClass")
db_alias = schema_editor.connection.alias
HeroClass.objects.using(db_alias).filter(class_name="Strength").delete()
HeroClass.objects.using(db_alias).filter(class_name="Instinct").delete()
HeroClass.objects.using(db_alias).filter(class_name="Covert").delete()
HeroClass.objects.using(db_alias).filter(class_name="Tech").delete()
HeroClass.objects.using(db_alias).filter(class_name="Ranged").delete()
HeroClass.objects.using(db_alias).filter(class_name="Basic").delete()
def create_hero_seed_data(apps, schema_editor):
Hero = apps.get_model("cards", "Hero")
CardSet = apps.get_model("cards", "CardSet")
HeroicTeam = apps.get_model("cards", "HeroicTeam")
db_alias = schema_editor.connection.alias
core_set = CardSet.objects.using(db_alias).get(name="Core")
xmen = HeroicTeam.objects.using(db_alias).get(team_name="X-Men")
avengers = HeroicTeam.objects.using(db_alias).get(team_name="Avengers")
shield = HeroicTeam.objects.using(db_alias).get(team_name="S.H.I.E.L.D.")
spider = HeroicTeam.objects.using(db_alias).get(team_name="Spider Friends")
Hero.objects.using(db_alias).bulk_create([
Hero(name="Black Widow", static_folder="widow", hero_team=avengers, card_set=core_set),
Hero(name="Captain America", static_folder="captain", hero_team=avengers, card_set=core_set),
Hero(name="Cyclops", static_folder="cyclops", hero_team=xmen, card_set=core_set),
Hero(name="Deadpool", static_folder="deadpool", card_set=core_set),
Hero(name="Emma Frost", static_folder="emma", hero_team=xmen, card_set=core_set),
Hero(name="Nick Fury", static_folder="fury", hero_team=shield, card_set=core_set),
Hero(name="Gambit", static_folder="gambit", hero_team=xmen, card_set=core_set),
Hero(name="Hawkeye", static_folder="hawkeye", hero_team=avengers, card_set=core_set),
Hero(name="Hulk", static_folder="hulk", hero_team=avengers, card_set=core_set),
Hero(name="Iron Man", static_folder="ironman", hero_team=avengers, card_set=core_set),
Hero(name="Rogue", static_folder="rogue", hero_team=xmen, card_set=core_set),
Hero(name="Spider-Man", static_folder="spider", hero_team=spider, card_set=core_set),
Hero(name="Storm", static_folder="storm", hero_team=xmen, card_set=core_set),
Hero(name="Thor", static_folder="thor", hero_team=avengers, card_set=core_set),
Hero(name="Wolverine", static_folder="wolverine", hero_team=xmen, card_set=core_set),
])
def delete_hero_seed_data(apps, schema_editor):
Hero = apps.get_model("cards", "Hero")
db_alias = schema_editor.connection.alias
Hero.objects.using(db_alias).filter(name="Black Widow").delete()
Hero.objects.using(db_alias).filter(name="Captain America").delete()
Hero.objects.using(db_alias).filter(name="Cyclops").delete()
Hero.objects.using(db_alias).filter(name="Deadpool").delete()
Hero.objects.using(db_alias).filter(name="Emma Frost").delete()
Hero.objects.using(db_alias).filter(name="Nick Fury").delete()
Hero.objects.using(db_alias).filter(name="Gambit").delete()
Hero.objects.using(db_alias).filter(name="Hawkeye").delete()
Hero.objects.using(db_alias).filter(name="Hulk").delete()
Hero.objects.using(db_alias).filter(name="Iron Man").delete()
Hero.objects.using(db_alias).filter(name="Rogue").delete()
Hero.objects.using(db_alias).filter(name="Spider-Man").delete()
Hero.objects.using(db_alias).filter(name="Storm").delete()
Hero.objects.using(db_alias).filter(name="Thor").delete()
Hero.objects.using(db_alias).filter(name="Wolverine").delete()
def create_hero_card_seed_data(apps, schema_editor):
HeroCard = apps.get_model("cards", "HeroCard")
Hero = apps.get_model("cards", "Hero")
HeroClass = apps.get_model("cards", "HeroClass")
db_alias = schema_editor.connection.alias
widow = Hero.objects.using(db_alias).get(name="Black Widow")
captain = Hero.objects.using(db_alias).get(name="Captain America")
cyclops = Hero.objects.using(db_alias).get(name="Cyclops")
deadpool = Hero.objects.using(db_alias).get(name="Deadpool")
frost = Hero.objects.using(db_alias).get(name="Emma Frost")
fury = Hero.objects.using(db_alias).get(name="Nick Fury")
gambit = Hero.objects.using(db_alias).get(name="Gambit")
hawkeye = Hero.objects.using(db_alias).get(name="Hawkeye")
hulk = Hero.objects.using(db_alias).get(name="Hulk")
iron = Hero.objects.using(db_alias).get(name="Iron Man")
rogue = Hero.objects.using(db_alias).get(name="Rogue")
spider = Hero.objects.using(db_alias).get(name="Spider-Man")
storm = Hero.objects.using(db_alias).get(name="Storm")
thor = Hero.objects.using(db_alias).get(name="Thor")
wolverine = Hero.objects.using(db_alias).get(name="Wolverine")
strength = HeroClass.objects.using(db_alias).get(class_name="Strength")
instinct = HeroClass.objects.using(db_alias).get(class_name="Instinct")
covert = HeroClass.objects.using(db_alias).get(class_name="Covert")
tech = HeroClass.objects.using(db_alias).get(class_name="Tech")
ranged = HeroClass.objects.using(db_alias).get(class_name="Ranged")
HeroCard.objects.using(db_alias).bulk_create([
#Black Widow
HeroCard(major_text="Mission Accomplished", cost=2, base_recruit=0, base_attack=0, quantity=5, card_file_name="ma.jpg", hero=widow),
HeroCard(major_text="Dangerous Rescue", cost=3, base_recruit=0, base_attack=2, quantity=5, card_file_name="dr.jpg", hero=widow),
HeroCard(major_text="Covert Operation", cost=4, base_recruit=0, base_attack=0, quantity=3, card_file_name="co.jpg", hero=widow),
HeroCard(major_text="Silent Sniper", cost=7, base_recruit=0, base_attack=4, quantity=1, card_file_name="ss.jpg", hero=widow),
#Captain America
HeroCard(major_text="Avengers Assemble!", cost=3, base_recruit=0, base_attack=0, quantity=5, card_file_name="aa.jpg", hero=captain),
HeroCard(major_text="Perfect Teamwork", cost=4, base_recruit=0, base_attack=0, quantity=5, card_file_name="pt.jpg", hero=captain),
HeroCard(major_text="Diving Block", cost=6, base_recruit=0, base_attack=4, quantity=3, card_file_name="db.jpg", hero=captain),
HeroCard(major_text="A Day Unlike Any Other", cost=7, base_recruit=0, base_attack=3, quantity=1, card_file_name="aduao.jpg", hero=captain),
#Cyclops
HeroCard(major_text="Determination", cost=2, base_recruit=3, base_attack=0, quantity=5, card_file_name="d.jpg", hero=cyclops),
HeroCard(major_text="Optic Blast", cost=3, base_recruit=0, base_attack=3, quantity=5, card_file_name="ob.jpg", hero=cyclops),
HeroCard(major_text="Unending Energy", cost=6, base_recruit=0, base_attack=4, quantity=3, card_file_name="ue.jpg", hero=cyclops),
HeroCard(major_text="X-Men United", cost=8, base_recruit=0, base_attack=6, quantity=1, card_file_name="xmu.jpg", hero=cyclops),
#Deadpool
HeroCard(major_text="Here, Hold This For A Second", cost=3, base_recruit=2, base_attack=0, quantity=5, card_file_name="hhtfas.jpg", hero=deadpool),
HeroCard(major_text="Oddball", cost=5, base_recruit=0, base_attack=2, quantity=5, card_file_name="ob.jpg", hero=deadpool),
HeroCard(major_text="Hey, Can I Get A Do-Over", cost=3, base_recruit=0, base_attack=2, quantity=3, card_file_name="hcigado.jpg", hero=deadpool),
HeroCard(major_text="Random Acts of Unkindness", cost=7, base_recruit=0, base_attack=6, quantity=1, card_file_name="raou.jpg", hero=deadpool),
#Emma Frost
HeroCard(major_text="Mental Discipline", cost=3, base_recruit=1, base_attack=0, quantity=5, card_file_name="md.jpg", hero=frost),
HeroCard(major_text="Shadowed Thoughts", cost=4, base_recruit=0, base_attack=2, quantity=5, card_file_name="st.jpg", hero=frost),
HeroCard(major_text="Psychic Link", cost=5, base_recruit=0, base_attack=3, quantity=3, card_file_name="pl.jpg", hero=frost),
HeroCard(major_text="Diamond Form", cost=7, base_recruit=0, base_attack=5, quantity=1, card_file_name="df.jpg", hero=frost),
#Nick Fury
HeroCard(major_text="High-Tech Weaponry", cost=3, base_recruit=0, base_attack=2, quantity=5, card_file_name="htw.jpg", hero=fury),
HeroCard(major_text="Battlefield Promotion", cost=4, base_recruit=0, base_attack=0, quantity=5, card_file_name="bp.jpg", hero=fury),
HeroCard(major_text="Legendary Commander", cost=6, base_recruit=0, base_attack=1, quantity=3, card_file_name="lc.jpg", hero=fury),
HeroCard(major_text="Pure Fury", cost=8, base_recruit=0, base_attack=0, quantity=1, card_file_name="pf.jpg", hero=fury),
#Gambit
HeroCard(major_text="Stack The Deck", cost=2, base_recruit=0, base_attack=0, quantity=5, card_file_name="std.jpg", hero=gambit),
HeroCard(major_text="Card Shark", cost=4, base_recruit=0, base_attack=2, quantity=5, card_file_name="cs.jpg", hero=gambit),
HeroCard(major_text="Hypnotic Charm", cost=3, base_recruit=2, base_attack=0, quantity=3, card_file_name="hc.jpg", hero=gambit),
HeroCard(major_text="High Stakes Jackpot", cost=7, base_recruit=0, base_attack=4, quantity=1, card_file_name="hsj.jpg", hero=gambit),
#Hawkeye
HeroCard(major_text="Quick Draw", cost=3, base_recruit=0, base_attack=1, quantity=5, card_file_name="qd.jpg", hero=hawkeye),
HeroCard(major_text="Team Player", cost=4, base_recruit=0, base_attack=2, quantity=5, card_file_name="tp.jpg", hero=hawkeye),
HeroCard(major_text="Covering Fire", cost=5, base_recruit=0, base_attack=3, quantity=3, card_file_name="cf.jpg", hero=hawkeye),
HeroCard(major_text="Impossible Trick Shot", cost=7, base_recruit=0, base_attack=5, quantity=1, card_file_name="its.jpg", hero=hawkeye),
#Hulk
HeroCard(major_text="Growing Anger", cost=3, base_recruit=0, base_attack=2, quantity=5, card_file_name="ga.jpg", hero=hulk),
HeroCard(major_text="Unstoppable Hulk", cost=4, base_recruit=0, base_attack=2, quantity=5, card_file_name="uh.jpg", hero=hulk),
HeroCard(major_text="Crazed Rampage", cost=5, base_recruit=0, base_attack=4, quantity=3, card_file_name="cr.jpg", hero=hulk),
HeroCard(major_text="Hulk Smash!", cost=7, base_recruit=0, base_attack=5, quantity=1, card_file_name="hs.jpg", hero=hulk),
#Iron Man
HeroCard(major_text="Repulsor Rays", cost=3, base_recruit=0, base_attack=2, quantity=5, card_file_name="rr.jpg", hero=iron),
HeroCard(major_text="Endless Invention", cost=3, base_recruit=0, base_attack=0, quantity=5, card_file_name="ei.jpg", hero=iron),
HeroCard(major_text="Arc Reactor", cost=5, base_recruit=0, base_attack=3, quantity=3, card_file_name="ar.jpg", hero=iron),
HeroCard(major_text="Quantum Breakthrough", cost=7, base_recruit=0, base_attack=0, quantity=1, card_file_name="qb.jpg", hero=iron),
#Rogue
HeroCard(major_text="Energy Drain", cost=3, base_recruit=2, base_attack=0, quantity=5, card_file_name="ed.jpg", hero=rogue),
HeroCard(major_text="Borrowed Brawn", cost=4, base_recruit=0, base_attack=1, quantity=5, card_file_name="bb.jpg", hero=rogue),
HeroCard(major_text="Copy Powers", cost=5, base_recruit=0, base_attack=0, quantity=3, card_file_name="cp.jpg", hero=rogue),
HeroCard(major_text="Steal Abilities", cost=8, base_recruit=0, base_attack=4, quantity=1, card_file_name="sa.jpg", hero=rogue),
#Spider-Man
HeroCard(major_text="Great Responsibility", cost=2, base_recruit=0, base_attack=1, quantity=5, card_file_name="gr.jpg", hero=spider),
HeroCard(major_text="Astonishing Strength", cost=2, base_recruit=1, base_attack=0, quantity=5, card_file_name="as.jpg", hero=spider),
HeroCard(major_text="Web-Shooters", cost=2, base_recruit=0, base_attack=0, quantity=3, card_file_name="ws.jpg", hero=spider),
HeroCard(major_text="The Amazing Spider-Man", cost=2, base_recruit=0, base_attack=0, quantity=1, card_file_name="tasm.jpg", hero=spider),
#Storm
HeroCard(major_text="Lightning Bolt", cost=4, base_recruit=0, base_attack=2, quantity=5, card_file_name="lb.jpg", hero=storm),
HeroCard(major_text="Gathering Stormclouds", cost=3, base_recruit=2, base_attack=0, quantity=5, card_file_name="gs.jpg", hero=storm),
HeroCard(major_text="Spinning Cyclone", cost=4, base_recruit=0, base_attack=4, quantity=3, card_file_name="sc.jpg", hero=storm),
HeroCard(major_text="Tidal Wave", cost=7, base_recruit=0, base_attack=5, quantity=1, card_file_name="tw.jpg", hero=storm),
#Thor
HeroCard(major_text="Odinson", cost=3, base_recruit=2, base_attack=0, quantity=5, card_file_name="o.jpg", hero=thor),
HeroCard(major_text="Surge Of Power", cost=4, base_recruit=2, base_attack=0, quantity=5, card_file_name="sop.jpg", hero=thor),
HeroCard(major_text="Call Lightning", cost=6, base_recruit=0, base_attack=3, quantity=3, card_file_name="cl.jpg", hero=thor),
HeroCard(major_text="God Of Thunder", cost=8, base_recruit=5, base_attack=0, quantity=1, card_file_name="got.jpg", hero=thor),
#Wolverine
HeroCard(major_text="Keen Senses", cost=2, base_recruit=0, base_attack=1, quantity=5, card_file_name="ks.jpg", hero=wolverine),
HeroCard(major_text="Healing Factor", cost=3, base_recruit=0, base_attack=2, quantity=5, card_file_name="hf.jpg", hero=wolverine),
HeroCard(major_text="Frenzied Slashing", cost=5, base_recruit=0, base_attack=2, quantity=3, card_file_name="fs.jpg", hero=wolverine),
HeroCard(major_text="Berserker Rage", cost=8, base_recruit=0, base_attack=0, quantity=1, card_file_name="br.jpg", hero=wolverine)
])
#Black Widow
HeroCard.objects.using(db_alias).get(major_text="Mission Accomplished").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Dangerous Rescue").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Covert Operation").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Silent Sniper").hero_class.add(covert);
#Captain America
HeroCard.objects.using(db_alias).get(major_text="Avengers Assemble!").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Perfect Teamwork").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Diving Block").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="A Day Unlike Any Other").hero_class.add(covert);
#Cyclops
HeroCard.objects.using(db_alias).get(major_text="Determination").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Optic Blast").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Unending Energy").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="X-Men United").hero_class.add(ranged);
#Deadpool
HeroCard.objects.using(db_alias).get(major_text="Here, Hold This For A Second").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Oddball").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Hey, Can I Get A Do-Over").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Random Acts of Unkindness").hero_class.add(instinct);
#Emma Frost
HeroCard.objects.using(db_alias).get(major_text="Mental Discipline").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Shadowed Thoughts").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Psychic Link").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Diamond Form").hero_class.add(strength);
#Nick Fury
HeroCard.objects.using(db_alias).get(major_text="High-Tech Weaponry").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Battlefield Promotion").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Legendary Commander").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Pure Fury").hero_class.add(tech);
#Gambit
HeroCard.objects.using(db_alias).get(major_text="Stack The Deck").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Card Shark").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Hypnotic Charm").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="High Stakes Jackpot").hero_class.add(instinct);
#Hawkeye
HeroCard.objects.using(db_alias).get(major_text="Quick Draw").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Team Player").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Covering Fire").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Impossible Trick Shot").hero_class.add(tech);
#Hulk
HeroCard.objects.using(db_alias).get(major_text="Growing Anger").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Unstoppable Hulk").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Crazed Rampage").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Hulk Smash!").hero_class.add(strength);
#Iron Man
HeroCard.objects.using(db_alias).get(major_text="Repulsor Rays").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Endless Invention").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Arc Reactor").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="Quantum Breakthrough").hero_class.add(tech);
#Rogue
HeroCard.objects.using(db_alias).get(major_text="Energy Drain").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Borrowed Brawn").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Copy Powers").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Steal Abilities").hero_class.add(strength);
#Spider-Man
HeroCard.objects.using(db_alias).get(major_text="Great Responsibility").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Astonishing Strength").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Web-Shooters").hero_class.add(tech);
HeroCard.objects.using(db_alias).get(major_text="The Amazing Spider-Man").hero_class.add(covert);
#Storm
HeroCard.objects.using(db_alias).get(major_text="Lightning Bolt").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Gathering Stormclouds").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Spinning Cyclone").hero_class.add(covert);
HeroCard.objects.using(db_alias).get(major_text="Tidal Wave").hero_class.add(ranged);
#Thor
HeroCard.objects.using(db_alias).get(major_text="Odinson").hero_class.add(strength);
HeroCard.objects.using(db_alias).get(major_text="Surge Of Power").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="Call Lightning").hero_class.add(ranged);
HeroCard.objects.using(db_alias).get(major_text="God Of Thunder").hero_class.add(ranged);
#Wolverine
HeroCard.objects.using(db_alias).get(major_text="Keen Senses").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Healing Factor").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Frenzied Slashing").hero_class.add(instinct);
HeroCard.objects.using(db_alias).get(major_text="Berserker Rage").hero_class.add(instinct);
def delete_hero_card_seed_data(apps, schema_editor):
HeroCard = apps.get_model("cards", "HeroCard")
db_alias = schema_editor.connection.alias
#Black Widow
HeroCard.objects.using(db_alias).filter(major_text="Mission Accomplished").delete()
HeroCard.objects.using(db_alias).filter(major_text="Dangerous Rescue").delete()
HeroCard.objects.using(db_alias).filter(major_text="Covert Operation").delete()
HeroCard.objects.using(db_alias).filter(major_text="Silent Sniper").delete()
#Captain America
HeroCard.objects.using(db_alias).filter(major_text="Avengers Assemble!").delete()
HeroCard.objects.using(db_alias).filter(major_text="Perfect Teamwork").delete()
HeroCard.objects.using(db_alias).filter(major_text="Diving Block").delete()
HeroCard.objects.using(db_alias).filter(major_text="A Day Unlike Any Other").delete()
#Cyclops
HeroCard.objects.using(db_alias).filter(major_text="Determination").delete()
HeroCard.objects.using(db_alias).filter(major_text="Optic Blast").delete()
HeroCard.objects.using(db_alias).filter(major_text="Unending Energy").delete()
HeroCard.objects.using(db_alias).filter(major_text="X-Men United").delete()
#Deadpool
HeroCard.objects.using(db_alias).filter(major_text="Here, Hold This For A Second").delete()
HeroCard.objects.using(db_alias).filter(major_text="Oddball").delete()
HeroCard.objects.using(db_alias).filter(major_text="Hey, Can I Get A Do-Over").delete()
HeroCard.objects.using(db_alias).filter(major_text="Random Acts of Unkindness").delete()
#Emma Frost
HeroCard.objects.using(db_alias).filter(major_text="Mental Discipline").delete()
HeroCard.objects.using(db_alias).filter(major_text="Shadowed Thoughts").delete()
HeroCard.objects.using(db_alias).filter(major_text="Psychic Link").delete()
HeroCard.objects.using(db_alias).filter(major_text="Diamond Form").delete()
#Nick Fury
HeroCard.objects.using(db_alias).filter(major_text="High-Tech Weaponry").delete()
HeroCard.objects.using(db_alias).filter(major_text="Battlefield Promotion").delete()
HeroCard.objects.using(db_alias).filter(major_text="Legendary Commander").delete()
HeroCard.objects.using(db_alias).filter(major_text="Pure Fury").delete()
#Gambit
HeroCard.objects.using(db_alias).filter(major_text="Stack The Deck").delete()
HeroCard.objects.using(db_alias).filter(major_text="Card Shark").delete()
HeroCard.objects.using(db_alias).filter(major_text="Hypnotic Charm").delete()
HeroCard.objects.using(db_alias).filter(major_text="High Stakes Jackpot").delete()
#Hawkeye
HeroCard.objects.using(db_alias).filter(major_text="Quick Draw").delete()
HeroCard.objects.using(db_alias).filter(major_text="Team Player").delete()
HeroCard.objects.using(db_alias).filter(major_text="Covering Fire").delete()
HeroCard.objects.using(db_alias).filter(major_text="Impossible Trick Shot").delete()
#Hulk
HeroCard.objects.using(db_alias).filter(major_text="Growing Anger").delete()
HeroCard.objects.using(db_alias).filter(major_text="Unstoppable Hulk").delete()
HeroCard.objects.using(db_alias).filter(major_text="Crazed Rampage").delete()
HeroCard.objects.using(db_alias).filter(major_text="Hulk Smash!").delete()
#Iron Man
HeroCard.objects.using(db_alias).filter(major_text="Repulsor Rays").delete()
HeroCard.objects.using(db_alias).filter(major_text="Endless Invention").delete()
HeroCard.objects.using(db_alias).filter(major_text="Arc Reactor").delete()
HeroCard.objects.using(db_alias).filter(major_text="Quantum Breakthrough").delete()
#Rogue
HeroCard.objects.using(db_alias).filter(major_text="Energy Drain").delete()
HeroCard.objects.using(db_alias).filter(major_text="Borrowed Brawn").delete()
HeroCard.objects.using(db_alias).filter(major_text="Copy Powers").delete()
HeroCard.objects.using(db_alias).filter(major_text="Steal Abilities").delete()
#Storm
HeroCard.objects.using(db_alias).filter(major_text="Lightning Bolt").delete()
HeroCard.objects.using(db_alias).filter(major_text="Gathering Stormclouds").delete()
HeroCard.objects.using(db_alias).filter(major_text="Spinning Cyclone").delete()
HeroCard.objects.using(db_alias).filter(major_text="Tidal Wave").delete()
#Thor
HeroCard.objects.using(db_alias).filter(major_text="Odinson").delete()
HeroCard.objects.using(db_alias).filter(major_text="Surge Of Power").delete()
HeroCard.objects.using(db_alias).filter(major_text="Call Lightning").delete()
HeroCard.objects.using(db_alias).filter(major_text="God Of Thunder").delete()
#Wolverine
HeroCard.objects.using(db_alias).filter(major_text="Keen Senses").delete()
HeroCard.objects.using(db_alias).filter(major_text="Healing Factor").delete()
HeroCard.objects.using(db_alias).filter(major_text="Frenzied Slashing").delete()
HeroCard.objects.using(db_alias).filter(major_text="Berserker Rage").delete()
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CardSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Hero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('static_folder', models.CharField(max_length=20)),
('card_set', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cards.CardSet')),
],
),
migrations.CreateModel(
name='HeroCard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('major_text', models.CharField(max_length=50)),
('cost', models.PositiveSmallIntegerField()),
('base_recruit', models.PositiveSmallIntegerField()),
('base_attack', models.PositiveSmallIntegerField()),
('quantity', models.PositiveSmallIntegerField()),
('card_file_name', models.CharField(max_length=20)),
('hero', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cards.Hero')),
],
),
migrations.CreateModel(
name='HeroClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_name', models.CharField(db_column='Name', max_length=50)),
('description', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='HeroicTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('team_name', models.CharField(db_column='Name', max_length=50)),
('description', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='herocard',
name='hero_class',
field=models.ManyToManyField(db_table='cards_herocard_heroclass', to='cards.HeroClass'),
),
migrations.AddField(
model_name='hero',
name='hero_team',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='cards.HeroicTeam'),
),
migrations.RunPython(create_card_set_seed_data, delete_card_set_seed_data),
migrations.RunPython(create_hero_team_seed_data, delete_hero_team_seed_data),
migrations.RunPython(create_hero_class_seed_data, delete_hero_class_seed_data),
migrations.RunPython(create_hero_seed_data, delete_hero_seed_data),
migrations.RunPython(create_hero_card_seed_data, delete_hero_card_seed_data)
]
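
# Each RunPython operation above pairs a forward seed function with its
# delete_* reverse, so the seeded data can be unapplied together with the
# schema, e.g. via: python manage.py migrate cards zero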
| DaveHynek/LegendaryOnline | cards/migrations/0001_initial.py | Python | mit | 31,608 | ["BLAST"] | 5d5e1a167676e6d3c8192619b3d2f6720542af4b20054f75f9e00e5ca179b879 |
import json
import logging
import os
import urllib2
from galaxy import util
from galaxy.util.odict import odict
from galaxy.web import url_for
from tool_shed.util import encoding_util
from tool_shed.util import xml_util
log = logging.getLogger( __name__ )
REPOSITORY_OWNER = 'devteam'
def accumulate_tool_dependencies( tool_shed_accessible, tool_dependencies, all_tool_dependencies ):
if tool_shed_accessible:
if tool_dependencies:
for tool_dependency in tool_dependencies:
if tool_dependency not in all_tool_dependencies:
all_tool_dependencies.append( tool_dependency )
return all_tool_dependencies
def check_for_missing_tools( app, tool_panel_configs, latest_tool_migration_script_number ):
# Get the 000x_tools.xml file associated with the current migrate_tools version number.
tools_xml_file_path = os.path.abspath( os.path.join( 'scripts', 'migrate_tools', '%04d_tools.xml' % latest_tool_migration_script_number ) )
# Parse the XML and load the file attributes for later checking against the proprietary tool_panel_config.
migrated_tool_configs_dict = odict()
tree, error_message = xml_util.parse_xml( tools_xml_file_path )
if tree is None:
return False, odict()
root = tree.getroot()
tool_shed = root.get( 'name' )
tool_shed_url = get_tool_shed_url_from_tool_shed_registry( app, tool_shed )
    # Assume the tool shed is inaccessible until a request succeeds.
tool_shed_accessible = False
missing_tool_configs_dict = odict()
if tool_shed_url:
for elem in root:
if elem.tag == 'repository':
repository_dependencies = []
all_tool_dependencies = []
repository_name = elem.get( 'name' )
changeset_revision = elem.get( 'changeset_revision' )
tool_shed_accessible, repository_dependencies_dict = get_repository_dependencies( app,
tool_shed_url,
repository_name,
REPOSITORY_OWNER,
changeset_revision )
if tool_shed_accessible:
# Accumulate all tool dependencies defined for repository dependencies for display to the user.
for rd_key, rd_tups in repository_dependencies_dict.items():
if rd_key in [ 'root_key', 'description' ]:
continue
for rd_tup in rd_tups:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
parse_repository_dependency_tuple( rd_tup )
tool_shed_accessible, tool_dependencies = get_tool_dependencies( app,
tool_shed_url,
name,
owner,
changeset_revision )
all_tool_dependencies = accumulate_tool_dependencies( tool_shed_accessible, tool_dependencies, all_tool_dependencies )
tool_shed_accessible, tool_dependencies = get_tool_dependencies( app,
tool_shed_url,
repository_name,
REPOSITORY_OWNER,
changeset_revision )
all_tool_dependencies = accumulate_tool_dependencies( tool_shed_accessible, tool_dependencies, all_tool_dependencies )
for tool_elem in elem.findall( 'tool' ):
tool_config_file_name = tool_elem.get( 'file' )
if tool_config_file_name:
# We currently do nothing with repository dependencies except install them (we do not display repositories that will be
# installed to the user). However, we'll store them in the following dictionary in case we choose to display them in the
# future.
dependencies_dict = dict( tool_dependencies=all_tool_dependencies,
repository_dependencies=repository_dependencies )
migrated_tool_configs_dict[ tool_config_file_name ] = dependencies_dict
else:
break
if tool_shed_accessible:
# Parse the proprietary tool_panel_configs (the default is tool_conf.xml) and generate the list of missing tool config file names.
for tool_panel_config in tool_panel_configs:
tree, error_message = xml_util.parse_xml( tool_panel_config )
if tree:
root = tree.getroot()
for elem in root:
if elem.tag == 'tool':
missing_tool_configs_dict = check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict )
elif elem.tag == 'section':
for section_elem in elem:
if section_elem.tag == 'tool':
missing_tool_configs_dict = check_tool_tag_set( section_elem, migrated_tool_configs_dict, missing_tool_configs_dict )
else:
exception_msg = '\n\nThe entry for the main Galaxy tool shed at %s is missing from the %s file. ' % ( tool_shed, app.config.tool_sheds_config )
exception_msg += 'The entry for this tool shed must always be available in this file, so re-add it before attempting to start your Galaxy server.\n'
raise Exception( exception_msg )
return tool_shed_accessible, missing_tool_configs_dict
def check_tool_tag_set( elem, migrated_tool_configs_dict, missing_tool_configs_dict ):
file_path = elem.get( 'file', None )
if file_path:
path, name = os.path.split( file_path )
for migrated_tool_config in migrated_tool_configs_dict.keys():
if migrated_tool_config in [ file_path, name ]:
missing_tool_configs_dict[ name ] = migrated_tool_configs_dict[ migrated_tool_config ]
return missing_tool_configs_dict
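# Illustrative note (hypothetical file name): a panel entry such as
# <tool file="tools/ncbi_blast_plus/blastn.xml"/> matches a migrated config
# recorded under either the full path or the bare basename "blastn.xml",
# and is then flagged in missing_tool_configs_dict.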
def generate_clone_url_for_installed_repository( app, repository ):
"""Generate the URL for cloning a repository that has been installed into a Galaxy instance."""
tool_shed_url = get_tool_shed_url_from_tool_shed_registry( app, str( repository.tool_shed ) )
return url_join( tool_shed_url, 'repos', str( repository.owner ), str( repository.name ) )
def generate_clone_url_for_repository_in_tool_shed( user, repository ):
"""Generate the URL for cloning a repository that is in the tool shed."""
base_url = url_for( '/', qualified=True ).rstrip( '/' )
if user:
protocol, base = base_url.split( '://' )
username = '%s@' % user.username
return '%s://%s%s/repos/%s/%s' % ( protocol, username, base, repository.user.username, repository.name )
else:
return '%s/repos/%s/%s' % ( base_url, repository.user.username, repository.name )
def generate_clone_url_from_repo_info_tup( app, repo_info_tup ):
"""Generate the URL for cloning a repository given a tuple of toolshed, name, owner, changeset_revision."""
# Example tuple: ['http://localhost:9009', 'blast_datatypes', 'test', '461a4216e8ab', False]
toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = \
parse_repository_dependency_tuple( repo_info_tup )
tool_shed_url = get_tool_shed_url_from_tool_shed_registry( app, toolshed )
# Don't include the changeset_revision in clone urls.
return url_join( tool_shed_url, 'repos', owner, name )
def get_non_shed_tool_panel_configs( app ):
"""Get the non-shed related tool panel configs - there can be more than one, and the default is tool_conf.xml."""
config_filenames = []
for config_filename in app.config.tool_configs:
# Any config file that includes a tool_path attribute in the root tag set like the following is shed-related.
# <toolbox tool_path="../shed_tools">
tree, error_message = xml_util.parse_xml( config_filename )
if tree is None:
continue
root = tree.getroot()
tool_path = root.get( 'tool_path', None )
if tool_path is None:
config_filenames.append( config_filename )
return config_filenames
def get_repository_dependencies( app, tool_shed_url, repository_name, repository_owner, changeset_revision ):
repository_dependencies_dict = {}
tool_shed_accessible = True
url = '%s/repository/get_repository_dependencies?name=%s&owner=%s&changeset_revision=%s' % \
( tool_shed_url, repository_name, repository_owner, changeset_revision )
try:
raw_text = tool_shed_get( app, tool_shed_url, url )
tool_shed_accessible = True
except Exception, e:
tool_shed_accessible = False
print "The URL\n%s\nraised the exception:\n%s\n" % ( url, str( e ) )
if tool_shed_accessible:
if len( raw_text ) > 2:
encoded_text = json.loads( raw_text )
repository_dependencies_dict = encoding_util.tool_shed_decode( encoded_text )
return tool_shed_accessible, repository_dependencies_dict
def get_protocol_from_tool_shed_url( tool_shed_url ):
"""Return the protocol from the received tool_shed_url if it exists."""
try:
if tool_shed_url.find( '://' ) > 0:
return tool_shed_url.split( '://' )[0].lower()
except Exception, e:
# We receive a lot of calls here where the tool_shed_url is None. The container_util uses
# that value when creating a header row. If the tool_shed_url is not None, we have a problem.
if tool_shed_url is not None:
log.exception( "Handled exception getting the protocol from Tool Shed URL %s:\n%s" % ( str( tool_shed_url ), str( e ) ) )
# Default to HTTP protocol.
return 'http'
def get_tool_dependencies( app, tool_shed_url, repository_name, repository_owner, changeset_revision ):
tool_dependencies = []
tool_shed_accessible = True
url = '%s/repository/get_tool_dependencies?name=%s&owner=%s&changeset_revision=%s' % \
( tool_shed_url, repository_name, repository_owner, changeset_revision )
try:
text = tool_shed_get( app, tool_shed_url, url )
tool_shed_accessible = True
except Exception, e:
tool_shed_accessible = False
print "The URL\n%s\nraised the exception:\n%s\n" % ( url, str( e ) )
if tool_shed_accessible:
if text:
tool_dependencies_dict = encoding_util.tool_shed_decode( text )
for dependency_key, requirements_dict in tool_dependencies_dict.items():
tool_dependency_name = requirements_dict[ 'name' ]
tool_dependency_version = requirements_dict[ 'version' ]
tool_dependency_type = requirements_dict[ 'type' ]
tool_dependencies.append( ( tool_dependency_name, tool_dependency_version, tool_dependency_type ) )
return tool_shed_accessible, tool_dependencies
def get_tool_shed_repository_ids( as_string=False, **kwd ):
tsrid = kwd.get( 'tool_shed_repository_id', None )
tsridslist = util.listify( kwd.get( 'tool_shed_repository_ids', None ) )
if not tsridslist:
tsridslist = util.listify( kwd.get( 'id', None ) )
if tsridslist is not None:
if tsrid is not None and tsrid not in tsridslist:
tsridslist.append( tsrid )
if as_string:
return ','.join( tsridslist )
return tsridslist
else:
tsridslist = util.listify( kwd.get( 'ordered_tsr_ids', None ) )
if tsridslist is not None:
if as_string:
return ','.join( tsridslist )
return tsridslist
if as_string:
            return ''
return []
def get_tool_shed_url_from_tool_shed_registry( app, tool_shed ):
"""
The value of tool_shed is something like: toolshed.g2.bx.psu.edu. We need the URL to this tool shed, which is
something like: http://toolshed.g2.bx.psu.edu/
"""
cleaned_tool_shed = remove_protocol_from_tool_shed_url( tool_shed )
for shed_name, shed_url in app.tool_shed_registry.tool_sheds.items():
if shed_url.find( cleaned_tool_shed ) >= 0:
if shed_url.endswith( '/' ):
shed_url = shed_url.rstrip( '/' )
return shed_url
# The tool shed from which the repository was originally installed must no longer be configured in tool_sheds_conf.xml.
return None
def handle_galaxy_url( trans, **kwd ):
galaxy_url = kwd.get( 'galaxy_url', None )
if galaxy_url:
trans.set_cookie( galaxy_url, name='toolshedgalaxyurl' )
else:
galaxy_url = trans.get_cookie( name='toolshedgalaxyurl' )
return galaxy_url
def handle_tool_shed_url_protocol( app, shed_url ):
"""Handle secure and insecure HTTP protocol since they may change over time."""
try:
if app.name == 'galaxy':
url = remove_protocol_from_tool_shed_url( shed_url )
tool_shed_url = get_tool_shed_url_from_tool_shed_registry( app, url )
else:
tool_shed_url = str( url_for( '/', qualified=True ) ).rstrip( '/' )
return tool_shed_url
except Exception, e:
# We receive a lot of calls here where the tool_shed_url is None. The container_util uses
# that value when creating a header row. If the tool_shed_url is not None, we have a problem.
if shed_url is not None:
log.exception( "Handled exception removing protocol from URL %s:\n%s" % ( str( shed_url ), str( e ) ) )
return shed_url
def parse_repository_dependency_tuple( repository_dependency_tuple, contains_error=False ):
# Default both prior_installation_required and only_if_compiling_contained_td to False in cases where metadata should be reset on the
# repository containing the repository_dependency definition.
prior_installation_required = 'False'
only_if_compiling_contained_td = 'False'
if contains_error:
if len( repository_dependency_tuple ) == 5:
tool_shed, name, owner, changeset_revision, error = repository_dependency_tuple
elif len( repository_dependency_tuple ) == 6:
tool_shed, name, owner, changeset_revision, prior_installation_required, error = repository_dependency_tuple
elif len( repository_dependency_tuple ) == 7:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td, error = \
repository_dependency_tuple
return tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td, error
else:
if len( repository_dependency_tuple ) == 4:
tool_shed, name, owner, changeset_revision = repository_dependency_tuple
elif len( repository_dependency_tuple ) == 5:
tool_shed, name, owner, changeset_revision, prior_installation_required = repository_dependency_tuple
elif len( repository_dependency_tuple ) == 6:
tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td = repository_dependency_tuple
return tool_shed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td
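# Illustrative inputs (values hypothetical, mirroring the example tuple used
# elsewhere in this module):
#   4-tuple: ('http://localhost:9009', 'blast_datatypes', 'test', '461a4216e8ab')
#   a 5-tuple adds prior_installation_required; a 6-tuple also adds
#   only_if_compiling_contained_td. Missing flags default to the string 'False'.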
def remove_port_from_tool_shed_url( tool_shed_url ):
"""Return a partial Tool Shed URL, eliminating the port if it exists."""
try:
if tool_shed_url.find( ':' ) > 0:
# Eliminate the port, if any, since it will result in an invalid directory name.
new_tool_shed_url = tool_shed_url.split( ':' )[ 0 ]
else:
new_tool_shed_url = tool_shed_url
return new_tool_shed_url.rstrip( '/' )
except Exception, e:
# We receive a lot of calls here where the tool_shed_url is None. The container_util uses
# that value when creating a header row. If the tool_shed_url is not None, we have a problem.
if tool_shed_url is not None:
log.exception( "Handled exception removing the port from Tool Shed URL %s:\n%s" % ( str( tool_shed_url ), str( e ) ) )
return tool_shed_url
def remove_protocol_and_port_from_tool_shed_url( tool_shed_url ):
"""Return a partial Tool Shed URL, eliminating the protocol and/or port if either exists."""
tool_shed = remove_protocol_from_tool_shed_url( tool_shed_url )
tool_shed = remove_port_from_tool_shed_url( tool_shed )
return tool_shed
def remove_protocol_and_user_from_clone_url( repository_clone_url ):
"""Return a URL that can be used to clone a repository, eliminating the protocol and user if either exists."""
if repository_clone_url.find( '@' ) > 0:
        # We have a URL that includes an authenticated user, something like:
# http://test@bx.psu.edu:9009/repos/some_username/column
items = repository_clone_url.split( '@' )
tmp_url = items[ 1 ]
elif repository_clone_url.find( '//' ) > 0:
        # We have a URL that includes only a protocol, something like:
# http://bx.psu.edu:9009/repos/some_username/column
items = repository_clone_url.split( '//' )
tmp_url = items[ 1 ]
else:
tmp_url = repository_clone_url
return tmp_url.rstrip( '/' )
def remove_protocol_from_tool_shed_url( tool_shed_url ):
"""Return a partial Tool Shed URL, eliminating the protocol if it exists."""
try:
if tool_shed_url.find( '://' ) > 0:
new_tool_shed_url = tool_shed_url.split( '://' )[1]
else:
new_tool_shed_url = tool_shed_url
return new_tool_shed_url.rstrip( '/' )
except Exception, e:
# We receive a lot of calls here where the tool_shed_url is None. The container_util uses
# that value when creating a header row. If the tool_shed_url is not None, we have a problem.
if tool_shed_url is not None:
log.exception( "Handled exception removing the protocol from Tool Shed URL %s:\n%s" % ( str( tool_shed_url ), str( e ) ) )
return tool_shed_url
def tool_shed_get( app, tool_shed_url, uri ):
"""Make contact with the tool shed via the uri provided."""
registry = app.tool_shed_registry
    # urllib2 auto-detects system proxies when a ProxyHandler is installed.
    # See: http://docs.python.org/2/howto/urllib2.html#proxies
proxy = urllib2.ProxyHandler()
urlopener = urllib2.build_opener( proxy )
urllib2.install_opener( urlopener )
password_mgr = registry.password_manager_for_url( tool_shed_url )
if password_mgr is not None:
auth_handler = urllib2.HTTPBasicAuthHandler( password_mgr )
urlopener.add_handler( auth_handler )
response = urlopener.open( uri )
content = response.read()
response.close()
return content
def url_join( *args ):
"""Return a valid URL produced by appending a base URL and a set of request parameters."""
parts = []
for arg in args:
if arg is not None:
parts.append( arg.strip( '/' ) )
return '/'.join( parts )
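# Illustrative usage (hypothetical values): each argument is stripped of
# leading/trailing slashes before joining, so
#   url_join( 'http://toolshed.g2.bx.psu.edu/', 'repos', 'devteam', 'ncbi_blast_plus' )
# returns 'http://toolshed.g2.bx.psu.edu/repos/devteam/ncbi_blast_plus'.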
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/util/common_util.py
|
Python
|
gpl-3.0
| 19,969
|
[
"Galaxy"
] |
11cba42edec28968ddf27ecca9d3e4b0c8277a05dcb495f83b6e97e7f05e90d7
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Apr 28, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2012"
import unittest
import os
from pymatgen.core.structure import Molecule
from pymatgen.io.xyzio import XYZ
from pymatgen.io.babelio import BabelMolAdaptor
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "molecules")
try:
import openbabel as ob
import pybel as pb
except ImportError:
pb = None
ob = None
@unittest.skipIf(not (pb and ob), "OpenBabel not present. Skipping...")
class BabelMolAdaptorTest(unittest.TestCase):
def setUp(self):
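        # The test molecule is methane (CH4): carbon at the origin with four
        # tetrahedrally arranged hydrogens.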
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_init(self):
adaptor = BabelMolAdaptor(self.mol)
obmol = adaptor.openbabel_mol
self.assertEqual(obmol.NumAtoms(), 5)
adaptor = BabelMolAdaptor(adaptor.openbabel_mol)
self.assertEqual(adaptor.pymatgen_mol.formula, "H4 C1")
def test_from_file(self):
adaptor = BabelMolAdaptor.from_file(
os.path.join(test_dir, "Ethane_e.pdb"), "pdb")
mol = adaptor.pymatgen_mol
self.assertEqual(mol.formula, "H6 C2")
def test_from_string(self):
xyz = XYZ(self.mol)
adaptor = BabelMolAdaptor.from_string(str(xyz), "xyz")
mol = adaptor.pymatgen_mol
self.assertEqual(mol.formula, "H4 C1")
def test_localopt(self):
self.mol[1] = "H", [0, 0, 1.05]
adaptor = BabelMolAdaptor(self.mol)
adaptor.localopt()
optmol = adaptor.pymatgen_mol
for site in optmol[1:]:
self.assertAlmostEqual(site.distance(optmol[0]), 1.09216, 2)
if __name__ == "__main__":
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/io/tests/test_babelio.py
|
Python
|
mit
| 2,171
|
[
"Pybel",
"pymatgen"
] |
248f2977cae3ac5a215a0a68bbe6b61f75be92a5196b29f04b80c5ab11048e55
|
#!/usr/bin/env python
r"""
This example shows the use of the `dw_tl_he_genyeoh` hyperelastic term, whose
contribution to the deformation energy density per unit reference volume is
given by
.. math::
W = K \, \left( \overline I_1 - 3 \right)^{p}
where :math:`\overline I_1` is the first main invariant of the deviatoric part
of the right Cauchy-Green deformation tensor :math:`\ull{C}` and `K` and `p`
are its parameters.
This term may be used to implement the generalized Yeoh hyperelastic material
model [1] by adding three such terms:
.. math::
W =
K_1 \, \left( \overline I_1 - 3 \right)^{m}
+K_2 \, \left( \overline I_1 - 3 \right)^{p}
+K_3 \, \left( \overline I_1 - 3 \right)^{q}
where the coefficients :math:`K_1, K_2, K_3` and exponents :math:`m, p, q` are
material parameters. Only a single term is used in this example for the sake of
simplicity.
Components of the second Piola-Kirchhoff stress are in the case of an
incompressible material
.. math::
S_{ij} = 2 \, \pdiff{W}{C_{ij}} - p \, F^{-1}_{ik} \, F^{-T}_{kj} \;,
where :math:`p` is the hydrostatic pressure.
The large deformation is described using the total Lagrangian formulation in
this example. The incompressibility is treated by mixed displacement-pressure
formulation. The weak formulation is:
Find the displacement field :math:`\ul{u}` and pressure field :math:`p`
such that:
.. math::
\intl{\Omega\suz}{} \ull{S}\eff(\ul{u}, p) : \ull{E}(\ul{v})
\difd{V} = 0
\;, \quad \forall \ul{v} \;,
\intl{\Omega\suz}{} q\, (J(\ul{u})-1) \difd{V} = 0
\;, \quad \forall q \;.
The following formula holds for the axial true (Cauchy) stress in the case of
uniaxial stress:
.. math::
\sigma(\lambda) =
\frac{2}{3} \, m \, K_1 \,
\left( \lambda^2 + \frac{2}{\lambda} - 3 \right)^{m-1} \,
\left( \lambda - \frac{1}{\lambda^2} \right) \;,
where :math:`\lambda = l/l_0` is the prescribed stretch (:math:`l_0` and
:math:`l` being the original and deformed specimen length respectively).
The boundary conditions are set so that a state of uniaxial stress is achieved,
i.e. appropriate components of displacement are fixed on the "Left", "Bottom",
and "Near" faces and a monotonously increasing displacement is prescribed on
the "Right" face. This prescribed displacement is then used to calculate
:math:`\lambda` and to convert the second Piola-Kirchhoff stress to the true
(Cauchy) stress.
Note on material parameters
---------------------------
The three-term generalized Yeoh model is meant to be used for modelling of
filled rubbers. The following choice of parameters is suggested [1] based on
experimental data and stability considerations:
:math:`K_1 > 0`,
:math:`K_2 < 0`,
:math:`K_3 > 0`,
:math:`0.7 < m < 1`,
:math:`m < p < q`.
Usage Examples
--------------
Default options::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py
To show a comparison of stress against the analytic formula::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py -p
Using different mesh fineness::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--shape "5, 5, 5"
Different dimensions of the computational domain::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--dims "2, 1, 3"
Different length of time interval and/or number of time steps::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
-t 0,15,21
Use higher approximation order (the ``-t`` option to decrease the time step is
required for convergence here)::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py \
--order 2 -t 0,2,21
Change material parameters::
$ python examples/large_deformation/gen_yeoh_tl_up_interactive.py -m 2,1
View the results using ``resview.py``
-------------------------------------
Show pressure on deformed mesh (use PgDn/PgUp to jump forward/back)::
$ python resview.py --fields=p:f1:wu:p1 domain.??.vtk
Show the axial component of stress (second Piola-Kirchhoff)::
$ python resview.py --fields=stress:c0 domain.??.vtk
[1] Travis W. Hohenberger, Richard J. Windslow, Nicola M. Pugno, James J. C.
Busfield. A Constitutive Model for Both Low and High Strain Nonlinearities in
Highly Filled Elastomers and Implementation with User-Defined Material
Subroutines in Abaqus. Rubber Chemistry and Technology, Vol. 92, No. 4,
pp. 653-686 (2019)
"""
from __future__ import print_function, absolute_import
import argparse
import sys
SFEPY_DIR = '.'
sys.path.append(SFEPY_DIR)
import matplotlib.pyplot as plt
import numpy as np
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (
FieldVariable, Material, Integral, Function, Equation, Equations, Problem)
from sfepy.discrete.conditions import Conditions, EssentialBC
from sfepy.discrete.fem import FEDomain, Field
from sfepy.homogenization.utils import define_box_regions
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.terms import Term
DIMENSION = 3
def get_displacement(ts, coors, bc=None, problem=None):
"""
Define the time-dependent displacement.
"""
out = 1. * ts.time * coors[:, 0]
return out
def _get_analytic_stress(stretches, coef, exp):
out = np.array([
2 * coef * exp * (stretch**2 + 2 / stretch - 3)**(exp - 1)
* (stretch - stretch**-2)
if (stretch**2 + 2 / stretch > 3) else 0.
for stretch in stretches])
return out
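# Note on the guard above: at zero strain (stretch = 1) the invariant term
# stretch**2 + 2/stretch - 3 vanishes, and for exponents exp < 1 the factor
# (I1_bar - 3)**(exp - 1) would diverge; the stress is therefore set to 0
# explicitly whenever stretch**2 + 2/stretch <= 3.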
def plot_graphs(
material_parameters, global_stress, global_displacement,
undeformed_length):
"""
Plot a comparison of the nominal stress computed by the FEM and using the
analytic formula.
Parameters
----------
    material_parameters : list or tuple of float
        The K_1 coefficient and exponent m.
    global_stress
        The true (Cauchy) stress for each time step, from the FEM.
    global_displacement
        The total displacement for each time step, from the FEM.
    undeformed_length : float
        The length of the undeformed specimen.
"""
coef, exp = material_parameters
stretch = 1 + np.array(global_displacement) / undeformed_length
# axial stress values
stress_fem_2pk = np.array([sig for sig in global_stress])
stress_fem = stress_fem_2pk * stretch
stress_analytic = _get_analytic_stress(stretch, coef, exp)
fig, (ax_stress, ax_difference) = plt.subplots(nrows=2, sharex=True)
ax_stress.plot(stretch, stress_fem, '.-', label='FEM')
ax_stress.plot(stretch, stress_analytic, '--', label='analytic')
ax_difference.plot(stretch, stress_fem - stress_analytic, '.-')
ax_stress.legend(loc='best').set_draggable(True)
ax_stress.set_ylabel(r'nominal stress $\mathrm{[Pa]}$')
ax_stress.grid()
ax_difference.set_ylabel(r'difference in nominal stress $\mathrm{[Pa]}$')
ax_difference.set_xlabel(r'stretch $\mathrm{[-]}$')
ax_difference.grid()
plt.tight_layout()
plt.show()
def stress_strain(
out, problem, _state, order=1, global_stress=None,
global_displacement=None, **_):
"""
Compute the stress and the strain and add them to the output.
Parameters
----------
out : dict
Holds the results of the finite element computation.
problem : sfepy.discrete.Problem
order : int
The approximation order of the displacement field.
global_displacement
Total displacement for each time step, current value will be appended.
global_stress
The true (Cauchy) stress for each time step, current value will be
appended.
Returns
-------
out : dict
"""
strain = problem.evaluate(
'dw_tl_he_genyeoh.%d.Omega(m1.par, v, u)' % (2*order),
mode='el_avg', term_mode='strain', copy_materials=False)
out['green_strain'] = Struct(
name='output_data', mode='cell', data=strain, dofs=None)
stress_1 = problem.evaluate(
'dw_tl_he_genyeoh.%d.Omega(m1.par, v, u)' % (2*order),
mode='el_avg', term_mode='stress', copy_materials=False)
stress_p = problem.evaluate(
'dw_tl_bulk_pressure.%d.Omega(v, u, p)' % (2*order),
mode='el_avg', term_mode='stress', copy_materials=False)
stress = stress_1 + stress_p
out['stress'] = Struct(
name='output_data', mode='cell', data=stress, dofs=None)
global_stress.append(stress[0, 0, 0, 0])
global_displacement.append(get_displacement(
problem.ts, np.array([[1., 0, 0]]))[0])
return out
def main(cli_args):
dims = parse_argument_list(cli_args.dims, float)
shape = parse_argument_list(cli_args.shape, int)
centre = parse_argument_list(cli_args.centre, float)
material_parameters = parse_argument_list(cli_args.material_parameters,
float)
order = cli_args.order
ts_vals = cli_args.ts.split(',')
ts = {
't0' : float(ts_vals[0]), 't1' : float(ts_vals[1]),
'n_step' : int(ts_vals[2])}
do_plot = cli_args.plot
### Mesh and regions ###
mesh = gen_block_mesh(
dims, shape, centre, name='block', verbose=False)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
lbn, rtf = domain.get_mesh_bounding_box()
box_regions = define_box_regions(3, lbn, rtf)
regions = dict([
[r, domain.create_region(r, box_regions[r][0], box_regions[r][1])]
for r in box_regions])
### Fields ###
scalar_field = Field.from_args(
'fu', np.float64, 'scalar', omega, approx_order=order-1)
vector_field = Field.from_args(
'fv', np.float64, 'vector', omega, approx_order=order)
u = FieldVariable('u', 'unknown', vector_field, history=1)
v = FieldVariable('v', 'test', vector_field, primary_var_name='u')
p = FieldVariable('p', 'unknown', scalar_field, history=1)
q = FieldVariable('q', 'test', scalar_field, primary_var_name='p')
### Material ###
coefficient, exponent = material_parameters
m_1 = Material(
'm1', par=[coefficient, exponent],
)
### Boundary conditions ###
x_sym = EssentialBC('x_sym', regions['Left'], {'u.0' : 0.0})
y_sym = EssentialBC('y_sym', regions['Near'], {'u.1' : 0.0})
z_sym = EssentialBC('z_sym', regions['Bottom'], {'u.2' : 0.0})
disp_fun = Function('disp_fun', get_displacement)
displacement = EssentialBC(
'displacement', regions['Right'], {'u.0' : disp_fun})
ebcs = Conditions([x_sym, y_sym, z_sym, displacement])
### Terms and equations ###
integral = Integral('i', order=2*order+1)
term_1 = Term.new(
'dw_tl_he_genyeoh(m1.par, v, u)',
integral, omega, m1=m_1, v=v, u=u)
term_pressure = Term.new(
'dw_tl_bulk_pressure(v, u, p)',
integral, omega, v=v, u=u, p=p)
term_volume_change = Term.new(
'dw_tl_volume(q, u)',
integral, omega, q=q, u=u, term_mode='volume')
term_volume = Term.new(
'dw_volume_integrate(q)',
integral, omega, q=q)
eq_balance = Equation('balance', term_1 + term_pressure)
eq_volume = Equation('volume', term_volume_change - term_volume)
equations = Equations([eq_balance, eq_volume])
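    # These equations discretize the weak forms from the module docstring:
    # 'balance' tests the effective second Piola-Kirchhoff stress (hyperelastic
    # plus pressure contributions) with v, while 'volume' enforces the
    # incompressibility constraint J(u) = 1 tested with q.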
### Solvers ###
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton(
{'i_max' : 20},
lin_solver=ls, status=nls_status
)
### Problem ###
pb = Problem('hyper', equations=equations)
pb.set_bcs(ebcs=ebcs)
pb.set_ics(ics=Conditions([]))
tss = SimpleTimeSteppingSolver(ts, nls=nls, context=pb)
pb.set_solver(tss)
### Solution ###
axial_stress = []
axial_displacement = []
def stress_strain_fun(*args, **kwargs):
return stress_strain(
*args, order=order, global_stress=axial_stress,
global_displacement=axial_displacement, **kwargs)
pb.solve(save_results=True, post_process_hook=stress_strain_fun)
if do_plot:
plot_graphs(
material_parameters, axial_stress, axial_displacement,
undeformed_length=dims[0])
def parse_argument_list(cli_arg, type_fun=None, value_separator=','):
"""
Split the command-line argument into a list of items of given type.
Parameters
----------
cli_arg : str
type_fun : function
A function to be called on each substring of `cli_arg`; default: str.
value_separator : str
"""
if type_fun is None:
type_fun = str
out = [type_fun(value) for value in cli_arg.split(value_separator)]
return out
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--order', type=int, default=1, help='The approximation order of the '
'displacement field [default: %(default)s]')
parser.add_argument(
'-m', '--material-parameters', default='0.5, 0.9',
help='Material parameters - coefficient and exponent - of a single '
'term of the generalized Yeoh hyperelastic model. '
'[default: %(default)s]')
parser.add_argument(
'--dims', default="1.0, 1.0, 1.0",
help='Dimensions of the block [default: %(default)s]')
parser.add_argument(
'--shape', default='2, 2, 2',
help='Shape (counts of nodes in x, y, z) of the block [default: '
'%(default)s]')
parser.add_argument(
'--centre', default='0.5, 0.5, 0.5',
help='Centre of the block [default: %(default)s]')
parser.add_argument(
'-p', '--plot', action='store_true', default=False,
help='Whether to plot a comparison with analytical formula.')
parser.add_argument(
'-t', '--ts',
type=str, default='0.0,2.0,11',
help='Start time, end time, and number of time steps [default: '
'"%(default)s"]')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
|
BubuLK/sfepy
|
examples/large_deformation/gen_yeoh_tl_up_interactive.py
|
Python
|
bsd-3-clause
| 14,158
|
[
"VTK"
] |
8926909706a110631dfe73fa7f2266652cda518c5cf929256ed3f7e555f61901
|
import os
import re
from f90nml import Namelist
from jinja2 import Environment, FileSystemLoader
import glob
import sh
import pdb
import tarfile
# from gfdl import create_alert
# import getpass
from isca import GFDL_WORK, GFDL_DATA, GFDL_BASE, _module_directory, get_env_file, EventEmitter
from isca.diagtable import DiagTable
from isca.loghandler import Logger, clean_log_debug
from isca.helpers import destructive, useworkdir, mkdir
P = os.path.join
class CompilationError(Exception):
pass
class FailedRunError(Exception): pass
class Experiment(Logger, EventEmitter):
"""A basic GFDL experiment"""
RESOLUTIONS = {
'T170': {
'lon_max': 512,
'lat_max': 256,
'num_fourier': 170,
'num_spherical': 171
},
'T85': {
'lon_max': 256,
'lat_max': 128,
'num_fourier': 85,
'num_spherical': 86
},
'T42': {
'lon_max': 128,
'lat_max': 64,
'num_fourier': 42,
'num_spherical': 43,
},
}
runfmt = 'run%04d'
restartfmt = 'res%04d.tar.gz'
def __init__(self, name, codebase, safe_mode=False, workbase=GFDL_WORK, database=GFDL_DATA):
super(Experiment, self).__init__()
self.name = name
self.codebase = codebase
self.safe_mode = safe_mode
# set the default locations of working directory,
# executable directory, restart file storage, and
# output data directory.
self.workdir = P(workbase, 'experiment', self.name)
        self.rundir = P(self.workdir, 'run') # temporary area in which an individual run is performed
self.datadir = P(database, self.name) # where run data will be moved to upon completion
self.restartdir = P(self.datadir, 'restarts') # where restarts will be stored
self.template_dir = P(_module_directory, 'templates')
self.env_source = get_env_file()
self.templates = Environment(loader=FileSystemLoader(self.template_dir))
self.diag_table = DiagTable()
self.field_table_file = P(self.codebase.srcdir, 'extra', 'model', self.codebase.name, 'field_table')
self.inputfiles = []
self.namelist = Namelist()
@destructive
def rm_workdir(self):
try:
sh.rm(['-r', self.workdir])
except sh.ErrorReturnCode:
            self.log.warning("Tried to remove working directory but it doesn't exist")
@destructive
def rm_datadir(self):
try:
sh.rm(['-r', self.datadir])
except sh.ErrorReturnCode:
            self.log.warning("Tried to remove data directory but it doesn't exist")
@destructive
@useworkdir
def clear_workdir(self):
self.rm_workdir()
mkdir(self.workdir)
self.log.info('Emptied working directory %r' % self.workdir)
@destructive
@useworkdir
def clear_rundir(self):
#sh.cd(self.workdir)
try:
sh.rm(['-r', self.rundir])
except sh.ErrorReturnCode:
            self.log.warning("Tried to remove run directory but it doesn't exist")
mkdir(self.rundir)
self.log.info('Emptied run directory %r' % self.rundir)
def get_restart_file(self, i):
return P(self.restartdir, self.restartfmt % i)
def get_outputdir(self, run):
return P(self.datadir, self.runfmt % run)
def set_resolution(self, res, num_levels=None):
"""Set the resolution of the model, based on the standard triangular
truncations of the spectral core. For example,
exp.set_resolution('T85', 25)
creates a spectral core with enough modes to natively correspond to
a 256x128 lon-lat resolution."""
delta = self.RESOLUTIONS[res]
if num_levels is not None:
delta['num_levels'] = num_levels
self.update_namelist({'spectral_dynamics_nml': delta})
def update_namelist(self, new_vals):
"""Update the namelist sections, overwriting existing values."""
for sec in new_vals:
if sec not in self.namelist:
self.namelist[sec] = {}
nml = self.namelist[sec]
nml.update(new_vals[sec])
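    # Illustrative usage (hypothetical parameter values):
    #   exp.update_namelist({'main_nml': {'calendar': 'thirty_day'}})
    # merges into an existing 'main_nml' section without discarding its other
    # entries, creating the section first if it does not exist yet.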
def write_namelist(self, outdir):
namelist_file = P(outdir, 'input.nml')
self.log.info('Writing namelist to %r' % namelist_file)
self.namelist.write(namelist_file)
def write_diag_table(self, outdir):
outfile = P(outdir, 'diag_table')
self.log.info('Writing diag_table to %r' % outfile)
if self.diag_table.is_valid():
if self.diag_table.calendar is None:
# diagnose the calendar from the namelist
cal = self.get_calendar()
self.diag_table.calendar = cal
self.diag_table.write(outfile)
else:
self.log.error("No output files defined in the DiagTable. Stopping.")
raise ValueError()
def write_field_table(self, outdir):
self.log.info('Writing field_table to %r' % P(outdir, 'field_table'))
sh.cp(self.field_table_file, P(outdir, 'field_table'))
def log_output(self, outputstring):
line = outputstring.strip()
if 'warning' in line.lower():
self.log.warn(line)
else:
self.log.debug(line)
#return clean_log_debug(outputstring)
def delete_restart(self, run):
resfile = self.get_restart_file(run)
if os.path.isfile(resfile):
sh.rm(resfile)
self.log.info('Deleted restart file %s' % resfile)
def get_calendar(self):
"""Get the value of 'main_nml/calendar.
Returns a string name of calendar, or None if not set in namelist.'"""
if 'main_nml' in self.namelist:
return self.namelist['main_nml'].get('calendar')
else:
return None
@destructive
@useworkdir
def run(self, i, restart_file=None, use_restart=True, multi_node=False, num_cores=8, overwrite_data=False, save_run=False, run_idb=False, nice_score=0, mpirun_opts=''):
"""Run the model.
`num_cores`: Number of mpi cores to distribute over.
`restart_file` (optional): A path to a valid restart archive. If None and `use_restart=True`,
restart file (i-1) will be used.
`save_run`: If True, copy the entire working directory over to GFDL_DATA
so that the run can rerun without the python script.
(This uses a lot of data storage!)
"""
self.clear_rundir()
indir = P(self.rundir, 'INPUT')
outdir = P(self.datadir, self.runfmt % i)
resdir = P(self.rundir, 'RESTART')
if os.path.isdir(outdir):
if overwrite_data:
self.log.warning('Data for run %d already exists and overwrite_data is True. Overwriting.' % i)
sh.rm('-r', outdir)
else:
self.log.warn('Data for run %d already exists but overwrite_data is False. Stopping.' % i)
return False
# make the output run folder and copy over the input files
mkdir([indir, resdir, self.restartdir])
self.codebase.write_source_control_status(P(self.rundir, 'git_hash_used.txt'))
self.write_namelist(self.rundir)
self.write_field_table(self.rundir)
self.write_diag_table(self.rundir)
for filename in self.inputfiles:
sh.cp([filename, P(indir, os.path.split(filename)[1])])
if multi_node:
mpirun_opts += ' -bootstrap pbsdsh -f $PBS_NODEFILE'
if use_restart:
if not restart_file:
# get the restart from previous iteration
restart_file = self.get_restart_file(i - 1)
if not os.path.isfile(restart_file):
self.log.error('Restart file not found, expecting file %r' % restart_file)
raise IOError('Restart file not found, expecting file %r' % restart_file)
else:
self.log.info('Using restart file %r' % restart_file)
self.extract_restart_archive(restart_file, indir)
else:
self.log.info('Running without restart file')
restart_file = None
vars = {
'rundir': self.rundir,
'execdir': self.codebase.builddir,
'executable': self.codebase.executable_name,
'env_source': self.env_source,
'mpirun_opts': mpirun_opts,
'num_cores': num_cores,
'run_idb': run_idb,
'nice_score': nice_score
}
runscript = self.templates.get_template('run.sh')
# employ the template to create a runscript
t = runscript.stream(**vars).dump(P(self.rundir, 'run.sh'))
def _outhandler(line):
handled = self.emit('run:output', self, line)
if not handled: # only log the output when no event handler is used
self.log_output(line)
self.emit('run:ready', self, i)
self.log.info("Beginning run %d" % i)
try:
#for line in sh.bash(P(self.rundir, 'run.sh'), _iter=True, _err_to_out=True):
proc = sh.bash(P(self.rundir, 'run.sh'), _bg=True, _out=_outhandler, _err_to_out=True)
self.log.info('process running as {}'.format(proc.process.pid))
proc.wait()
completed = True
except KeyboardInterrupt as e:
self.log.error("Manual interrupt, killing process.")
proc.process.terminate()
proc.wait()
#log.info("Cleaning run directory.")
#self.clear_rundir()
raise e
except sh.ErrorReturnCode as e:
completed = False
self.log.error("Run %d failed. See log for details." % i)
self.log.error("Error: %r" % e)
self.emit('run:failed', self)
raise FailedRunError()
self.emit('run:completed', self, i)
self.log.info('Run %d complete' % i)
mkdir(outdir)
if num_cores > 1:
# use postprocessing tool to combine the output from several cores
codebase_combine_script = P(self.codebase.builddir, 'mppnccombine_run.sh')
if not os.path.exists(codebase_combine_script):
self.log.warning('combine script does not exist in the commit you are running Isca from. Falling back to using $GFDL_BASE mppnccombine_run.sh script')
sh.ln('-s', P(GFDL_BASE, 'postprocessing', 'mppnccombine_run.sh'), codebase_combine_script)
combinetool = sh.Command(codebase_combine_script)
for file in self.diag_table.files:
netcdf_file = '%s.nc' % file
filebase = P(self.rundir, netcdf_file)
combinetool(self.codebase.builddir, filebase)
# copy the combined netcdf file into the data archive directory
sh.cp(filebase, P(outdir, netcdf_file))
# remove all netcdf fragments from the run directory
sh.rm(glob.glob(filebase+'*'))
self.log.debug('%s combined and copied to data directory' % netcdf_file)
for restart in glob.glob(P(resdir, '*.res.nc.0000')):
restartfile = restart.replace('.0000', '')
combinetool(self.codebase.builddir, restartfile)
sh.rm(glob.glob(restartfile+'.????'))
self.log.debug("Restart file %s combined" % restartfile)
self.emit('run:combined', self)
# make the restart archive and delete the restart files
self.make_restart_archive(self.get_restart_file(i), resdir)
sh.rm('-r', resdir)
if save_run:
# copy the complete run directory to GFDL_DATA so that the run can
# be recreated without the python script if required
mkdir(resdir)
sh.cp(['-a', self.rundir, outdir])
else:
# just save some useful diagnostic information
self.write_namelist(outdir)
self.write_field_table(outdir)
self.write_diag_table(outdir)
self.codebase.write_source_control_status(P(outdir, 'git_hash_used.txt'))
self.clear_rundir()
return True
def make_restart_archive(self, archive_file, restart_directory):
with tarfile.open(archive_file, 'w:gz') as tar:
tar.add(restart_directory, arcname='.')
self.log.info("Restart archive created at %s" % archive_file)
def extract_restart_archive(self, archive_file, input_directory):
with tarfile.open(archive_file, 'r:gz') as tar:
tar.extractall(path=input_directory)
self.log.info("Restart %s extracted to %s" % (archive_file, input_directory))
def derive(self, new_experiment_name):
"""Derive a new experiment based on this one."""
new_exp = Experiment(new_experiment_name, self.codebase)
new_exp.namelist = self.namelist.copy()
new_exp.diag_table = self.diag_table.copy()
new_exp.inputfiles = self.inputfiles[:]
return new_exp
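    # Illustrative usage (hypothetical names): deriving copies the namelist,
    # diag_table and input file list, so edits to the new experiment do not
    # leak back into this one.
    #   perturbed = exp.derive('my_exp_perturbed')
    #   perturbed.update_namelist({'main_nml': {'calendar': 'no_calendar'}})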
# TODO: replace this with util functionality
# def run_parameter_sweep(self, parameter_values, runs=10, num_cores=16):
# # parameter_values should be a namelist fragment, with multiple values
# # for each study e.g. to vary obliquity:
# # exp.run_parameter_sweep({'astronomy_nml': {'obliq': [0.0, 5.0, 10.0, 15.0]}})
# # will run 4 independent studies and create data e.g.
# # <exp_name>/astronomy_nml_obliq_<0.0, 5.0 ...>/run[1-10]/daily.nc
# params = [(sec, name, values) for sec, parameters in parameter_values.items()
# for name, values in parameters.items()]
# # make a list of lists of namelist section, parameter names and values
# params = [[(a,b,val) for val in values] for a,b,values in params]
# parameter_space = itertools.product(params)
# for combo in parameter_space:
# title = '_'.join(['%s_%s_%r' % (sec[:3], name[:5], val) for sec, name, val in combo])
# exp = self.derive(self.name + '_' + title)
# for sec, name, val in combo:
# exp.namelist[sec][name] = val
# exp.clear_rundir()
# exp.run(1, use_restart=False, num_cores=num_cores)
# for i in range(runs-1):
# exp.run(i+2)
# class RunSpec(Logger):
# def __init__(self, exp):
# self.exp = exp
|
jamesp/Isca
|
src/extra/python/isca/experiment.py
|
Python
|
gpl-3.0
| 14,643
|
[
"NetCDF"
] |
76dc7e3d12cffbc433335a31b87862130e28441caa574b02e7038db80bbc5a00
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 2000
n = 4000
k = 3000
testMehrotra = True
testIPF = False
manualInit = False
display = False
progress = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
# Make a sparse matrix with the last column dense
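# (Banded pattern per row s: 11 on the diagonal, -1 and 2 on the adjacent
# off-diagonals, -3 and 4 at offsets of +/- `height`, plus the dense final
# column added below.)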
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = A.GlobalRow(sLoc)
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
        A.QueueLocalUpdate( sLoc, width-1, -5./height )  # float division; -5/height floors to -1 under Python 2
A.ProcessQueues()
return A
A = Rectang(m,n)
G = Rectang(k,n)
# Generate a (b,h) which implies a primal feasible (x,s)
# ======================================================
# b := A xGen
# -----------
xGen = El.DistMultiVec()
El.Gaussian(xGen,n,1)
b = El.DistMultiVec()
El.Zeros( b, m, 1 )
El.Multiply( El.NORMAL, 1., A, xGen, 0., b )
# h := G xGen + sGen
# ------------------
sGen = El.DistMultiVec()
El.Uniform(sGen,k,1,0.5,0.5)
h = El.DistMultiVec()
El.Copy( sGen, h )
El.Multiply( El.NORMAL, 1., G, xGen, 1., h )
# Generate a c which implies a dual feasible (y,z)
# ================================================
yGen = El.DistMultiVec()
El.Gaussian(yGen,m,1)
zGen = El.DistMultiVec()
El.Uniform(zGen,k,1,0.5,0.5)
c = El.DistMultiVec()
El.Zeros(c,n,1)
El.Multiply( El.TRANSPOSE, -1., A, yGen, 1., c )
El.Multiply( El.TRANSPOSE, -1., G, zGen, 1., c )
if display:
El.Display( A, "A" )
El.Display( G, "G" )
El.Display( b, "b" )
El.Display( c, "c" )
El.Display( h, "h" )
# Set up the control structure (and possibly initial guesses)
# ===========================================================
ctrl = El.LPAffineCtrl_d()
xOrig = El.DistMultiVec()
yOrig = El.DistMultiVec()
zOrig = El.DistMultiVec()
sOrig = El.DistMultiVec()
if manualInit:
El.Uniform(xOrig,n,1,0.5,0.4999)
El.Uniform(yOrig,m,1,0.5,0.4999)
El.Uniform(zOrig,k,1,0.5,0.4999)
El.Uniform(sOrig,k,1,0.5,0.4999)
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()
if testMehrotra:
ctrl.approach = El.LP_MEHROTRA
ctrl.mehrotraCtrl.primalInit = manualInit
ctrl.mehrotraCtrl.dualInit = manualInit
ctrl.mehrotraCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startMehrotra = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endMehrotra = El.mpi.Time()
if worldRank == 0:
print "Mehrotra time:", endMehrotra-startMehrotra
if display:
El.Display( x, "x Mehrotra" )
El.Display( y, "y Mehrotra" )
El.Display( z, "z Mehrotra" )
El.Display( s, "s Mehrotra" )
obj = El.Dot(c,x)
if worldRank == 0:
print "Mehrotra c^T x =", obj
if testIPF:
ctrl.approach = El.LP_IPF
ctrl.ipfCtrl.primalInit = manualInit
ctrl.ipfCtrl.dualInit = manualInit
ctrl.ipfCtrl.progress = progress
ctrl.ipfCtrl.lineSearchCtrl.progress = progress
El.Copy( xOrig, x )
El.Copy( yOrig, y )
El.Copy( zOrig, z )
El.Copy( sOrig, s )
startIPF = El.mpi.Time()
El.LPAffine(A,G,b,c,h,x,y,z,s,ctrl)
endIPF = El.mpi.Time()
if worldRank == 0:
print "IPF time:", endIPF-startIPF
if display:
El.Display( x, "x IPF" )
El.Display( y, "y IPF" )
El.Display( z, "z IPF" )
El.Display( s, "s IPF" )
obj = El.Dot(c,x)
if worldRank == 0:
print "IPF c^T x =", obj
# Require the user to press Enter before the figures are closed
El.Finalize()
if worldSize == 1:
raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/LPAffine.py
|
Python
|
bsd-3-clause
| 4,092
|
[
"Gaussian"
] |
14a512ab8754fc9b7790e2d61905e33e117ec79735de85a74bc89e480a8e8e8b
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
from django.core.urlresolvers import reverse
from shuup.core.models import Order, PaymentStatus, Product
from shuup.testing.factories import (
create_default_order_statuses, get_address, get_default_payment_method,
get_default_shipping_method, get_default_shop, get_default_supplier,
get_default_tax_class, create_product
)
from shuup.testing.mock_population import populate_if_required
from shuup.testing.models import (
CarrierWithCheckoutPhase, PaymentWithCheckoutPhase
)
from shuup.testing.soup_utils import extract_form_fields
from shuup_tests.utils import SmartClient
def fill_address_inputs(soup, with_company=False):
inputs = {}
test_address = get_address()
for key, value in extract_form_fields(soup.find('form', id='addresses')).items():
if not value:
if key in ("order-tax_number", "order-company_name"):
continue
if key.startswith("shipping-") or key.startswith("billing-"):
bit = key.split("-")[1]
value = getattr(test_address, bit, None)
if not value and "email" in key:
value = "test%d@example.shuup.com" % random.random()
if not value:
value = "test"
inputs[key] = value
if with_company:
inputs["company-tax_number"] = "FI1234567-1"
inputs["company-company_name"] = "Example Oy"
else:
inputs = dict((k, v) for (k, v) in inputs.items() if not k.startswith("company-"))
return inputs
def _populate_client_basket(client):
product_ids = []
index = client.soup("/")
product_links = index.find_all("a", rel="product-detail")
assert product_links
for i in range(3): # add three different products
product_detail_path = product_links[i]["href"]
assert product_detail_path
product_detail_soup = client.soup(product_detail_path)
inputs = extract_form_fields(product_detail_soup)
basket_path = reverse("shuup:basket")
add_to_basket_resp = client.post(basket_path, data={
"command": "add",
"product_id": inputs["product_id"],
"quantity": 1,
"supplier": get_default_supplier().pk
})
assert add_to_basket_resp.status_code < 400
product_ids.append(inputs["product_id"])
basket_soup = client.soup(basket_path)
    assert b'no such element' not in basket_soup.renderContents(), 'Some product details were not rendered correctly'
return product_ids
def _get_payment_method_with_phase():
processor = PaymentWithCheckoutPhase.objects.create(
identifier="processor_with_phase", enabled=True)
assert isinstance(processor, PaymentWithCheckoutPhase)
return processor.create_service(
None,
identifier="payment_with_phase",
shop=get_default_shop(),
name="Test method with phase",
enabled=True,
tax_class=get_default_tax_class())
def _get_shipping_method_with_phase():
carrier = CarrierWithCheckoutPhase.objects.create(
identifier="carrier_with_phase", enabled=True)
assert isinstance(carrier, CarrierWithCheckoutPhase)
return carrier.create_service(
None,
identifier="carrier_with_phase",
shop=get_default_shop(),
name="Test method with phase",
enabled=True,
tax_class=get_default_tax_class())
@pytest.mark.django_db
@pytest.mark.parametrize("with_company", [False, True])
def test_basic_order_flow(with_company):
create_default_order_statuses()
n_orders_pre = Order.objects.count()
populate_if_required()
c = SmartClient()
product_ids = _populate_client_basket(c)
addresses_path = reverse("shuup:checkout", kwargs={"phase": "addresses"})
addresses_soup = c.soup(addresses_path)
inputs = fill_address_inputs(addresses_soup, with_company=with_company)
response = c.post(addresses_path, data=inputs)
assert response.status_code == 302 # Should redirect forth
methods_path = reverse("shuup:checkout", kwargs={"phase": "methods"})
methods_soup = c.soup(methods_path)
assert c.post(methods_path, data=extract_form_fields(methods_soup)).status_code == 302 # Should redirect forth
confirm_path = reverse("shuup:checkout", kwargs={"phase": "confirm"})
confirm_soup = c.soup(confirm_path)
Product.objects.get(pk=product_ids[0]).soft_delete()
assert c.post(confirm_path, data=extract_form_fields(confirm_soup)).status_code == 200 # user needs to reconfirm
data = extract_form_fields(confirm_soup)
data['product_ids'] = ','.join(product_ids[1:])
assert c.post(confirm_path, data=data).status_code == 302 # Should redirect forth
n_orders_post = Order.objects.count()
assert n_orders_post > n_orders_pre, "order was created"
@pytest.mark.django_db
@pytest.mark.parametrize("get_shipping_method,shipping_data,get_payment_method,payment_data", [
(get_default_shipping_method, None, _get_payment_method_with_phase, {"input_field": True}),
(_get_shipping_method_with_phase, {"input_field": "20540"}, get_default_payment_method, None),
(_get_shipping_method_with_phase, {"input_field": "20540"}, _get_payment_method_with_phase, {"input_field": True}),
])
def test_order_flow_with_phases(get_shipping_method, shipping_data, get_payment_method, payment_data):
create_default_order_statuses()
populate_if_required()
c = SmartClient()
_populate_client_basket(c)
# Create methods
shipping_method = get_shipping_method()
payment_method = get_payment_method()
# Resolve paths
addresses_path = reverse("shuup:checkout", kwargs={"phase": "addresses"})
methods_path = reverse("shuup:checkout", kwargs={"phase": "methods"})
shipping_path = reverse("shuup:checkout", kwargs={"phase": "shipping"})
payment_path = reverse("shuup:checkout", kwargs={"phase": "payment"})
confirm_path = reverse("shuup:checkout", kwargs={"phase": "confirm"})
# Phase: Addresses
addresses_soup = c.soup(addresses_path)
inputs = fill_address_inputs(addresses_soup, with_company=False)
response = c.post(addresses_path, data=inputs)
assert response.status_code == 302, "Address phase should redirect forth to methods"
# Phase: Methods
response = c.get(methods_path)
assert response.status_code == 200
response = c.post(
methods_path,
data={
"shipping_method": shipping_method.pk,
"payment_method": payment_method.pk
}
)
assert response.status_code == 302, "Methods phase should redirect forth"
if isinstance(shipping_method.carrier, CarrierWithCheckoutPhase):
# Phase: Shipping
response = c.get(shipping_path)
assert response.status_code == 200
response = c.post(shipping_path, data=shipping_data)
assert response.status_code == 302, "Payments phase should redirect forth"
if isinstance(payment_method.payment_processor, PaymentWithCheckoutPhase):
# Phase: payment
response = c.get(payment_path)
assert response.status_code == 200
response = c.post(payment_path, data=payment_data)
assert response.status_code == 302, "Payments phase should redirect forth"
# Phase: Confirm
assert Order.objects.count() == 0
confirm_soup = c.soup(confirm_path)
response = c.post(confirm_path, data=extract_form_fields(confirm_soup))
assert response.status_code == 302, "Confirm should redirect forth"
order = Order.objects.first()
if isinstance(shipping_method.carrier, CarrierWithCheckoutPhase):
assert order.shipping_data.get("input_value") == "20540"
if isinstance(payment_method.payment_processor, PaymentWithCheckoutPhase):
assert order.payment_data.get("input_value")
assert order.payment_status == PaymentStatus.NOT_PAID
# Resolve order specific paths (payment and complete)
process_payment_path = reverse(
"shuup:order_process_payment",
kwargs={"pk": order.pk, "key": order.key})
process_payment_return_path = reverse(
"shuup:order_process_payment_return",
kwargs={"pk": order.pk, "key": order.key})
order_complete_path = reverse(
"shuup:order_complete",
kwargs={"pk": order.pk, "key": order.key})
# Check confirm redirection to payment page
assert response.url.endswith(process_payment_path), (
"Confirm should have redirected to payment page")
# Visit payment page
response = c.get(process_payment_path)
assert response.status_code == 302, "Payment page should redirect forth"
assert response.url.endswith(process_payment_return_path)
# Check payment return
response = c.get(process_payment_return_path)
assert response.status_code == 302, "Payment return should redirect forth"
assert response.url.endswith(order_complete_path)
# Check payment status has changed to DEFERRED
order = Order.objects.get(pk=order.pk) # reload
assert order.payment_status == PaymentStatus.DEFERRED
@pytest.mark.django_db
def test_checkout_empty_basket(rf):
create_default_order_statuses()
n_orders_pre = Order.objects.count()
populate_if_required()
c = SmartClient()
product_ids = _populate_client_basket(c)
addresses_path = reverse("shuup:checkout", kwargs={"phase": "addresses"})
addresses_soup = c.soup(addresses_path)
inputs = fill_address_inputs(addresses_soup)
for product_id in product_ids:
Product.objects.get(pk=product_id).soft_delete()
response, soup = c.response_and_soup(addresses_path, data=inputs, method="post")
    assert response.status_code == 200  # Should not redirect; the page is re-rendered with an error
assert b"Your shopping cart is empty." in soup.renderContents()
|
suutari/shoop
|
shuup_tests/front/test_checkout_flow.py
|
Python
|
agpl-3.0
| 10,136
|
[
"VisIt"
] |
eecad0da752c5de7b5cf288892f89a85b85b777a7a7f50146e40ae90c837ef8e
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010-2018 (ita)
"""
Classes and functions enabling the command system
"""
import os, re, imp, sys
from waflib import Utils, Errors, Logs
import waflib.Node
# the following 3 constants are updated on each new release (do not touch)
HEXVERSION=0x2000b00
"""Constant updated on new releases"""
WAFVERSION="2.0.11"
"""Constant updated on new releases"""
WAFREVISION="a97f6fb0941091b4966b625f15ec32fa783a8bec"
"""Git revision when the waf version is updated"""
ABI = 20
"""Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)"""
DBFILE = '.wafpickle-%s-%d-%d' % (sys.platform, sys.hexversion, ABI)
"""Name of the pickle file for storing the build data"""
APPNAME = 'APPNAME'
"""Default application name (used by ``waf dist``)"""
VERSION = 'VERSION'
"""Default application version (used by ``waf dist``)"""
TOP = 'top'
"""The variable name for the top-level directory in wscript files"""
OUT = 'out'
"""The variable name for the output directory in wscript files"""
WSCRIPT_FILE = 'wscript'
"""Name of the waf script files"""
launch_dir = ''
"""Directory from which waf has been called"""
run_dir = ''
"""Location of the wscript file to use as the entry point"""
top_dir = ''
"""Location of the project directory (top), if the project was configured"""
out_dir = ''
"""Location of the build directory (out), if the project was configured"""
waf_dir = ''
"""Directory containing the waf modules"""
default_encoding = Utils.console_encoding()
"""Encoding to use when reading outputs from other processes"""
g_module = None
"""
Module representing the top-level wscript file (see :py:const:`waflib.Context.run_dir`)
"""
STDOUT = 1
STDERR = -1
BOTH = 0
classes = []
"""
List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes
are added automatically by a metaclass.
"""
def create_context(cmd_name, *k, **kw):
"""
Returns a new :py:class:`waflib.Context.Context` instance corresponding to the given command.
Used in particular by :py:func:`waflib.Scripting.run_command`
:param cmd_name: command name
:type cmd_name: string
:param k: arguments to give to the context class initializer
:type k: list
:param kw: keyword arguments to give to the context class initializer
:type k: dict
:return: Context object
:rtype: :py:class:`waflib.Context.Context`
"""
for x in classes:
if x.cmd == cmd_name:
return x(*k, **kw)
ctx = Context(*k, **kw)
ctx.fun = cmd_name
return ctx
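# Editor's sketch (not part of waf): how the metaclass registry below
# cooperates with create_context(). The 'package' command is hypothetical;
# the class is only registered, never instantiated, so no project setup is
# needed to run this.
def _demo_command_registry():
	class PackageContext(Context):
		cmd = 'package'
		fun = 'package'
	# the store_context metaclass has prepended the class to `classes`
	assert classes[0] is PackageContext
	classes.remove(PackageContext) # undo the demo registration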
class store_context(type):
"""
Metaclass that registers command classes into the list :py:const:`waflib.Context.classes`
Context classes must provide an attribute 'cmd' representing the command name, and a function
attribute 'fun' representing the function name that the command uses.
"""
def __init__(cls, name, bases, dct):
super(store_context, cls).__init__(name, bases, dct)
name = cls.__name__
if name in ('ctx', 'Context'):
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)' % name)
if not getattr(cls, 'fun', None):
cls.fun = cls.cmd
classes.insert(0, cls)
ctx = store_context('ctx', (object,), {})
"""Base class for all :py:class:`waflib.Context.Context` classes"""
class Context(ctx):
"""
Default context for waf commands, and base class for new command contexts.
Context objects are passed to top-level functions::
def foo(ctx):
print(ctx.__class__.__name__) # waflib.Context.Context
Subclasses must define the class attributes 'cmd' and 'fun':
:param cmd: command to execute as in ``waf cmd``
:type cmd: string
:param fun: function name to execute when the command is called
:type fun: string
.. inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext
"""
errors = Errors
"""
Shortcut to :py:mod:`waflib.Errors` provided for convenience
"""
tools = {}
"""
A module cache for wscript files; see :py:meth:`Context.Context.load`
"""
def __init__(self, **kw):
try:
rd = kw['run_dir']
except KeyError:
rd = run_dir
# binds the context to the nodes in use to avoid a context singleton
self.node_class = type('Nod3', (waflib.Node.Node,), {})
self.node_class.__module__ = 'waflib.Node'
self.node_class.ctx = self
self.root = self.node_class('', None)
self.cur_script = None
self.path = self.root.find_dir(rd)
self.stack_path = []
self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self}
self.logger = None
def finalize(self):
"""
Called to free resources such as logger files
"""
try:
logger = self.logger
except AttributeError:
pass
else:
Logs.free_logger(logger)
delattr(self, 'logger')
def load(self, tool_list, *k, **kw):
"""
Loads a Waf tool as a module, and tries calling the function named :py:const:`waflib.Context.Context.fun`
from it. A ``tooldir`` argument may be provided as a list of module paths.
:param tool_list: list of Waf tool names to load
:type tool_list: list of string or space-separated string
"""
tools = Utils.to_list(tool_list)
path = Utils.to_list(kw.get('tooldir', ''))
with_sys_path = kw.get('with_sys_path', True)
for t in tools:
module = load_tool(t, path, with_sys_path=with_sys_path)
fun = getattr(module, kw.get('name', self.fun), None)
if fun:
fun(self)
def execute(self):
"""
Executes the command by running the function it is bound to in the
top-level wscript file. Most subclasses redefine this method to provide
additional functionality.
"""
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self, node):
"""
Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`.
The current script is bound as a Node object on ``self.cur_script``, and the current path
is bound to ``self.path``
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.stack_path.append(self.cur_script)
self.cur_script = node
self.path = node.parent
def post_recurse(self, node):
"""
Restores ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates.
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.cur_script = self.stack_path.pop()
if self.cur_script:
self.path = self.cur_script.parent
def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None):
"""
Runs user-provided functions from the supplied list of directories.
The directories can be either absolute, or relative to the directory
of the wscript file
The methods :py:meth:`waflib.Context.Context.pre_recurse` and
:py:meth:`waflib.Context.Context.post_recurse` are called immediately before
and after a script has been executed.
:param dirs: List of directories to visit
:type dirs: list of string or space-separated string
:param name: Name of function to invoke from the wscript
:type name: string
:param mandatory: whether sub wscript files are required to exist
:type mandatory: bool
:param once: read the script file once for a particular context
:type once: bool
"""
try:
cache = self.recurse_cache
except AttributeError:
cache = self.recurse_cache = {}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
# make relative paths absolute with respect to the current script's directory
d = os.path.join(self.path.abspath(), d)
WSCRIPT = os.path.join(d, WSCRIPT_FILE)
WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun)
node = self.root.find_node(WSCRIPT_FUN)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
function_code = node.read('rU', encoding)
exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node = self.root.find_node(WSCRIPT)
tup = (node, name or self.fun)
if node and (not once or tup not in cache):
cache[tup] = True
self.pre_recurse(node)
try:
wscript_module = load_module(node.abspath(), encoding=encoding)
user_function = getattr(wscript_module, (name or self.fun), None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %r defined in %s' % (name or self.fun, node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
try:
os.listdir(d)
except OSError:
raise Errors.WafError('Cannot read the folder %r' % d)
raise Errors.WafError('No wscript file in directory %s' % d)
def log_command(self, cmd, kw):
if Logs.verbose:
fmt = os.environ.get('WAF_CMD_FORMAT')
if fmt == 'string':
if not isinstance(cmd, str):
cmd = Utils.shell_escape(cmd)
Logs.debug('runner: %r', cmd)
Logs.debug('runner_env: kw=%s', kw)
def exec_command(self, cmd, **kw):
"""
Runs an external process and returns the exit status::
def run(tsk):
ret = tsk.generator.bld.exec_command('touch foo.txt')
return ret
If the context has the attribute 'log', then captures and logs the process stderr/stdout.
Unlike :py:meth:`waflib.Context.Context.cmd_and_log`, this method does not return the
stdout/stderr values captured.
:param cmd: command argument for subprocess.Popen
:type cmd: string or list
:param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate.
:type kw: dict
:returns: process exit status
:rtype: integer
:raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process
:raises: :py:class:`waflib.Errors.WafError` in case of execution failure
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
self.log_command(cmd, kw)
if self.logger:
self.logger.info(cmd)
if 'stdout' not in kw:
kw['stdout'] = subprocess.PIPE
if 'stderr' not in kw:
kw['stderr'] = subprocess.PIPE
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError('Program %s not found!' % cmd[0])
cargs = {}
if 'timeout' in kw:
if sys.hexversion >= 0x3030000:
cargs['timeout'] = kw['timeout']
if not 'start_new_session' in kw:
kw['start_new_session'] = True
del kw['timeout']
if 'input' in kw:
if kw['input']:
cargs['input'] = kw['input']
kw['stdin'] = subprocess.PIPE
del kw['input']
if 'cwd' in kw:
if not isinstance(kw['cwd'], str):
kw['cwd'] = kw['cwd'].abspath()
encoding = kw.pop('decode_as', default_encoding)
try:
ret, out, err = Utils.run_process(cmd, kw, cargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if out:
if not isinstance(out, str):
out = out.decode(encoding, errors='replace')
if self.logger:
self.logger.debug('out: %s', out)
else:
Logs.info(out, extra={'stream':sys.stdout, 'c1': ''})
if err:
if not isinstance(err, str):
err = err.decode(encoding, errors='replace')
if self.logger:
self.logger.error('err: %s' % err)
else:
Logs.info(err, extra={'stream':sys.stderr, 'c1': ''})
return ret
def cmd_and_log(self, cmd, **kw):
"""
Executes a process and returns stdout/stderr if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object (configuration tests)::
def configure(conf):
out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(cmd, input='\\n'.encode(), output=waflib.Context.STDOUT)
try:
conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
except Errors.WafError as e:
print(e.stdout, e.stderr)
:param cmd: args for subprocess.Popen
:type cmd: list or string
:param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate.
:type kw: dict
:returns: a tuple containing the contents of stdout and stderr
:rtype: string
:raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process
:raises: :py:class:`waflib.Errors.WafError` in case of execution failure; stdout/stderr/returncode are bound to the exception object
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
self.log_command(cmd, kw)
quiet = kw.pop('quiet', None)
to_ret = kw.pop('output', STDOUT)
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError('Program %r not found!' % cmd[0])
kw['stdout'] = kw['stderr'] = subprocess.PIPE
if quiet is None:
self.to_log(cmd)
cargs = {}
if 'timeout' in kw:
if sys.hexversion >= 0x3030000:
cargs['timeout'] = kw['timeout']
if not 'start_new_session' in kw:
kw['start_new_session'] = True
del kw['timeout']
if 'input' in kw:
if kw['input']:
cargs['input'] = kw['input']
kw['stdin'] = subprocess.PIPE
del kw['input']
if 'cwd' in kw:
if not isinstance(kw['cwd'], str):
kw['cwd'] = kw['cwd'].abspath()
encoding = kw.pop('decode_as', default_encoding)
try:
ret, out, err = Utils.run_process(cmd, kw, cargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if not isinstance(out, str):
out = out.decode(encoding, errors='replace')
if not isinstance(err, str):
err = err.decode(encoding, errors='replace')
if out and quiet != STDOUT and quiet != BOTH:
self.to_log('out: %s' % out)
if err and quiet != STDERR and quiet != BOTH:
self.to_log('err: %s' % err)
if ret:
e = Errors.WafError('Command %r returned %r' % (cmd, ret))
e.returncode = ret
e.stderr = err
e.stdout = out
raise e
if to_ret == BOTH:
return (out, err)
elif to_ret == STDERR:
return err
return out
def fatal(self, msg, ex=None):
"""
Prints an error message in red and stops command execution; this is
usually used in the configuration section::
def configure(conf):
conf.fatal('a requirement is missing')
:param msg: message to display
:type msg: string
:param ex: optional exception object
:type ex: exception
:raises: :py:class:`waflib.Errors.ConfigurationError`
"""
if self.logger:
self.logger.info('from %s: %s' % (self.path.abspath(), msg))
try:
logfile = self.logger.handlers[0].baseFilename
except AttributeError:
pass
else:
if os.environ.get('WAF_PRINT_FAILURE_LOG'):
# see #1930
msg = 'Log from (%s):\n%s\n' % (logfile, Utils.readf(logfile))
else:
msg = '%s\n(complete log in %s)' % (msg, logfile)
raise self.errors.ConfigurationError(msg, ex=ex)
def to_log(self, msg):
"""
Logs information to the logger (if present), or to stderr.
Empty messages are not printed::
def build(bld):
bld.to_log('starting the build')
Provide a logger on the context class or override this method if necessary.
:param msg: message
:type msg: string
"""
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self, *k, **kw):
"""
Prints a configuration message of the form ``msg: result``.
The second part of the message will be in colors. The output
can be disabled easily by setting ``in_msg`` to a positive value::
def configure(conf):
self.in_msg = 1
conf.msg('Checking for library foo', 'ok')
# no output
:param msg: message to display to the user
:type msg: string
:param result: result to display
:type result: string or boolean
:param color: color to use, see :py:const:`waflib.Logs.colors_lst`
:type color: string
"""
try:
msg = kw['msg']
except KeyError:
msg = k[0]
self.start_msg(msg, **kw)
try:
result = kw['result']
except KeyError:
result = k[1]
color = kw.get('color')
if not isinstance(color, str):
color = result and 'GREEN' or 'YELLOW'
self.end_msg(result, color, **kw)
def start_msg(self, *k, **kw):
"""
Prints the beginning of a 'Checking for xxx' message. See :py:meth:`waflib.Context.Context.msg`
"""
if kw.get('quiet'):
return
msg = kw.get('msg') or k[0]
try:
if self.in_msg:
self.in_msg += 1
return
except AttributeError:
self.in_msg = 0
self.in_msg += 1
try:
self.line_just = max(self.line_just, len(msg))
except AttributeError:
self.line_just = max(40, len(msg))
for x in (self.line_just * '-', msg):
self.to_log(x)
Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, *k, **kw):
"""Prints the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
if kw.get('quiet'):
return
self.in_msg -= 1
if self.in_msg:
return
result = kw.get('result') or k[0]
defcolor = 'GREEN'
if result is True:
msg = 'ok'
elif not result:
msg = 'not found'
defcolor = 'YELLOW'
else:
msg = str(result)
self.to_log(msg)
try:
color = kw['color']
except KeyError:
if len(k) > 1 and k[1] in Logs.colors_lst:
# compatibility waf 1.7
color = k[1]
else:
color = defcolor
Logs.pprint(color, msg)
def load_special_tools(self, var, ban=[]):
"""
Loads third-party extensions modules for certain programming languages
by trying to list certain files in the extras/ directory. This method
is typically called once for a programming language group, see for
example :py:mod:`waflib.Tools.compiler_c`
:param var: glob expression, for example 'cxx\_\*.py'
:type var: string
:param ban: list of exact file names to exclude
:type ban: list of string
"""
if os.path.isdir(waf_dir):
lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py', ''))
else:
from zipfile import PyZipFile
waflibs = PyZipFile(waf_dir)
lst = waflibs.namelist()
for x in lst:
if not re.match('waflib/extras/%s' % var.replace('*', '.*'), x):
continue
f = os.path.basename(x)
doban = False
for b in ban:
r = b.replace('*', '.*')
if re.match(r, f):
doban = True
if not doban:
f = f.replace('.py', '')
load_tool(f)
cache_modules = {}
"""
Dictionary holding already loaded modules (wscript), indexed by their absolute path.
The modules are added automatically by :py:func:`waflib.Context.load_module`
"""
def load_module(path, encoding=None):
"""
Loads a wscript file as a python module. This method caches results in :py:attr:`waflib.Context.cache_modules`
:param path: file path
:type path: string
:return: Loaded Python module
:rtype: module
"""
try:
return cache_modules[path]
except KeyError:
pass
module = imp.new_module(WSCRIPT_FILE)
try:
code = Utils.readf(path, m='rU', encoding=encoding)
except EnvironmentError:
raise Errors.WafError('Could not read the file %r' % path)
module_dir = os.path.dirname(path)
sys.path.insert(0, module_dir)
try:
exec(compile(code, path, 'exec'), module.__dict__)
finally:
sys.path.remove(module_dir)
cache_modules[path] = module
return module
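# Editor's sketch: load_module() is memoized per absolute path through
# cache_modules, so recursing twice into the same wscript is cheap.
# `path` below stands for any existing wscript file (hypothetical).
def _demo_module_cache(path):
	first = load_module(path)
	second = load_module(path)
	assert first is second # served from cache_modules, not re-executed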
def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True):
"""
Imports a Waf tool as a Python module, and stores it in the dict :py:const:`waflib.Context.Context.tools`
:type tool: string
:param tool: Name of the tool
:type tooldir: list
:param tooldir: List of directories to search for the tool module
:type with_sys_path: boolean
:param with_sys_path: whether or not to search the regular sys.path, besides waf_dir and potentially given tooldirs
"""
if tool == 'java':
tool = 'javaw' # jython
else:
tool = tool.replace('++', 'xx')
if not with_sys_path:
back_path = sys.path
sys.path = []
try:
if tooldir:
assert isinstance(tooldir, list)
sys.path = tooldir + sys.path
try:
__import__(tool)
except ImportError as e:
e.waf_sys_path = list(sys.path)
raise
finally:
for d in tooldir:
sys.path.remove(d)
ret = sys.modules[tool]
Context.tools[tool] = ret
return ret
else:
if not with_sys_path:
sys.path.insert(0, waf_dir)
try:
for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'):
try:
__import__(x % tool)
break
except ImportError:
x = None
else: # raise an exception
__import__(tool)
except ImportError as e:
e.waf_sys_path = list(sys.path)
raise
finally:
if not with_sys_path:
sys.path.remove(waf_dir)
ret = sys.modules[x % tool]
Context.tools[tool] = ret
return ret
finally:
if not with_sys_path:
sys.path += back_path
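# Editor's usage sketch (assumes waf's bundled tools are importable):
# load_tool() rewrites '++' to 'xx', so 'g++' resolves to
# waflib.Tools.gxx, and the module is cached in Context.tools.
def _demo_load_tool():
	mod = load_tool('g++')
	assert Context.tools['gxx'] is mod
	return mod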
|
blablack/deteriorate-lv2
|
waflib/Context.py
|
Python
|
gpl-3.0
| 21,029
|
[
"VisIt"
] |
7565e96d5acae8a47d81469b5ab2addc0654db1f23d022abeb88b6352a36bc2f
|
##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2015 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Kenneth Hoste
# Authors:: George Tsouloupas <g.tsouloupas@cyi.ac.cy>, Fotis Georgatos <fotis@cern.ch>
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing BWA, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: George Tsouloupas <g.tsouloupas@cyi.ac.cy>
"""
import os
import shutil
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_BWA(ConfigureMake):
"""
Support for building BWA
"""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to BWA."""
super(EB_BWA, self).__init__(*args, **kwargs)
self.files = []
def configure_step(self):
"""
Empty function as bwa comes with _no_ configure script
"""
self.files = ["bwa", "qualfa2fq.pl", "xa2multi.pl"]
if LooseVersion(self.version) < LooseVersion("0.7.0"):
# solid2fastq was dropped in recent versions because the same functionality is covered by other tools already
# cfr. http://osdir.com/ml/general/2010-10/msg26205.html
self.files.append("solid2fastq.pl")
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in self.files:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError as err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for BWA."""
custom_paths = {
'files': ["bin/%s" % x for x in self.files],
'dirs': []
}
super(EB_BWA, self).sanity_check_step(custom_paths=custom_paths)
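def _demo_version_gate():
    # Editor's sketch: the LooseVersion comparison in configure_step()
    # gates the extra script; e.g. BWA 0.6.2 still ships solid2fastq.pl
    # while 0.7.5 does not. Version strings are illustrative.
    from distutils.version import LooseVersion
    assert LooseVersion("0.6.2") < LooseVersion("0.7.0")
    assert not LooseVersion("0.7.5") < LooseVersion("0.7.0")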
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/b/bwa.py
|
Python
|
mit
| 2,456
|
[
"BWA"
] |
bd8a07b568b8493f62dcbcc6f2db910e90f5b3b617058474ba6581eba2b34448
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
dim = 10
R = CorrelationMatrix(dim)
for i in range(dim):
for j in range(i):
R[i, j] = (i + j + 1.0) / (2.0 * dim)
mean = NumericalPoint(dim, 2.0)
sigma = NumericalPoint(dim, 3.0)
distribution = Normal(mean, sigma, R)
size = 100
sample = distribution.getSample(size)
sampleX = NumericalSample(size, dim - 1)
sampleY = NumericalSample(size, 1)
for i in range(size):
sampleY[i] = NumericalPoint(1, sample[i, 0])
p = NumericalPoint(dim - 1)
for j in range(dim - 1):
p[j] = sample[i, j + 1]
sampleX[i] = p
sampleZ = NumericalSample(size, 1)
for i in range(size):
sampleZ[i] = NumericalPoint(1, sampleY[i, 0] * sampleY[i, 0])
discreteSample1 = Poisson(0.1).getSample(size)
discreteSample2 = Geometric(0.4).getSample(size)
# ChiSquared independence test: are two samples (of sizes not necessarily equal) independent?
# Care : discrete samples only
# H0 = independent samples
# p-value threshold : probability of the H0 reject zone : 1-0.90
# p-value : probability (test variable decision > test variable decision evaluated on the samples)
# Test = True <=> p-value > p-value threshold
print("ChiSquared=", HypothesisTest.ChiSquared(
discreteSample1, discreteSample2, 0.90))
print("ChiSquared2=", HypothesisTest.ChiSquared(
discreteSample1, discreteSample1, 0.90))
# Pearson Test : test if two gaussian samples are independent (based on the evaluation of the linear correlation coefficient)
# H0 : independent samples (linear correlation coefficient = 0)
# Test = True <=> independent samples (linear correlation coefficient = 0)
# p-value threshold : probability of the H0 reject zone : 1-0.90
# p-value : probability (test variable decision > test variable decision evaluated on the samples)
# Test = True <=> p-value > p-value threshold
print("Pearson=", HypothesisTest.Pearson(sampleY, sampleZ, 0.90))
# Smirnov Test : test if two samples (of sizes not necessarily equal) follow the same distribution
# Care : continuous distributions only
# H0 = same continuous distribution
# Test = True <=> same distribution
# p-value threshold : probability of the H0 reject zone : 1-0.90
# p-value : probability (test variable decision > test variable decision evaluated on the samples)
# Test = True <=> p-value > p-value threshold
print("Smirnov=", HypothesisTest.Smirnov(sampleY, sampleZ, 0.90))
# Spearman Test : test if two samples have a monotonic relation
# H0 = no monotonic relation between both samples
# Test = True <=> no monotonic relation
# p-value threshold : probability of the H0 reject zone : 1-0.90
# p-value : probability (test variable decision > test variable decision evaluated on the samples)
# Test = True <=> p-value > p-value threshold
print("Spearman=", HypothesisTest.Spearman(sampleY, sampleZ, 0.90))
except:
import sys
print("t_HypothesisTest_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
dubourg/openturns
|
python/test/t_HypothesisTest_std.py
|
Python
|
gpl-3.0
| 3,238
|
[
"Gaussian"
] |
a056c0e1ea19db268df31df3e40a5272f7a0d8090de57cbdf9b4202b6b381125
|
from __future__ import print_function, division
from sympy.core import Mul
from sympy.functions import DiracDelta, Heaviside
from sympy.core.compatibility import default_sort_key
from sympy.core.singleton import S
def change_mul(node, x):
"""change_mul(node, x)
Rearranges the operands of a product, bringing to front any simple
DiracDelta expression.
If no simple DiracDelta expression was found, then all the DiracDelta
expressions are simplified (using DiracDelta.expand(diracdelta=True, wrt=x)).
Return: (dirac, new node)
Where:
o dirac is either a simple DiracDelta expression or None (if no simple
expression was found);
o new node is either a simplified DiracDelta expression or None (if it
could not be simplified).
Examples
========
>>> from sympy import DiracDelta, cos
>>> from sympy.integrals.deltafunctions import change_mul
>>> from sympy.abc import x, y
>>> change_mul(x*y*DiracDelta(x)*cos(x), x)
(DiracDelta(x), x*y*cos(x))
>>> change_mul(x*y*DiracDelta(x**2 - 1)*cos(x), x)
(None, x*y*cos(x)*DiracDelta(x - 1)/2 + x*y*cos(x)*DiracDelta(x + 1)/2)
>>> change_mul(x*y*DiracDelta(cos(x))*cos(x), x)
(None, None)
See Also
========
sympy.functions.special.delta_functions.DiracDelta
deltaintegrate
"""
new_args = []
dirac = None
#Sorting is needed so that we consistently collapse the same delta;
#However, we must preserve the ordering of non-commutative terms
c, nc = node.args_cnc()
sorted_args = sorted(c, key=default_sort_key)
sorted_args.extend(nc)
for arg in sorted_args:
if arg.is_Pow and isinstance(arg.base, DiracDelta):
new_args.append(arg.func(arg.base, arg.exp - 1))
arg = arg.base
if dirac is None and (isinstance(arg, DiracDelta) and arg.is_simple(x)):
dirac = arg
else:
new_args.append(arg)
if not dirac: # there was no simple dirac
new_args = []
for arg in sorted_args:
if isinstance(arg, DiracDelta):
new_args.append(arg.expand(diracdelta=True, wrt=x))
elif arg.is_Pow and isinstance(arg.base, DiracDelta):
new_args.append(arg.func(arg.base.expand(diracdelta=True, wrt=x), arg.exp))
else:
new_args.append(arg)
if new_args != sorted_args:
nnode = Mul(*new_args).expand()
else: # if the node didn't change there is nothing to do
nnode = None
return (None, nnode)
return (dirac, Mul(*new_args))
def deltaintegrate(f, x):
"""
deltaintegrate(f, x)
The idea for integration is the following:
- If we are dealing with a DiracDelta expression, i.e. DiracDelta(g(x)),
we try to simplify it.
If we could simplify it, then we integrate the resulting expression.
We already know we can integrate a simplified expression, because only
simple DiracDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the integral,
taking care if we are dealing with a Derivative or with a proper
DiracDelta.
2) The expression is not simple (i.e. DiracDelta(cos(x))): we can do
nothing at all.
- If the node is a multiplication node having a DiracDelta term:
First we expand it.
If the expansion did work, then we try to integrate the expansion.
If not, we try to extract a simple DiracDelta term, then we have two
cases:
1) We have a simple DiracDelta term, so we return the integral.
2) We didn't have a simple term, but we do have an expression with
simplified DiracDelta terms, so we integrate this expression.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.integrals.deltafunctions import deltaintegrate
>>> from sympy import sin, cos, DiracDelta, Heaviside
>>> deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)
sin(1)*cos(1)*Heaviside(x - 1)
>>> deltaintegrate(y**2*DiracDelta(x - z)*DiracDelta(y - z), y)
z**2*DiracDelta(x - z)*Heaviside(y - z)
See Also
========
sympy.functions.special.delta_functions.DiracDelta
sympy.integrals.integrals.Integral
"""
if not f.has(DiracDelta):
return None
from sympy.integrals import Integral, integrate
from sympy.solvers import solve
# g(x) = DiracDelta(h(x))
if f.func == DiracDelta:
h = f.expand(diracdelta=True, wrt=x)
if h == f: # can't simplify the expression
#FIXME: the second term tells whether it is a DiracDelta or a Derivative
#For integrating derivatives of DiracDelta we need the chain rule
if f.is_simple(x):
if (len(f.args) <= 1 or f.args[1] == 0):
return Heaviside(f.args[0])
else:
return (DiracDelta(f.args[0], f.args[1] - 1) /
f.args[0].as_poly().LC())
else: # let's try to integrate the simplified expression
fh = integrate(h, x)
return fh
elif f.is_Mul or f.is_Pow: # g(x) = a*b*c*f(DiracDelta(h(x)))*d*e
g = f.expand()
if f != g: # the expansion worked
fh = integrate(g, x)
if fh is not None and not isinstance(fh, Integral):
return fh
else:
# no expansion performed, try to extract a simple DiracDelta term
deltaterm, rest_mult = change_mul(f, x)
if not deltaterm:
if rest_mult:
fh = integrate(rest_mult, x)
return fh
else:
deltaterm = deltaterm.expand(diracdelta=True, wrt=x)
if deltaterm.is_Mul: # Take out any extracted factors
deltaterm, rest_mult_2 = change_mul(deltaterm, x)
rest_mult = rest_mult*rest_mult_2
point = solve(deltaterm.args[0], x)[0]
# Return the largest hyperreal term left after
# repeated integration by parts. For example,
#
# integrate(y*DiracDelta(x, 1),x) == y*DiracDelta(x,0), not 0
#
# This is so Integral(y*DiracDelta(x).diff(x),x).doit()
# will return y*DiracDelta(x) instead of 0 or DiracDelta(x),
# both of which are correct everywhere the value is defined
# but give wrong answers for nested integration.
n = (0 if len(deltaterm.args)==1 else deltaterm.args[1])
m = 0
while n >= 0:
r = (-1)**n*rest_mult.diff(x, n).subs(x, point)
if r is S.Zero:
n -= 1
m += 1
else:
if m == 0:
return r*Heaviside(x - point)
else:
return r*DiracDelta(x,m-1)
# In some very weak sense, x=0 is still a singularity,
# but we hope it will not be of any practical consequence.
return S.Zero
return None
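def _demo_deltaintegrate():
    # Editor's sketch: the sifting property, checked against the first
    # docstring example above.
    from sympy import DiracDelta, Heaviside, cos, sin
    from sympy.abc import x
    result = deltaintegrate(x*sin(x)*cos(x)*DiracDelta(x - 1), x)
    assert result == sin(1)*cos(1)*Heaviside(x - 1)
    return result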
|
wxgeo/geophar
|
wxgeometrie/sympy/integrals/deltafunctions.py
|
Python
|
gpl-2.0
| 7,416
|
[
"DIRAC"
] |
168da773ea018c5d03b29d85c657bf3ff668e4fa7ebb238e2aa7605269cdf95b
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 25, 2014
@author: noe
'''
import unittest
from pyemma.util import statistics
import numpy as np
class TestStatistics(unittest.TestCase):
def assertConfidence(self, sample, alpha, precision):
conf = statistics.confidence_interval(sample, alpha)
n_in = 0.0
for i in range(len(sample)):
if sample[i] > conf[0] and sample[i] < conf[1]:
n_in += 1.0
assert abs(alpha - (n_in / len(sample))) < precision
def test_confidence_interval(self):
# exponential distribution
self.assertConfidence(np.random.exponential(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.exponential(size=10000), 0.95, 0.01)
# Gaussian distribution
self.assertConfidence(np.random.normal(size=10000), 0.5, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.8, 0.01)
self.assertConfidence(np.random.normal(size=10000), 0.95, 0.01)
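def _demo_confidence_interval():
    # Editor's sketch (uses the statistics API exercised above): the
    # returned pair brackets the central `alpha` mass of the sample, so
    # for a standard normal the 95% interval straddles zero.
    sample = np.random.normal(size=10000)
    conf = statistics.confidence_interval(sample, 0.95)
    assert conf[0] < 0.0 < conf[1]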
if __name__ == "__main__":
unittest.main()
|
markovmodel/PyEMMA
|
pyemma/util/tests/statistics_test.py
|
Python
|
lgpl-3.0
| 1,879
|
[
"Gaussian"
] |
1b50302ef981cd83217509e1f5fbf9b92760ecf34863ee77a01ff92acb6c6880
|
"""
Gaussian Mixture Model
"""
import numpy as np
from scipy.stats import multivariate_normal
class GaussianMixture(object):
"""
Gaussian Mixture classification
"""
def __init__(self, c=2):
"""
Attributes:
samples (np.ndarray): Data to determine gaussian mixtures for
mus (dict): class means {class: [mean1, mean2, ...]}
covs (dict): class covariance matrices {class :[COV]}
priors (dict): class prior probabilities {class: prior}
responsibility_matrix (dict): responsibility matrix in EM
c (int): number of Gaussian components
learned (bool): Keeps track of if model has been fit
"""
self.samples = np.nan
self.mus = np.nan
self.covs = np.nan
self.priors = np.nan
self.responsibility_matrix = np.nan
self.c = c
self.learned = False
def fit(self, X, iterations=50):
"""
Args:
X (np.ndarray): Training data of shape[n_samples, n_features]
iterations (int): number of EM iterations until stopping
Returns: an instance of self
"""
self.samples = X
n_samples = np.shape(X)[0]
n_features = np.shape(X)[1]
# Initialize mus, covs, priors
initial_indices = np.random.choice(range(n_samples), self.c,
replace=False)
self.mus = X[initial_indices, :]
self.covs = [np.identity(n_features) for i in range(self.c)]
self.priors = [1.0/self.c for i in range(self.c)]
for iteration in range(iterations):
self.responsibility_matrix = self._expectation()
self.priors, self.mus, self.covs = self._maximization()
self.learned = True
return self
def _expectations(self, point):
responsibilities = [0 for i in range(self.c)]
for k in range(self.c):
probability = multivariate_normal.pdf(point,
mean=self.mus[k],
cov=self.covs[k]) * \
self.priors[k]
responsibilities[k] = probability
responsibilities = [float(i) / sum(responsibilities)
for i in responsibilities]
return responsibilities
def _expectation(self):
return np.apply_along_axis(self._expectations, 1, self.samples)
def _maximization(self):
# Maximize priors
priors = sum(self.responsibility_matrix)
priors = [float(i)/sum(priors) for i in priors]
# Maximize means
mus = [0 for i in range(self.c)]
for k in range(self.c):
mus_k = sum(np.multiply(self.samples,
self.responsibility_matrix[:, k][:, np.newaxis]))
normalized_mus_k = mus_k / sum(self.responsibility_matrix[:, k])
mus[k] = normalized_mus_k
# Maximize covariances
covs = [0 for i in range(self.c)]
for k in range(self.c):
covs[k] = np.cov(self.samples.T,
aweights=self.responsibility_matrix[:, k])
return priors, mus, covs
def predict(self, x, probs=False):
"""
Note: currently only works on a single vector, not on matrices
Args:
x (np.ndarray): Training data of shape[1, n_features]
probs (bool): if True, returns probability of each class as well
Returns:
float: Returns predicted class
Raises:
ValueError if model has not been fit
"""
if not self.learned:
raise NameError('Fit model first')
probabilities = [0 for i in range(self.c)]
for k in range(self.c):
probability = multivariate_normal.pdf(x,
mean=self.mus[k],
cov=self.covs[k]) * \
self.priors[k]
probabilities[k] = probability
max_class = np.argmax(probabilities)
class_probs = [float(i)/sum(probabilities) for i in probabilities]
if probs:
return (max_class, class_probs)
return np.argmax(probabilities)
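if __name__ == '__main__':
    # Editor's usage sketch: fit the mixture on two synthetic Gaussian
    # blobs and classify a held-out point. Sizes, means and the seed are
    # arbitrary.
    np.random.seed(0)
    X = np.vstack([np.random.normal(0.0, 1.0, (100, 2)),
                   np.random.normal(5.0, 1.0, (100, 2))])
    model = GaussianMixture(c=2).fit(X, iterations=20)
    label, class_probs = model.predict(np.array([5.0, 5.0]), probs=True)
    print(label, class_probs)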
|
christopherjenness/ML-lib
|
ML/gaussianmixture.py
|
Python
|
mit
| 4,378
|
[
"Gaussian"
] |
71a16329ba70da459d9ee4bee2f41e8c5ee910328a6759b9e07b263256e5b700
|
""" This module loads all the classes from the VTK Rendering library into
its namespace. This is an optional module."""
import os
if os.name == 'posix':
from libvtkRenderingPython import *
else:
from vtkRenderingPython import *
|
sgh/vtk
|
Wrapping/Python/vtk/rendering.py
|
Python
|
bsd-3-clause
| 239
|
[
"VTK"
] |
875786b9541e65bca95eaae74a9a34ad1c54a133fd98a07770948c6c5e4c5a5e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Produce word vectors with deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
NOTE: There are more ways to get word vectors in Gensim than just Word2Vec.
See wrappers for FastText, VarEmbed and WordRank.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews,
visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The word vectors are stored in a KeyedVectors instance in model.wv.
This separates the read-only word vector lookup operations in KeyedVectors from the training code in Word2Vec::
>>> model.wv['computer'] # numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
The word vectors can also be instantiated from an existing file on disk in the word2vec C format
as a KeyedVectors instance::
NOTE: It is impossible to continue training the vectors loaded from the C format because hidden weights,
vocabulary frequency and the binary tree are missing::
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various NLP word tasks with the model. Some of them
are already built-in::
>>> model.wv.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.wv.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> model.wv.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.wv.similarity('woman', 'man')
0.73723527
Probability of a text under the model::
>>> model.score(["The fox jumped over a lazy dog".split()])
0.2158356
Correlation with human opinion on word similarity::
>>> model.wv.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> model.wv.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
If you're finished training a model (i.e. no more updates, only querying),
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
>>> word_vectors = model.wv
>>> del model
to trim unneeded model memory and use much less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean.
Distributed Representations of Words and Phrases and their Compositionality. In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
import warnings
from gensim.utils import keep_vocab_item, call_on_class_only
from gensim.models.keyedvectors import KeyedVectors, Vocab
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt,\
empty, sum as np_sum, ones, logaddexp
from scipy.special import expit
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.utils import deprecated
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def train_batch_sg(model, sentences, alpha, work=None, compute_loss=False):
"""
Update skip-gram model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(
model, model.wv.index2word[word.index], word2.index, alpha, compute_loss=compute_loss
)
result += len(word_vocabs)
return result
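# Editor's sketch (standalone): which positions a centre word at `pos`
# is trained against, given the randomly reduced window above.
#
# def context_positions(pos, window, reduced_window, n):
#     start = max(0, pos - window + reduced_window)
#     stop = min(n, pos + window + 1 - reduced_window)
#     return [p for p in range(start, stop) if p != pos]
#
# context_positions(5, window=5, reduced_window=3, n=20) # -> [3, 4, 6, 7]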
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None, compute_loss=False):
"""
Update CBOW model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha, compute_loss=compute_loss)
result += len(word_vocabs)
return result
def score_sentence_sg(model, sentence, work=None):
"""
Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(word_vocabs[start: pos + model.window + 1], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, work=None, neu1=None):
"""
Obtain likelihood score for a single sentence in a fitted CBOW representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, l1)
return log_prob_sentence
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None, compute_loss=False, is_ft=False):
if context_vectors is None:
if is_ft:
context_vectors_vocab = model.wv.syn0_vocab
context_vectors_ngrams = model.wv.syn0_ngrams
else:
context_vectors = model.wv.syn0
if context_locks is None:
if is_ft:
context_locks_vocab = model.syn0_vocab_lockf
context_locks_ngrams = model.syn0_ngrams_lockf
else:
context_locks = model.syn0_lockf
if word not in model.wv.vocab:
return
predict_word = model.wv.vocab[word] # target word (NN output)
if is_ft:
l1_vocab = context_vectors_vocab[context_index[0]]
l1_ngrams = np_sum(context_vectors_ngrams[context_index[1:]], axis=0)
if context_index:
l1 = np_sum([l1_vocab, l1_ngrams], axis=0) / len(context_index)
else:
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
prod_term = dot(l1, l2a.T)
fa = expit(prod_term) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
# loss component corresponding to hierarchical softmax
if compute_loss:
sgn = (-1.0)**predict_word.code # `ch` function, 0 -> 1, 1 -> -1
lprob = -log(expit(-sgn * prod_term))
model.running_training_loss += sum(lprob)
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
prod_term = dot(l1, l2b.T)
fb = expit(prod_term) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# loss component corresponding to negative sampling
if compute_loss:
model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words
model.running_training_loss -= log(expit(prod_term[0])) # for the output word
if learn_vectors:
if is_ft:
model.wv.syn0_vocab[context_index[0]] += neu1e * context_locks_vocab[context_index[0]]
for i in context_index[1:]:
model.wv.syn0_ngrams[i] += neu1e * context_locks_ngrams[i]
else:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True,
compute_loss=False, context_vectors=None, context_locks=None, is_ft=False):
if context_vectors is None:
if is_ft:
context_vectors_vocab = model.wv.syn0_vocab
context_vectors_ngrams = model.wv.syn0_ngrams
else:
context_vectors = model.wv.syn0
if context_locks is None:
if is_ft:
context_locks_vocab = model.syn0_vocab_lockf
context_locks_ngrams = model.syn0_ngrams_lockf
else:
context_locks = model.syn0_lockf
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
prod_term = dot(l1, l2a.T)
fa = expit(prod_term) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
# loss component corresponding to hierarchical softmax
if compute_loss:
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
model.running_training_loss += sum(-log(expit(-sgn * prod_term)))
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
prod_term = dot(l1, l2b.T)
fb = expit(prod_term) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
# loss component corresponding to negative sampling
if compute_loss:
model.running_training_loss -= sum(log(expit(-1 * prod_term[1:]))) # for the sampled words
model.running_training_loss -= log(expit(prod_term[0])) # for the output word
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if is_ft:
if not model.cbow_mean and input_word_indices:
neu1e /= (len(input_word_indices[0]) + len(input_word_indices[1]))
for i in input_word_indices[0]:
context_vectors_vocab[i] += neu1e * context_locks_vocab[i]
for i in input_word_indices[1]:
context_vectors_ngrams[i] += neu1e * context_locks_ngrams[i]
else:
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
context_vectors[i] += neu1e * context_locks[i]
return neu1e
def score_sg_pair(model, word, word2):
l1 = model.wv.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
def score_cbow_pair(model, word, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
If you're finished training a model (=no more updates, only querying)
then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `wv.save_word2vec_format()`
and `KeyedVectors.load_word2vec_format()`.
"""
def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=0`), CBOW is used.
Otherwise (`sg=1`), skip-gram is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
`seed` = for the random number generator. Initial vectors for each
word are seeded with a hash of the concatenation of word + str(seed).
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 1e-3, useful range is (0, 1e-5).
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`hs` = if 1, hierarchical softmax will be used for model training.
If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
Default is 5. If set to 0, no negative sampling is used.
`cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.
Only applies when cbow is used.
`hashfxn` = hash function to use to randomly initialize weights, for increased
training reproducibility. Default is Python's rudimentary built in hash function.
`iter` = number of iterations (epochs) over the corpus. Default is 5.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
`sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
assigning word indexes.
`batch_words` = target size (in words) for batches of examples passed to worker threads (and
thus cython routines). Default is 10000. (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
"""
self.load = call_on_class_only
if FAST_VERSION == -1:
logger.warning('Slow version of %s is being used', __name__)
else:
logger.debug('Fast version of %s is being used', __name__)
self.initialize_word_vectors()
self.sg = int(sg)
self.cum_table = None # for negative sampling
self.vector_size = int(size)
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.min_alpha_yet_reached = float(alpha) # To warn user if alpha increases
self.window = int(window)
self.max_vocab_size = max_vocab_size
self.seed = seed
self.random = random.RandomState(seed)
self.min_count = min_count
self.sample = sample
self.workers = int(workers)
self.min_alpha = float(min_alpha)
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.train_count = 0
self.total_train_time = 0
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
self.model_trimmed_post_training = False
self.compute_loss = compute_loss
self.running_training_loss = 0
if sentences is not None:
if isinstance(sentences, GeneratorType):
raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
self.build_vocab(sentences, trim_rule=trim_rule)
self.train(
sentences, total_examples=self.corpus_count, epochs=self.iter,
start_alpha=self.alpha, end_alpha=self.min_alpha
)
else:
if trim_rule is not None:
logger.warning(
"The rule, if given, is only used to prune vocabulary during build_vocab() "
"and is not stored as part of the model. Model initialized without sentences. "
"trim_rule provided, if any, will be ignored."
)
def initialize_word_vectors(self):
self.wv = KeyedVectors()
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
table (cum_table[-1]), then find that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
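Drawing sketch (illustrative only; assumes `model.cum_table` has already been built):
>>> r = model.random.randint(model.cum_table[-1])
>>> drawn_index = model.cum_table.searchsorted(r)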
"""
vocab_size = len(self.wv.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in xrange(vocab_size):
train_words_pow += self.wv.vocab[self.wv.index2word[word_index]].count**power
cumulative = 0.0
for word_index in xrange(vocab_size):
cumulative += self.wv.vocab[self.wv.index2word[word_index]].count**power
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words", len(self.wv.vocab))
# build the huffman tree
heap = list(itervalues(self.wv.vocab))
heapq.heapify(heap)
for i in xrange(len(self.wv.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(
heap, Vocab(count=min1.count + min2.count, index=i + len(self.wv.vocab), left=min1, right=min2)
)
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.wv.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.wv.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
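Example (sketch; a model constructed without `sentences` starts with an empty vocabulary):
>>> model = Word2Vec()
>>> model.build_vocab([["cat", "say", "meow"], ["dog", "say", "woof"]])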
"""
self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey
# trim by min_count & precalculate downsampling
self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
self.finalize_vocab(update=update) # build tables & arrays
def build_vocab_from_freq(self, word_freq, keep_raw_vocab=False, corpus_count=None, trim_rule=None, update=False):
"""
Build model vocabulary from a dictionary of word frequencies, i.e. a mapping
of (word -> word count). Words must be unicode strings.
Parameters
----------
`word_freq` : dict
Word,Word_Count dictionary.
`keep_raw_vocab` : bool
If not true, delete the raw vocabulary after the scaling is done and free up RAM.
`corpus_count`: int
Allows corpus_count to be set explicitly, since no corpus is scanned here.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
`update`: bool
If true, the new provided words in `word_freq` dict will be added to model's vocab.
Returns
--------
None
Examples
--------
>>> from gensim.models.word2vec import Word2Vec
>>> model = Word2Vec()
>>> model.build_vocab_from_freq({"Word1": 15, "Word2": 20})
"""
logger.info("Processing provided word frequencies")
# Instead of scanning text, this will assign provided word frequencies dictionary(word_freq)
# to be directly the raw vocab
raw_vocab = word_freq
logger.info(
"collected %i different raw word, with total frequency of %i",
len(raw_vocab), sum(itervalues(raw_vocab))
)
# Since no sentences are provided, this is to control the corpus_count
self.corpus_count = corpus_count if corpus_count else 0
self.raw_vocab = raw_vocab
# trim by min_count & precalculate downsampling
self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update)
self.finalize_vocab(update=update) # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, string_types):
logger.warning(
"Each 'sentences' item should be a list of words (usually unicode strings). "
"First item here is instead plain %s.",
type(sentence)
)
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info(
"PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, total_words, len(vocab)
)
for word in sentence:
vocab[word] += 1
total_words += len(sentence)
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
logger.info(
"collected %i word types from a corpus of %i raw words and %i sentences",
len(vocab), total_words, sentence_no + 1
)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
return total_words
def scale_vocab(self, min_count=None, sample=None, dry_run=False,
keep_raw_vocab=False, trim_rule=None, update=False):
"""
Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
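Dry-run sketch (illustrative; no model state is modified, keys are from the returned dict):
>>> report = model.scale_vocab(min_count=10, sample=1e-4, dry_run=True)
>>> report['retain_total'], report['memory']['total']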
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
if not update:
logger.info("Loading a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
self.wv.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.wv.vocab = {}
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
self.wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
logger.info(
"min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique
)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
logger.info(
"min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
min_count, retain_total, retain_pct, original_total, drop_total
)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
new_words, pre_exist_words = [], []  # two separate lists (chained assignment would alias them)
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
if word in self.wv.vocab:
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
self.wv.vocab[word].count += v
else:
new_words.append(word)
new_total += v
if not dry_run:
self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
self.wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
logger.info(
"New added %i unique words (%i%% of original %i) "
"and increased the count of %i pre-existing words (%i%% of original %i)",
len(new_words), new_unique_pct, original_unique_total, len(pre_exist_words),
pre_exist_unique_pct, original_unique_total
)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.wv.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info(
"downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total
)
# return from each step: words-affected, resulting-corpus-size, extra memory estimates
report_values = {
'drop_unique': drop_unique, 'retain_total': retain_total, 'downsample_unique': downsample_unique,
'downsample_total': int(downsample_total), 'memory': self.estimate_memory(vocab_size=len(retain_words))
}
return report_values
def finalize_vocab(self, update=False):
"""Build tables and model weights based on final vocabulary settings."""
if not self.wv.index2word:
self.scale_vocab()
if self.sorted_vocab and not update:
self.sort_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc. don't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.wv.vocab)
self.wv.index2word.append(word)
self.wv.vocab[word] = v
# set initial input/projection and hidden weights
if not update:
self.reset_weights()
else:
self.update_weights()
def sort_vocab(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if len(self.wv.syn0):
raise RuntimeError("cannot sort vocabulary after model weights already initialized.")
self.wv.index2word.sort(key=lambda word: self.wv.vocab[word].count, reverse=True)
for i, word in enumerate(self.wv.index2word):
self.wv.vocab[word].index = i
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.wv.vocab = other_model.wv.vocab
self.wv.index2word = other_model.wv.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, sentences, alpha, inits):
"""
Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work, self.compute_loss)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1, self.compute_loss)
return tally, self._raw_word_count(sentences)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence) for sentence in job)
def train(self, sentences, total_examples=None, total_words=None,
epochs=None, start_alpha=None, end_alpha=None, word_count=0,
queue_factor=2, report_delay=1.0, compute_loss=None):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, and accurate
progress-percentage logging, either total_examples (count of sentences) or total_words (count of
raw words in sentences) MUST be provided. (If the corpus is the same as was provided to
`build_vocab()`, the count of examples in that corpus will be available in the model's
`corpus_count` property.)
To avoid common mistakes around the model's ability to do multiple training passes itself, an
explicit `epochs` argument MUST be provided. In the common and recommended case, where `train()`
is only called once, the model's cached `iter` value should be supplied as the `epochs` value.
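Example (sketch; `sentences` must be a restartable iterable, not a generator):
>>> model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)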
"""
if self.model_trimmed_post_training:
raise RuntimeError("Parameters for training were discarded using model_trimmed_post_training method")
if FAST_VERSION < 0:
warnings.warn(
"C extension not loaded for Word2Vec, training will be slow. "
"Install a C compiler and reinstall gensim for fast training."
)
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
if compute_loss:
self.compute_loss = compute_loss
self.running_training_loss = 0
logger.info(
"training model with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s negative=%s window=%s",
self.workers, len(self.wv.vocab), self.layer1_size, self.sg,
self.hs, self.sample, self.negative, self.window
)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
if not len(self.wv.syn0):
raise RuntimeError("you must first finalize vocabulary before training the model")
if not hasattr(self, 'corpus_count'):
raise ValueError(
"The number of sentences in the training corpus is missing. "
"Did you load the model via KeyedVectors.load_word2vec_format?"
"Models loaded via load_word2vec_format don't support further training. "
"Instead start with a blank model, scan_vocab on the new corpus, "
"intersect_word2vec_format with the old model, then train."
)
if total_words is None and total_examples is None:
raise ValueError(
"You must specify either total_examples or total_words, for proper alpha and progress calculations. "
"The usual value is total_examples=model.corpus_count."
)
if epochs is None:
raise ValueError("You must specify an explict epochs count. The usual value is epochs=model.iter.")
start_alpha = start_alpha or self.alpha
end_alpha = end_alpha or self.min_alpha
job_tally = 0
if epochs > 1:
sentences = utils.RepeatCorpusNTimes(sentences, epochs)
total_words = total_words and total_words * epochs
total_examples = total_examples and total_examples * epochs
def worker_loop():
"""Train the model, lifting lists of sentences from the job_queue."""
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
sentences, alpha = job
tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))
progress_queue.put((len(sentences), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def job_producer():
"""Fill jobs queue using the input `sentences` iterator."""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_alpha = start_alpha
if next_alpha > self.min_alpha_yet_reached:
logger.warning("Effective 'alpha' higher than previous training cycles")
self.min_alpha_yet_reached = next_alpha
job_no = 0
for sent_idx, sentence in enumerate(sentences):
sentence_length = self._raw_word_count([sentence])
# can we fit this sentence into the existing job batch?
if batch_size + sentence_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(sentence)
batch_size += sentence_length
else:
# no => submit the existing job
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha
)
job_no += 1
job_queue.put((job_batch, next_alpha))
# update the learning rate for the next job
if end_alpha < next_alpha:
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
progress = 1.0 * pushed_words / total_words
next_alpha = start_alpha - (start_alpha - end_alpha) * progress
next_alpha = max(end_alpha, next_alpha)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [sentence], sentence_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha
)
job_no += 1
job_queue.put((job_batch, next_alpha))
if job_no == 0 and self.train_count == 0:
logger.warning(
"train() called with an empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable iteration = an iterable)."
)
# give the workers heads up that they can finish -- no more work!
for _ in xrange(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
unfinished_worker_count = len(workers)
workers.append(threading.Thread(target=job_producer))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
example_count, trained_word_count, raw_word_count = 0, 0, word_count
start, next_report = default_timer() - 0.00001, 1.0
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
if total_examples:
# examples-based progress %
logger.info(
"PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * example_count / total_examples, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue)
)
else:
# words-based progress %
logger.info(
"PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * raw_word_count / total_words, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue)
)
next_report = elapsed + report_delay
# all done; report the final stats
elapsed = default_timer() - start
logger.info(
"training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed
)
if job_tally < 10 * self.workers:
logger.warning(
"under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay"
)
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warning(
"supplied example count (%i) did not equal expected count (%i)", example_count, total_examples
)
if total_words and total_words != raw_word_count:
logger.warning(
"supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words
)
self.train_count += 1 # number of times train() has been called
self.total_train_time += elapsed
self.clear_sims()
return trained_word_count
# basics copied from the train() function
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""
Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that).
We have currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with hs=1 and negative=0 for this to work.
Note that you should specify total_sentences; we'll run into problems if you ask to
score more than this number of sentences but it is inefficient to set the value too high.
See the article by [#taddy]_ and the gensim demo at [#deepir]_ for examples of
how to use such scores in document classification.
.. [#taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations,
in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
.. [#deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb
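Example (sketch; requires a model trained with hs=1 and negative=0):
>>> log_probs = model.score([["the", "quick", "brown", "fox"]], total_sentences=1)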
"""
if FAST_VERSION < 0:
warnings.warn(
"C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness."
)
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative
)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError(
"We have currently only implemented score for the hierarchical softmax scheme, "
"so you need to have run word2vec with hs=1 and negative=0 for this to work."
)
def worker_loop():
"""Compute log probability for each sentence, lifting lists of sentences from the jobs queue."""
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences
)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info("reached end of input; waiting to finish %i outstanding jobs", job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
100.0 * sentence_count / total_sentences, sentence_count / elapsed
)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed
)
return sentence_scores[:sentence_count]
def clear_sims(self):
"""
Removes all L2-normalized vectors for words from the model.
You will have to recompute them using the init_sims() method.
"""
self.wv.syn0norm = None
def update_weights(self):
"""
Copy all the existing weights, and reset the weights for the newly
added vocabulary.
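A typical online-update sketch from the caller's side (assumes an already-trained
`model` and a new corpus `more_sentences`):
>>> model.build_vocab(more_sentences, update=True)
>>> model.train(more_sentences, total_examples=model.corpus_count, epochs=model.iter)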
"""
logger.info("updating layer weights")
gained_vocab = len(self.wv.vocab) - len(self.wv.syn0)
newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)
# randomize the remaining words
for i in xrange(len(self.wv.syn0), len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
newsyn0[i - len(self.wv.syn0)] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
# Raise an error if an online update is run before initial training on a corpus
if not len(self.wv.syn0):
raise RuntimeError(
"You cannot do an online vocabulary-update of a model which has no prior vocabulary. "
"First build the vocabulary of your model with a corpus before doing an online update."
)
self.wv.syn0 = vstack([self.wv.syn0, newsyn0])
if self.hs:
self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
self.wv.syn0norm = None
# do not suppress learning for already learned words
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
self.wv.syn0[i] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
self.wv.syn0norm = None
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
return (once.rand(self.vector_size) - 0.5) / self.vector_size
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
"""
Merge the input-hidden weight matrix from the original C word2vec-tool format
given, where it intersects with the current vocabulary. (No words are added to the
existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.)
`binary` is a boolean indicating whether the data is in binary word2vec format.
`lockf` is a lock-factor value to be set for any imported word-vectors; the
default value of 0.0 prevents further updating of the vector during subsequent
training. Use 1.0 to allow further training updates of merged vectors.
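Example (sketch; 'extra-vectors.bin' is a hypothetical file in the C binary format):
>>> model.intersect_word2vec_format('extra-vectors.bin', lockf=1.0, binary=True)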
"""
overlap_count = 0
logger.info("loading projection weights from %s", fname)
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
if not vector_size == self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.wv.vocab:
overlap_count += 1
self.wv.syn0[self.wv.vocab[word].index] = weights
self.syn0_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
if word in self.wv.vocab:
overlap_count += 1
self.wv.syn0[self.wv.vocab[word].index] = weights
self.syn0_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes
logger.info("merged %d vectors into %s matrix from %s", overlap_count, self.wv.syn0.shape, fname)
@deprecated("Method will be removed in 4.0.0, use self.wv.most_similar() instead")
def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
"""
Deprecated. Use self.wv.most_similar() instead.
Refer to the documentation for `gensim.models.KeyedVectors.most_similar`
"""
return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)
@deprecated("Method will be removed in 4.0.0, use self.wv.wmdistance() instead")
def wmdistance(self, document1, document2):
"""
Deprecated. Use self.wv.wmdistance() instead.
Refer to the documentation for `gensim.models.KeyedVectors.wmdistance`
"""
return self.wv.wmdistance(document1, document2)
@deprecated("Method will be removed in 4.0.0, use self.wv.most_similar_cosmul() instead")
def most_similar_cosmul(self, positive=None, negative=None, topn=10):
"""
Deprecated. Use self.wv.most_similar_cosmul() instead.
Refer to the documentation for `gensim.models.KeyedVectors.most_similar_cosmul`
"""
return self.wv.most_similar_cosmul(positive, negative, topn)
@deprecated("Method will be removed in 4.0.0, use self.wv.similar_by_word() instead")
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Deprecated. Use self.wv.similar_by_word() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similar_by_word`
"""
return self.wv.similar_by_word(word, topn, restrict_vocab)
@deprecated("Method will be removed in 4.0.0, use self.wv.similar_by_vector() instead")
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Deprecated. Use self.wv.similar_by_vector() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similar_by_vector`
"""
return self.wv.similar_by_vector(vector, topn, restrict_vocab)
@deprecated("Method will be removed in 4.0.0, use self.wv.doesnt_match() instead")
def doesnt_match(self, words):
"""
Deprecated. Use self.wv.doesnt_match() instead.
Refer to the documentation for `gensim.models.KeyedVectors.doesnt_match`
"""
return self.wv.doesnt_match(words)
@deprecated("Method will be removed in 4.0.0, use self.wv.__getitem__() instead")
def __getitem__(self, words):
"""
Deprecated. Use self.wv.__getitem__() instead.
Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`
"""
return self.wv.__getitem__(words)
@deprecated("Method will be removed in 4.0.0, use self.wv.__contains__() instead")
def __contains__(self, word):
"""
Deprecated. Use self.wv.__contains__() instead.
Refer to the documentation for `gensim.models.KeyedVectors.__contains__`
"""
return self.wv.__contains__(word)
@deprecated("Method will be removed in 4.0.0, use self.wv.similarity() instead")
def similarity(self, w1, w2):
"""
Deprecated. Use self.wv.similarity() instead.
Refer to the documentation for `gensim.models.KeyedVectors.similarity`
"""
return self.wv.similarity(w1, w2)
@deprecated("Method will be removed in 4.0.0, use self.wv.n_similarity() instead")
def n_similarity(self, ws1, ws2):
"""
Deprecated. Use self.wv.n_similarity() instead.
Refer to the documentation for `gensim.models.KeyedVectors.n_similarity`
"""
return self.wv.n_similarity(ws1, ws2)
def predict_output_word(self, context_words_list, topn=10):
"""Report the probability distribution of the center word given the context words
as input to the trained model."""
if not self.negative:
raise RuntimeError(
"We have currently only implemented predict_output_word for the negative sampling scheme, "
"so you need to have run word2vec with negative > 0 for this to work."
)
if not hasattr(self.wv, 'syn0') or not hasattr(self, 'syn1neg'):
raise RuntimeError("Parameters required for predicting the output words not found.")
word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]
if not word_vocabs:
warnings.warn("All the input context words are out-of-vocabulary for the current model.")
return None
word2_indices = [word.index for word in word_vocabs]
l1 = np_sum(self.wv.syn0[word2_indices], axis=0)
if word2_indices and self.cbow_mean:
l1 /= len(word2_indices)
prob_values = exp(dot(l1, self.syn1neg.T)) # propagate hidden -> output and take softmax to get probabilities
prob_values /= sum(prob_values)
top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
# returning the most probable output words with their probabilities
return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]
def init_sims(self, replace=False):
"""
init_sims() resides in KeyedVectors because it deals mainly with syn0. However, since syn1 is not an
attribute of KeyedVectors, it has to be deleted from this class here; the actual normalization of
syn0 happens inside KeyedVectors.
if replace and hasattr(self, 'syn1'):
del self.syn1
return self.wv.init_sims(replace)
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.wv.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info(
"estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total']
)
return report
@staticmethod
def log_accuracy(section):
return KeyedVectors.log_accuracy(section)
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
most_similar = most_similar or KeyedVectors.most_similar
return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
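# Example (sketch; 'questions-words.txt' is the analogy test set from the original
# word2vec distribution, assumed to be available locally):
#   model.accuracy('questions-words.txt', restrict_vocab=30000)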
@staticmethod
@deprecated("Method will be removed in 4.0.0, use self.wv.log_evaluate_word_pairs() instead")
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
"""
Deprecated. Use self.wv.log_evaluate_word_pairs() instead.
Refer to the documentation for `gensim.models.KeyedVectors.log_evaluate_word_pairs`
"""
return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)
@deprecated("Method will be removed in 4.0.0, use self.wv.evaluate_word_pairs() instead")
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False):
"""
Deprecated. Use self.wv.evaluate_word_pairs() instead.
Refer to the documentation for `gensim.models.KeyedVectors.evaluate_word_pairs`
"""
return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (
self.__class__.__name__, len(self.wv.index2word), self.vector_size, self.alpha
)
@deprecated(
"Method will be removed in 4.0.0, keep just_word_vectors = model.wv to retain just the KeyedVectors instance"
)
def _minimize_model(self, save_syn1=False, save_syn1neg=False, save_syn0_lockf=False):
if save_syn1 and save_syn1neg and save_syn0_lockf:
return
if hasattr(self, 'syn1') and not save_syn1:
del self.syn1
if hasattr(self, 'syn1neg') and not save_syn1neg:
del self.syn1neg
if hasattr(self, 'syn0_lockf') and not save_syn0_lockf:
del self.syn0_lockf
self.model_trimmed_post_training = True
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
"""
Discard parameters that are used in training and score. Use if you're sure you're done training a model.
If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
"""
if replace_word_vectors_with_normalized:
self.init_sims(replace=True)
self._minimize_model()
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
model = super(Word2Vec, cls).load(*args, **kwargs)
# update older models
if hasattr(model, 'table'):
delattr(model, 'table') # discard in favor of cum_table
if model.negative and hasattr(model.wv, 'index2word'):
model.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(model, 'corpus_count'):
model.corpus_count = None
for v in model.wv.vocab.values():
if hasattr(v, 'sample_int'):
break # already 0.12.0+ style int probabilities
elif hasattr(v, 'sample_probability'):
v.sample_int = int(round(v.sample_probability * 2**32))
del v.sample_probability
if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
model.syn0_lockf = ones(len(model.wv.syn0), dtype=REAL)
if not hasattr(model, 'random'):
model.random = random.RandomState(model.seed)
if not hasattr(model, 'train_count'):
model.train_count = 0
model.total_train_time = 0
return model
def _load_specials(self, *args, **kwargs):
super(Word2Vec, self)._load_specials(*args, **kwargs)
# loading from a pre-KeyedVectors word2vec model
if not hasattr(self, 'wv'):
wv = KeyedVectors()
wv.syn0 = self.__dict__.get('syn0', [])
wv.syn0norm = self.__dict__.get('syn0norm', None)
wv.vocab = self.__dict__.get('vocab', {})
wv.index2word = self.__dict__.get('index2word', [])
self.wv = wv
@classmethod
@deprecated("Method will be removed in 4.0.0, use gensim.models.KeyedVectors.load_word2vec_format instead")
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
@deprecated("Method will be removed in 4.0.0, use model.wv.save_word2vec_format instead")
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""Deprecated. Use model.wv.save_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use model.wv.save_word2vec_format instead.")
def get_latest_training_loss(self):
return self.running_training_loss
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
self.fname = fname
self.max_sentence_length = max_sentence_length
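# Example (sketch; '/tmp/text8' is a hypothetical path to the unzipped corpus file):
#   sentences = Text8Corpus('/tmp/text8')
#   model = Word2Vec(sentences, size=200)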
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
"""
Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
`source` can be either a string or a file object. Clip the file to the first
`limit` lines (or not clipped if limit is None, the default).
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i: i + self.max_sentence_length]
i += self.max_sentence_length
class PathLineSentences(object):
"""
Works like word2vec.LineSentence, but will process all files in a directory in alphabetical order by filename.
The directory can only contain files that can be read by LineSentence: .bz2, .gz, and text files.
Any file not ending with .bz2 or .gz is assumed to be a text file. Does not work with subdirectories.
The format of files (either text, or compressed text files) in the path is one sentence = one line,
with words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
`source` should be a path to a directory (as a string) where all files can be opened by the
LineSentence class. Each file will be read up to `limit` lines (or not clipped if limit is None, the default).
Example::
sentences = PathLineSentences(os.getcwd() + '\\corpus\\')
The files in the directory should be either text files, .bz2 files, or .gz files.
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
if os.path.isfile(self.source):
logger.debug('single file given as source, rather than a directory of files')
logger.debug('consider using models.word2vec.LineSentence for a single file')
self.input_files = [self.source] # force code compatibility with list of files
elif os.path.isdir(self.source):
self.source = os.path.join(self.source, '') # ensures os-specific slash at end of path
logger.info('reading directory %s', self.source)
self.input_files = os.listdir(self.source)
self.input_files = [self.source + filename for filename in self.input_files] # make full paths
self.input_files.sort() # makes sure it happens in filename order
else: # not a file or a directory, then we can't do anything with it
raise ValueError('input is neither a file nor a directory')
logger.info('files read into PathLineSentences: %s', '\n'.join(self.input_files))
def __iter__(self):
"""iterate through the files"""
for file_name in self.input_files:
logger.info('reading file %s', file_name)
with utils.smart_open(file_name) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i:i + self.max_sentence_length]
i += self.max_sentence_length
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO
)
logger.info("running %s", " ".join(sys.argv))
logger.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled;"
" default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3
)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, iter=args.iter
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
| markroxor/gensim | gensim/models/word2vec.py | Python | lgpl-2.1 | 87,112 | ["VisIt"] | b78d5b9cb42dd3064c4503c7fea06bc9c51b2d0b32406b6488c568097e1d8811 |
from i3pystatus.core.util import internet, require
from i3pystatus.weather import WeatherBackend
from datetime import datetime
from urllib.request import urlopen
GEOLOOKUP_URL = 'http://api.wunderground.com/api/%s/geolookup%s/q/%s.json'
STATION_QUERY_URL = 'http://api.wunderground.com/api/%s/%s/q/%s.json'
class Wunderground(WeatherBackend):
'''
This module retrieves weather data using the Weather Underground API.
.. note::
A Weather Underground API key is required to use this module, you can
sign up for a developer API key free at
https://www.wunderground.com/weather/api/
Valid values for ``location_code`` include:
* **State/City_Name** - CA/San_Francisco
* **Country/City** - France/Paris
* **Geolocation by IP** - autoip
* **Zip or Postal Code** - 60616
* **ICAO Airport Code** - icao:LAX
* **Latitude/Longitude** - 41.8301943,-87.6342619
* **Personal Weather Station (PWS)** - pws:KILCHICA30
When not using a ``pws`` or ``icao`` station ID, the location will be
queried (this uses an API query), and the closest station will be used.
For a list of PWS station IDs, visit the following URL:
http://www.wunderground.com/weatherstation/ListStations.asp
.. rubric:: API usage
An API key is allowed 500 queries per day, and no more than 10 in a
given minute. Therefore, it is recommended to be conservative when
setting the update interval (the default is 1800 seconds, or 30
minutes), and one should be careful how often one restarts i3pystatus
and how often a refresh is forced by left-clicking the module.
As noted above, when not using a ``pws`` or ``icao`` station ID, an API
query will be used to determine the station ID to use. This will be
done once when i3pystatus is started, and not repeated until the next
time i3pystatus is started.
When updating weather data, one API query will be used to obtain the
current conditions. The high/low temperature forecast requires an
additional API query, and is optional (disabled by default). To enable
forecast checking, set ``forecast=True``.
.. _weather-usage-wunderground:
.. rubric:: Usage example
.. code-block:: python
from i3pystatus import Status
from i3pystatus.weather import wunderground
status = Status(logfile='/home/username/var/i3pystatus.log')
status.register(
'weather',
format='{condition} {current_temp}{temp_unit}[ {icon}][ Hi: {high_temp}][ Lo: {low_temp}][ {update_error}]',
colorize=True,
hints={'markup': 'pango'},
backend=wunderground.Wunderground(
api_key='api_key_goes_here',
location_code='pws:MAT645',
units='imperial',
forecast=True,
update_error='<span color="#ff0000">!</span>',
),
)
status.run()
See :ref:`here <weather-formatters>` for a list of formatters which can be
used.
'''
settings = (
('api_key', 'Weather Underground API key'),
('location_code', 'Location code from wunderground.com'),
('units', '\'metric\' or \'imperial\''),
('use_pws', 'Set to False to use only airport stations'),
('forecast', 'Set to ``True`` to check forecast (generates one '
'additional API request per weather update). If set to '
'``False``, then the ``low_temp`` and ``high_temp`` '
'formatters will be set to empty strings.'),
('update_error', 'Value for the ``{update_error}`` formatter when an '
'error is encountered while checking weather data'),
)
required = ('api_key', 'location_code')
api_key = None
location_code = None
units = 'metric'
use_pws = True
forecast = False
update_error = '!'
# These will be set once weather data has been checked
station_id = None
forecast_url = None
def init(self):
'''
Use the location_code to perform a geolookup and find the closest
station. If the location is a pws or icao station ID, no lookup will be
performed.
'''
try:
for no_lookup in ('pws', 'icao'):
sid = self.location_code.partition(no_lookup + ':')[-1]
if sid:
self.station_id = self.location_code
return
except AttributeError:
# Numeric or some other type, either way we'll just stringify
# it below and perform a lookup.
pass
self.get_station_id()
@require(internet)
def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
no_data = ('', '')
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return no_data
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return no_data
@require(internet)
def get_station_id(self):
'''
Use geolocation to get the station ID
'''
extra_opts = '/pws:0' if not self.use_pws else ''
api_url = GEOLOOKUP_URL % (self.api_key,
extra_opts,
self.location_code)
response = self.api_request(api_url)
station_type = 'pws' if self.use_pws else 'airport'
try:
stations = response['location']['nearby_weather_stations']
nearest = stations[station_type]['station'][0]
except (KeyError, IndexError):
raise Exception(
'No locations matched location_code %s' % self.location_code)
self.logger.debug('nearest = %s', nearest)  # informational, not an error
if self.use_pws:
nearest_pws = nearest.get('id', '')
if not nearest_pws:
raise Exception('No id entry for nearest PWS')
self.station_id = 'pws:%s' % nearest_pws
else:
nearest_airport = nearest.get('icao', '')
if not nearest_airport:
raise Exception('No icao entry for nearest airport')
self.station_id = 'icao:%s' % nearest_airport
def check_response(self, response):
try:
return response['response']['error']['description']
except KeyError:
# No error in response
return False
@require(internet)
def check_weather(self):
'''
Query the configured/queried station and return the weather data
'''
if self.station_id is None:
# Failed to get the nearest station ID when first launched, so
# retry it.
self.get_station_id()
self.data['update_error'] = ''
try:
query_url = STATION_QUERY_URL % (self.api_key,
'conditions',
self.station_id)
try:
response = self.api_request(query_url)['current_observation']
self.forecast_url = response.pop('ob_url', None)
except KeyError:
self.logger.error('No weather data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
forecast = self.api_request(query_url)['forecast']
forecast = forecast['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
# This is a non-fatal error, so don't return but do set the
# error flag.
self.data['update_error'] = self.update_error
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = forecast.get('low', {}).get(unit, '')
high_temp = forecast.get('high', {}).get(unit, '')
else:
low_temp = high_temp = ''
if self.units == 'metric':
temp_unit = 'c'
speed_unit = 'kph'
distance_unit = 'km'
pressure_unit = 'mb'
else:
temp_unit = 'f'
speed_unit = 'mph'
distance_unit = 'mi'
pressure_unit = 'in'
def _find(key, data=None, default=''):
if data is None:
data = response
return str(data.get(key, default))
try:
observation_epoch = _find('observation_epoch') or _find('local_epoch')
observation_time = datetime.fromtimestamp(int(observation_epoch))
except (TypeError, ValueError):
self.logger.debug(
'Observation time \'%s\' is not a UNIX timestamp',
observation_epoch
)
observation_time = datetime.fromtimestamp(0)
self.data['city'] = _find('city', response['observation_location'])
self.data['condition'] = _find('weather')
self.data['observation_time'] = observation_time
self.data['current_temp'] = _find('temp_' + temp_unit).split('.')[0]
self.data['low_temp'] = low_temp
self.data['high_temp'] = high_temp
self.data['temp_unit'] = '°' + temp_unit.upper()
self.data['feelslike'] = _find('feelslike_' + temp_unit)
self.data['dewpoint'] = _find('dewpoint_' + temp_unit)
self.data['wind_speed'] = _find('wind_' + speed_unit)
self.data['wind_unit'] = speed_unit
self.data['wind_direction'] = _find('wind_dir')
self.data['wind_gust'] = _find('wind_gust_' + speed_unit)
self.data['pressure'] = _find('pressure_' + pressure_unit)
self.data['pressure_unit'] = pressure_unit
self.data['pressure_trend'] = _find('pressure_trend')
self.data['visibility'] = _find('visibility_' + distance_unit)
self.data['visibility_unit'] = distance_unit
self.data['humidity'] = _find('relative_humidity').rstrip('%')
self.data['uv_index'] = _find('UV')
except Exception:
# Don't let an uncaught exception kill the update thread
self.logger.error(
'Uncaught error occurred while checking weather. '
'Exception follows:', exc_info=True
)
self.data['update_error'] = self.update_error
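# Illustrative configuration sketch (not part of the original module). The
# registration pattern follows i3pystatus conventions, but treat the exact
# registration API as an assumption to verify against the plugin docs; the
# backend options (api_key, location_code, units, forecast) come from the
# attributes used above, and the key/station values are hypothetical:
#
#     from i3pystatus import Status
#     from i3pystatus.weather import wunderground
#
#     status = Status()
#     status.register(
#         'weather',
#         backend=wunderground.Wunderground(
#             api_key='0123456789abcdef',      # hypothetical API key
#             location_code='pws:KCASANFR70',  # hypothetical station
#             units='metric',
#             forecast=True))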
|
teto/i3pystatus
|
i3pystatus/weather/wunderground.py
|
Python
|
mit
| 11,898
|
[
"VisIt"
] |
c1d900eece9b2511fe1daae25ccdca0d0cc6c22d49a7750f83b9028fc26bdc69
|
#! /bin/env python
from landlab.io.vtk.writer import VtkWriter
from landlab.io.vtk.vtktypes import VtkUniformRectilinear
from landlab.io.vtk.vtkxml import (VtkRootElement, VtkGridElement,
                                   VtkPieceElement, VtkCoordinatesElement,
                                   VtkPointDataElement, VtkCellDataElement,
                                   VtkExtent, VtkOrigin, VtkSpacing)
class VtkUniformRectilinearWriter(VtkWriter):
_vtk_grid_type = VtkUniformRectilinear
def construct_field_elements(self, field):
extent = VtkExtent(field.shape[::-1])
origin = VtkOrigin(field.origin[::-1], field.spacing[::-1])
spacing = VtkSpacing(field.spacing[::-1])
element = {
'VTKFile':
VtkRootElement(VtkUniformRectilinear),
'Grid':
VtkGridElement(VtkUniformRectilinear, WholeExtent=extent,
Origin=origin, Spacing=spacing),
'Piece':
VtkPieceElement(Extent=extent),
'PointData':
VtkPointDataElement(field.at_node, append=self.data,
encoding=self.encoding),
            'CellData':
                VtkCellDataElement(field.at_cell, append=self.data,
                                   encoding=self.encoding),
}
return element
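# Illustrative usage sketch (not part of the original module). The grid
# construction is standard landlab; the writer's constructor arguments and
# entry point are assumed from the VtkWriter base class and may differ:
#
#     from landlab import RasterModelGrid
#
#     grid = RasterModelGrid((4, 5))
#     grid.add_zeros('topographic__elevation', at='node')
#     writer = VtkUniformRectilinearWriter(grid)  # hypothetical constructor args
#     writer.write('elevation.vti')               # hypothetical method name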
|
decvalts/landlab
|
landlab/io/vtk/vti.py
|
Python
|
mit
| 1,346
|
[
"VTK"
] |
6de2442a4c7b71f3fabd0d198b8f200bd923a461de83b6688073ba1716209f69
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Classes that represent a portion of the state of an OpenMM context.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import abc
import sys
import copy
import zlib
import inspect
import weakref
import collections
import numpy as np
try:
import openmm
from openmm import unit
except ImportError: # OpenMM < 7.6
from simtk import openmm, unit
from openmmtools import utils, integrators, forces, constants
# =============================================================================
# MODULE FUNCTIONS
# =============================================================================
def create_thermodynamic_state_protocol(system, protocol, constants=None,
composable_states=None):
"""An optimized utility function to create a list of thermodynamic states.
The method takes advantage of the fact that copying a thermodynamic state
does not require a copy of the OpenMM ``System`` object and that setting
parameters that are controlled by the ``(Compound)ThermodynamicState``
is effectively instantaneous.
Parameters
----------
    system : ThermodynamicState or openmm.System
        The reference ``ThermodynamicState`` or OpenMM ``System``. If a
        ``System`` is given, the constants must specify the temperature.
protocol : dict: str -> list
A dictionary associating the thermodynamic parameters to a list of
values. All the lists must have the same length.
    constants : dict: str -> value, optional
        A dictionary associating a thermodynamic parameter to a value that
        must remain constant along the protocol.
composable_states : IComposableState or list, optional
If specified, the function returns a list of ``CompoundThermodynamicState``
instead of simple ``ThermodynamicState`` objects.
Returns
-------
states : list of ``ThermodynamicState`` or ``CompoundThermodynamicState``
The sequence of thermodynamic states for the given protocol.
Examples
--------
>>> from openmm import unit
>>> from openmmtools import testsystems
>>> system = testsystems.AlanineDipeptideExplicit().system
>>> protocol = {'temperature': [300, 310, 330]*unit.kelvin,
... 'pressure': [1.0, 1.1, 1.2]*unit.atmosphere}
>>> states = create_thermodynamic_state_protocol(system, protocol)
>>> len(states)
3
"""
# Check that all elements of the protocol have the same length.
if len(protocol) == 0:
raise ValueError('No protocol has been specified.')
values_lengths = [len(values) for values in protocol.values()]
if len(set(values_lengths)) != 1:
raise ValueError('The protocol parameter values have different '
'lengths!\n{}'.format(protocol))
protocol_length = values_lengths[0]
# Handle default value.
if constants is None:
constants = {}
# Check that the user didn't specify the same parameter as both
# a constant and a protocol variable.
if len(set(constants).intersection(set(protocol))) != 0:
raise ValueError('Some parameters have been specified both '
'in constants and protocol.')
    # Augment the protocol to include the constant values as well.
for constant_parameter, value in constants.items():
protocol[constant_parameter] = [value for _ in range(protocol_length)]
# Create the reference ThermodynamicState.
if isinstance(system, openmm.System):
# Make sure the temperature is defined somewhere.
try:
temperature = constants['temperature']
except KeyError:
try:
temperature = protocol['temperature'][0]
except KeyError:
raise ValueError('If a System is passed the list of '
'constants must specify the temperature.')
thermo_state = ThermodynamicState(system, temperature=temperature)
else:
thermo_state = system
# Check if we need to create a reference CompoundThermodynamicState.
# Cast a single ComposableState into a list.
if isinstance(composable_states, IComposableState):
composable_states = [composable_states]
if composable_states is not None:
thermo_state = CompoundThermodynamicState(thermo_state, composable_states)
# Create all the states. Copying a state is much faster than
# initializing one because we don't have to copy System object.
states = [copy.deepcopy(thermo_state) for _ in range(protocol_length)]
# Assign protocol parameters.
protocol_keys, protocol_values = zip(*protocol.items())
for state_idx, state_values in enumerate(zip(*protocol_values)):
state = states[state_idx]
for lambda_key, lambda_value in zip(protocol_keys, state_values):
if hasattr(state, lambda_key):
setattr(state, lambda_key, lambda_value)
else:
raise AttributeError('{} object does not have protocol attribute '
'{}'.format(type(state), lambda_key))
return states
def reduced_potential_at_states(sampler_state, thermodynamic_states, context_cache):
"""Compute the reduced potential of a single configuration at multiple thermodynamic states.
Parameters
----------
sampler_state : SamplerState
The state holding the coordinates used to compute the potential.
thermodynamic_states : list of ``ThermodynamicState``
The list of thermodynamic states at which to compute the potential.
context_cache : cache.ContextCache
The context cache to use to request ``Context`` objects.
Returns
-------
reduced_potentials : np.ndarray of float
        ``reduced_potentials[i]`` is the unit-less reduced potential
        (i.e., in kT units) of state ``thermodynamic_states[i]``.
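    Examples
    --------
    A minimal sketch, assuming the openmmtools ``cache`` and ``testsystems``
    modules and the ``SamplerState`` class defined below: evaluate one
    configuration over a small temperature ladder.
    >>> from openmmtools import cache, testsystems
    >>> alanine = testsystems.AlanineDipeptideImplicit()
    >>> states = [ThermodynamicState(alanine.system, temperature=t*unit.kelvin)
    ...           for t in [290, 300, 310]]
    >>> sampler_state = SamplerState(positions=alanine.positions)
    >>> u_k = reduced_potential_at_states(sampler_state, states,
    ...                                   cache.ContextCache())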
"""
reduced_potentials = np.zeros(len(thermodynamic_states))
# Group thermodynamic states by compatibility.
compatible_groups, original_indices = group_by_compatibility(thermodynamic_states)
# Compute the reduced potentials of all the compatible states.
for compatible_group, state_indices in zip(compatible_groups, original_indices):
# Get the context, any Integrator works.
context, integrator = context_cache.get_context(compatible_group[0])
# Update positions and box vectors. We don't need
# to set Context velocities for the potential.
sampler_state.apply_to_context(context, ignore_velocities=True)
# Compute and update the reduced potentials.
compatible_energies = ThermodynamicState.reduced_potential_at_states(
context, compatible_group)
for energy_idx, state_idx in enumerate(state_indices):
reduced_potentials[state_idx] = compatible_energies[energy_idx]
return reduced_potentials
def group_by_compatibility(thermodynamic_states):
"""Utility function to split the thermodynamic states by compatibility.
Parameters
----------
thermodynamic_states : list of ThermodynamicState
        The thermodynamic states to group by compatibility.
Returns
-------
compatible_groups : list of list of ThermodynamicState
The states grouped by compatibility.
    original_indices : list of list of int
        The indices of the ThermodynamicStates in the original list.
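    Examples
    --------
    A small sketch: states that differ only in temperature share a standard
    system, so they end up in a single group.
    >>> from openmmtools import testsystems
    >>> system = testsystems.AlanineDipeptideImplicit().system
    >>> states = [ThermodynamicState(system, temperature=t*unit.kelvin)
    ...           for t in [300, 310]]
    >>> compatible_groups, original_indices = group_by_compatibility(states)
    >>> original_indices
    [[0, 1]]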
"""
compatible_groups = []
original_indices = []
for state_idx, state in enumerate(thermodynamic_states):
# Search for compatible group.
found_compatible = False
for group, indices in zip(compatible_groups, original_indices):
if state.is_state_compatible(group[0]):
found_compatible = True
group.append(state)
indices.append(state_idx)
# Create new one.
if not found_compatible:
compatible_groups.append([state])
original_indices.append([state_idx])
return compatible_groups, original_indices
def _box_vectors_volume(box_vectors):
"""Return the volume of the box vectors.
    Triclinic boxes are also supported.
Parameters
----------
box_vectors : openmm.unit.Quantity
Vectors defining the box.
Returns
-------
volume : openmm.unit.Quantity
The box volume in units of length^3.
Examples
--------
Compute the volume of a Lennard-Jones fluid at 100 K and 1 atm.
>>> from openmmtools import testsystems
>>> system = testsystems.LennardJonesFluid(nparticles=100).system
>>> v = _box_vectors_volume(system.getDefaultPeriodicBoxVectors())
"""
a, b, c = box_vectors
box_matrix = np.array([a/a.unit, b/a.unit, c/a.unit])
return np.linalg.det(box_matrix) * a.unit**3
def _box_vectors_area_xy(box_vectors):
"""Return the xy-area of the box vectors.
Parameters
----------
box_vectors : openmm.unit.Quantity
Vectors defining the box.
Returns
-------
area_xy : openmm.unit.Quantity
The box area in units of length^2.
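    Examples
    --------
    Compute the xy-area of a water box (a sketch mirroring the volume
    example above).
    >>> from openmmtools import testsystems
    >>> system = testsystems.WaterBox(box_edge=20.0*unit.angstroms).system
    >>> area_xy = _box_vectors_area_xy(system.getDefaultPeriodicBoxVectors())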
"""
return box_vectors[0][0] * box_vectors[1][1]
# =============================================================================
# CUSTOM EXCEPTIONS
# =============================================================================
class ThermodynamicsError(Exception):
"""Custom ThermodynamicState error.
The exception defines error codes as class constants. Currently
defined constants are MULTIPLE_BAROSTATS, UNSUPPORTED_BAROSTAT,
INCONSISTENT_BAROSTAT, BAROSTATED_NONPERIODIC, and
INCONSISTENT_INTEGRATOR.
Parameters
----------
code : ThermodynamicsError.Code
The error code.
Attributes
----------
code : ThermodynamicsError.Code
The code associated to this error.
Examples
--------
>>> raise ThermodynamicsError(ThermodynamicsError.MULTIPLE_BAROSTATS)
Traceback (most recent call last):
...
openmmtools.states.ThermodynamicsError: System has multiple barostats.
"""
# TODO substitute this with enum when we drop Python 2.7 support
(MULTIPLE_THERMOSTATS,
NO_THERMOSTAT,
NONE_TEMPERATURE,
INCONSISTENT_THERMOSTAT,
MULTIPLE_BAROSTATS,
NO_BAROSTAT,
UNSUPPORTED_BAROSTAT,
UNSUPPORTED_ANISOTROPIC_BAROSTAT,
SURFACE_TENSION_NOT_SUPPORTED,
INCONSISTENT_BAROSTAT,
BAROSTATED_NONPERIODIC,
INCONSISTENT_INTEGRATOR,
INCOMPATIBLE_SAMPLER_STATE,
INCOMPATIBLE_ENSEMBLE) = range(14)
error_messages = {
MULTIPLE_THERMOSTATS: "System has multiple thermostats.",
NO_THERMOSTAT: "System does not have a thermostat specifying the temperature.",
NONE_TEMPERATURE: "Cannot set temperature of the thermodynamic state to None.",
INCONSISTENT_THERMOSTAT: "System thermostat is inconsistent with thermodynamic state.",
MULTIPLE_BAROSTATS: "System has multiple barostats.",
UNSUPPORTED_BAROSTAT: "Found unsupported barostat {} in system.",
UNSUPPORTED_ANISOTROPIC_BAROSTAT:
"MonteCarloAnisotropicBarostat is only supported if the pressure along all scaled axes is the same.",
SURFACE_TENSION_NOT_SUPPORTED:
"Surface tension can only be set for states that have a system with a MonteCarloMembraneBarostat.",
NO_BAROSTAT: "System does not have a barostat specifying the pressure.",
INCONSISTENT_BAROSTAT: "System barostat is inconsistent with thermodynamic state.",
BAROSTATED_NONPERIODIC: "Non-periodic systems cannot have a barostat.",
INCONSISTENT_INTEGRATOR: "Integrator is coupled to a heat bath at a different temperature.",
INCOMPATIBLE_SAMPLER_STATE: "The sampler state has a different number of particles.",
INCOMPATIBLE_ENSEMBLE: "Cannot apply to a context in a different thermodynamic ensemble."
}
def __init__(self, code, *args):
error_message = self.error_messages[code].format(*args)
super(ThermodynamicsError, self).__init__(error_message)
self.code = code
class SamplerStateError(Exception):
"""Custom SamplerState error.
The exception defines error codes as class constants. The only
currently defined constant is INCONSISTENT_VELOCITIES.
Parameters
----------
code : SamplerStateError.Code
The error code.
Attributes
----------
code : SamplerStateError.Code
The code associated to this error.
Examples
--------
>>> raise SamplerStateError(SamplerStateError.INCONSISTENT_VELOCITIES)
Traceback (most recent call last):
...
openmmtools.states.SamplerStateError: Velocities have different length than positions.
"""
# TODO substitute this with enum when we drop Python 2.7 support
(INCONSISTENT_VELOCITIES,
INCONSISTENT_POSITIONS) = range(2)
error_messages = {
INCONSISTENT_VELOCITIES: "Velocities have different length than positions.",
INCONSISTENT_POSITIONS: "Specified positions with inconsistent number of particles."
}
def __init__(self, code, *args):
error_message = self.error_messages[code].format(*args)
super(SamplerStateError, self).__init__(error_message)
self.code = code
# =============================================================================
# THERMODYNAMIC STATE
# =============================================================================
class ThermodynamicState(object):
"""Thermodynamic state of a system.
Represent the portion of the state of a Context that does not
change with integration. Its main objectives are to wrap an
OpenMM system object to easily maintain a consistent thermodynamic
state. It can be used to create new OpenMM Contexts, or to convert
an existing Context to this particular thermodynamic state.
    NVT, NPT and NPgammaT ensembles are supported. The temperature must
    be specified in the constructor, either implicitly via a thermostat
    force in the system, or explicitly through the temperature
    parameter, which takes precedence over any thermostat setting.
To set a ThermodynamicState up in the NPgammaT ensemble, the system
passed to the constructor has to have a MonteCarloMembraneBarostat.
To set a ThermodynamicState up with anisotropic pressure control,
the system passed to the constructor has to have a MonteCarloAnisotropicBarostat.
Currently the MonteCarloAnisotropicBarostat is only supported if
the pressure is equal for all axes that are under pressure control.
Parameters
----------
system : openmm.System
An OpenMM system in a particular thermodynamic state.
temperature : openmm.unit.Quantity, optional
The temperature for the system at constant temperature. If
a MonteCarloBarostat is associated to the system, its
temperature will be set to this. If None, the temperature
is inferred from the system thermostat.
pressure : openmm.unit.Quantity, optional
The pressure for the system at constant pressure. If this
is specified, a MonteCarloBarostat is added to the system,
or just set to this pressure in case it already exists. If
None, the pressure is inferred from the system barostat, and
NVT ensemble is assumed if there is no barostat.
surface_tension : openmm.unit.Quantity, optional
The surface tension for the system at constant surface tension.
If this is specified, the system must have a MonteCarloMembraneBarostat.
If None, the surface_tension is inferred from the barostat and
NPT/NVT ensemble is assumed if there is no MonteCarloMembraneBarostat.
Attributes
----------
system
temperature
pressure
surface_tension
volume
n_particles
Notes
-----
    This state object cannot describe states obeying non-Boltzmann
statistics, such as Tsallis statistics.
Examples
--------
Specify an NVT state for a water box at 298 K.
>>> from openmmtools import testsystems
>>> temperature = 298.0*unit.kelvin
>>> waterbox = testsystems.WaterBox(box_edge=10*unit.angstroms,
... cutoff=4*unit.angstroms).system
>>> state = ThermodynamicState(system=waterbox, temperature=temperature)
In an NVT ensemble volume is constant and pressure is None.
>>> state.volume
Quantity(value=1.0, unit=nanometer**3)
>>> state.pressure is None
True
Convert this to an NPT state at 298 K and 1 atm pressure. This
operation automatically adds a MonteCarloBarostat to the system.
>>> pressure = 1.0*unit.atmosphere
>>> state.pressure = pressure
>>> state.pressure
Quantity(value=1.0, unit=atmosphere)
>>> state.volume is None
True
You cannot set a non-periodic system at constant pressure
>>> nonperiodic_system = testsystems.TolueneVacuum().system
>>> state = ThermodynamicState(nonperiodic_system, temperature=300*unit.kelvin,
... pressure=1.0*unit.atmosphere)
Traceback (most recent call last):
...
openmmtools.states.ThermodynamicsError: Non-periodic systems cannot have a barostat.
When temperature and/or pressure are not specified (i.e. they are
None) ThermodynamicState tries to infer them from a thermostat or
a barostat.
>>> state = ThermodynamicState(system=waterbox)
Traceback (most recent call last):
...
openmmtools.states.ThermodynamicsError: System does not have a thermostat specifying the temperature.
>>> thermostat = openmm.AndersenThermostat(200.0*unit.kelvin, 1.0/unit.picosecond)
>>> force_id = waterbox.addForce(thermostat)
>>> state = ThermodynamicState(system=waterbox)
>>> state.pressure is None
True
>>> state.temperature
Quantity(value=200.0, unit=kelvin)
>>> barostat = openmm.MonteCarloBarostat(1.0*unit.atmosphere, 200.0*unit.kelvin)
>>> force_id = waterbox.addForce(barostat)
>>> state = ThermodynamicState(system=waterbox)
>>> state.pressure
Quantity(value=1.01325, unit=bar)
>>> state.temperature
Quantity(value=200.0, unit=kelvin)
"""
# -------------------------------------------------------------------------
# Public interface
# -------------------------------------------------------------------------
def __init__(self, system, temperature=None, pressure=None, surface_tension=None):
self._initialize(system, temperature, pressure, surface_tension)
@property
def system(self):
"""The system in this thermodynamic state.
The returned system is a copy and can be modified without
altering the internal state of ThermodynamicState. In order
to ensure a consistent thermodynamic state, the system has
a Thermostat force. You can use `get_system()` to obtain a
copy of the system without the thermostat. The method
`create_context()` then takes care of removing the thermostat
when an integrator with a coupled heat bath is used (e.g.
`LangevinIntegrator`).
It can be set only to a system which is consistent with the
current thermodynamic state. Use `set_system()` if you want to
correct the thermodynamic state of the system automatically
before assignment.
See Also
--------
ThermodynamicState.get_system
ThermodynamicState.set_system
ThermodynamicState.create_context
"""
return self.get_system()
@system.setter
def system(self, value):
self.set_system(value)
def set_system(self, system, fix_state=False):
"""Manipulate and set the system.
With default arguments, this is equivalent to using the system
property, which raises an exception if the thermostat and the
barostat are not configured according to the thermodynamic state.
With this method it is possible to adjust temperature and
pressure of the system to make the assignment possible, without
manually configuring thermostat and barostat.
Parameters
----------
system : openmm.System
The system to set.
fix_state : bool, optional
If True, a thermostat is added to the system (if not already
present) and set to the correct temperature. If this state is
            in the NPT ensemble, a barostat is added, or configured if it
            exists already. If False, this simply checks that the thermostat
            and barostat are correctly configured, without modifying them.
Default is False.
Raises
------
ThermodynamicsError
If the system after the requested manipulation is still in
an incompatible state.
Examples
--------
The constructor adds a thermostat and a barostat to configure
the system in an NPT ensemble.
>>> from openmmtools import testsystems
>>> alanine = testsystems.AlanineDipeptideExplicit()
>>> state = ThermodynamicState(alanine.system, temperature=300*unit.kelvin,
... pressure=1.0*unit.atmosphere)
        If we try to set a system not in the NPT ensemble, an error occurs.
>>> state.system = alanine.system
Traceback (most recent call last):
...
openmmtools.states.ThermodynamicsError: System does not have a thermostat specifying the temperature.
We can fix both thermostat and barostat while setting the system.
>>> state.set_system(alanine.system, fix_state=True)
"""
# Copy the system to avoid modifications during standardization.
system = copy.deepcopy(system)
self._unsafe_set_system(system, fix_state)
def get_system(self, remove_thermostat=False, remove_barostat=False):
"""Manipulate and return the system.
        With default arguments, this is equivalent to the system property.
By setting the arguments it is possible to obtain a modified copy
of the system without the thermostat or the barostat.
Parameters
----------
remove_thermostat : bool
If True, the system thermostat is removed.
remove_barostat : bool
If True, the system barostat is removed.
Returns
-------
system : openmm.System
The system of this ThermodynamicState.
Examples
--------
The constructor adds a thermostat and a barostat to configure
the system in an NPT ensemble.
>>> from openmmtools import testsystems
>>> alanine = testsystems.AlanineDipeptideExplicit()
>>> state = ThermodynamicState(alanine.system, temperature=300*unit.kelvin,
... pressure=1.0*unit.atmosphere)
The system property returns a copy of the system with the
added thermostat and barostat.
>>> system = state.system
>>> [force.__class__.__name__ for force in system.getForces()
... if 'Thermostat' in force.__class__.__name__]
['AndersenThermostat']
        We can remove them while getting the system with
>>> system = state.get_system(remove_thermostat=True, remove_barostat=True)
>>> [force.__class__.__name__ for force in system.getForces()
... if 'Thermostat' in force.__class__.__name__]
[]
"""
system = copy.deepcopy(self._standard_system)
# Remove or configure standard pressure barostat.
if remove_barostat:
self._pop_barostat(system)
else: # Set pressure of standard barostat.
self._set_system_pressure(system, self.pressure)
self._set_system_surface_tension(system, self.surface_tension)
# Set temperature of standard thermostat and barostat.
if not (remove_barostat and remove_thermostat):
self._set_system_temperature(system, self.temperature)
# Remove or configure standard temperature thermostat.
if remove_thermostat:
self._remove_thermostat(system)
return system
@property
def temperature(self):
"""Constant temperature of the thermodynamic state."""
return self._temperature
@temperature.setter
def temperature(self, value):
if value is None:
raise ThermodynamicsError(ThermodynamicsError.NONE_TEMPERATURE)
self._temperature = value
@property
def kT(self):
"""Thermal energy per mole."""
return constants.kB * self.temperature
@property
def beta(self):
"""Thermodynamic beta in units of mole/energy."""
return 1.0 / self.kT
@property
def pressure(self):
"""Constant pressure of the thermodynamic state.
If the pressure is allowed to fluctuate, this is None. Setting
this will automatically add/configure a barostat to the system.
If it is set to None, the barostat will be removed.
"""
return self._pressure
@pressure.setter
def pressure(self, new_pressure):
old_pressure = self._pressure
self._pressure = new_pressure
# If we change ensemble, we need to modify the standard system.
if (new_pressure is None) != (old_pressure is None):
# The barostat will be removed/added since fix_state is True.
try:
self.set_system(self._standard_system, fix_state=True)
except ThermodynamicsError:
# Restore old pressure to keep object consistent.
self._pressure = old_pressure
raise
@property
def barostat(self):
"""The barostat associated to the system.
        Note that this is only a copy of the barostat; you will need to
        assign it back to the ThermodynamicState.barostat property for the
        changes to take effect internally. If the pressure is allowed to fluctuate,
this is None. Normally, you should only need to access the pressure
and temperature properties, but this allows you to modify other parameters
of the MonteCarloBarostat (e.g. frequency) after initialization. Setting
this to None will place the system in an NVT ensemble.
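        Examples
        --------
        A sketch of tuning a barostat parameter other than temperature and
        pressure; note the round-trip assignment needed for the change to
        take effect.
        >>> from openmmtools import testsystems
        >>> system = testsystems.AlanineDipeptideExplicit().system
        >>> state = ThermodynamicState(system, temperature=300*unit.kelvin,
        ...                            pressure=1.0*unit.atmosphere)
        >>> barostat = state.barostat
        >>> barostat.setFrequency(50)
        >>> state.barostat = barostat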
"""
# Retrieve the barostat with standard temperature/pressure, then
# set temperature and pressure to the thermodynamic state values.
barostat = copy.deepcopy(self._find_barostat(self._standard_system))
if barostat is not None: # NPT ensemble.
self._set_barostat_pressure(barostat, self.pressure)
self._set_barostat_temperature(barostat, self.temperature)
if self.surface_tension is not None:
self._set_barostat_surface_tension(barostat, self.surface_tension)
return barostat
@barostat.setter
def barostat(self, new_barostat):
# If None, just remove the barostat from the standard system.
if new_barostat is None:
self.pressure = None
self.surface_tension = None
return
# Remember old pressure and surface tension in case something goes wrong.
old_pressure = self.pressure
old_surface_tension = self.surface_tension
# make sure that the barostat type does not change
if self.barostat is not None and type(new_barostat) is not type(self.barostat):
raise ThermodynamicsError(ThermodynamicsError.INCONSISTENT_BAROSTAT)
# Build the system with the new barostat.
system = self.get_system(remove_barostat=True)
system.addForce(copy.deepcopy(new_barostat))
# Update the internally stored standard system, and restore the old
# pressure if something goes wrong (e.g. the system is not periodic).
try:
self._pressure = self._get_barostat_pressure(new_barostat)
self._surface_tension = self._get_barostat_surface_tension(new_barostat)
self._unsafe_set_system(system, fix_state=False)
except ThermodynamicsError:
self._pressure = old_pressure
self._surface_tension = old_surface_tension
raise
@property
def default_box_vectors(self):
"""The default box vectors of the System (read-only)."""
return self._standard_system.getDefaultPeriodicBoxVectors()
@property
def volume(self):
"""Constant volume of the thermodynamic state (read-only).
If the volume is allowed to fluctuate, or if the system is
not in a periodic box this is None.
"""
return self.get_volume()
def get_volume(self, ignore_ensemble=False):
"""Volume of the periodic box (read-only).
Parameters
----------
ignore_ensemble : bool, optional
If True, the volume of the periodic box vectors is returned
even if the volume fluctuates.
Returns
-------
volume : openmm.unit.Quantity
The volume of the periodic box (units of length^3) or
None if the system is not periodic or allowed to fluctuate.
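        Examples
        --------
        In NPT the volume fluctuates, but it can still be read off the
        default box vectors by ignoring the ensemble.
        >>> from openmmtools import testsystems
        >>> waterbox = testsystems.WaterBox(box_edge=10*unit.angstroms,
        ...                                 cutoff=4*unit.angstroms).system
        >>> state = ThermodynamicState(waterbox, temperature=298*unit.kelvin,
        ...                            pressure=1.0*unit.atmosphere)
        >>> state.get_volume() is None
        True
        >>> state.get_volume(ignore_ensemble=True)
        Quantity(value=1.0, unit=nanometer**3)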
"""
# Check if volume fluctuates
if self.pressure is not None and not ignore_ensemble:
return None
if not self._standard_system.usesPeriodicBoundaryConditions():
return None
return _box_vectors_volume(self.default_box_vectors)
@property
def n_particles(self):
"""Number of particles (read-only)."""
return self._standard_system.getNumParticles()
@property
def is_periodic(self):
"""True if the system is in a periodic box (read-only)."""
return self._standard_system.usesPeriodicBoundaryConditions()
@property
def surface_tension(self):
"""Surface tension"""
return self._surface_tension
@surface_tension.setter
def surface_tension(self, gamma):
if (self._surface_tension is None) != (gamma is None):
raise ThermodynamicsError(ThermodynamicsError.SURFACE_TENSION_NOT_SUPPORTED)
else:
self._surface_tension = gamma
def reduced_potential(self, context_state):
"""Reduced potential in this thermodynamic state.
Parameters
----------
context_state : SamplerState or openmm.Context
Carry the configurational properties of the system.
Returns
-------
u : float
The unit-less reduced potential, which can be considered
to have units of kT.
Notes
-----
The reduced potential is defined as in Ref. [1],
        with an additional term for the surface tension
u = \beta [U(x) + p V(x) + \mu N(x) - \gamma A]
where the thermodynamic parameters are
\beta = 1/(kB T) is the inverse temperature
p is the pressure
\mu is the chemical potential
\gamma is the surface tension
and the configurational properties are
x the atomic positions
U(x) is the potential energy
V(x) is the instantaneous box volume
N(x) the numbers of various particle species (e.g. protons of
titratible groups)
A(x) is the xy-area of the box.
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of
equilibrium states. J Chem Phys 129:124105, 2008.
Examples
--------
Compute the reduced potential of a water box at 298 K and 1 atm.
>>> from openmmtools import testsystems
>>> waterbox = testsystems.WaterBox(box_edge=20.0*unit.angstroms)
>>> system, positions = waterbox.system, waterbox.positions
>>> state = ThermodynamicState(system=waterbox.system,
... temperature=298.0*unit.kelvin,
... pressure=1.0*unit.atmosphere)
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = state.create_context(integrator)
>>> context.setPositions(waterbox.positions)
>>> sampler_state = SamplerState.from_context(context)
>>> u = state.reduced_potential(sampler_state)
If the sampler state is incompatible, an error is raised
>>> incompatible_sampler_state = sampler_state[:-1]
>>> state.reduced_potential(incompatible_sampler_state)
Traceback (most recent call last):
...
openmmtools.states.ThermodynamicsError: The sampler state has a different number of particles.
        In case a cached SamplerState containing the potential energy
        and the volume of the context is not available, the method
        accepts a Context object and computes them with Context.getState().
>>> u = state.reduced_potential(context)
"""
# Read Context/SamplerState n_particles, energy and volume.
if isinstance(context_state, openmm.Context):
n_particles = context_state.getSystem().getNumParticles()
openmm_state = context_state.getState(getEnergy=True)
potential_energy = openmm_state.getPotentialEnergy()
volume = openmm_state.getPeriodicBoxVolume()
area = _box_vectors_area_xy(openmm_state.getPeriodicBoxVectors())
else:
n_particles = context_state.n_particles
potential_energy = context_state.potential_energy
volume = context_state.volume
area = context_state.area_xy
# Check compatibility.
if n_particles != self.n_particles:
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_SAMPLER_STATE)
return self._compute_reduced_potential(potential_energy, self.temperature,
volume, self.pressure, area, self.surface_tension)
@classmethod
def reduced_potential_at_states(cls, context, thermodynamic_states):
"""Efficiently compute the reduced potential for a list of compatible states.
        The user is responsible for ensuring that the given context is
        compatible with the thermodynamic states.
Parameters
----------
context : openmm.Context
The OpenMM `Context` object with box vectors and positions set.
thermodynamic_states : list of ThermodynamicState
The list of thermodynamic states at which to compute the reduced
potential.
Returns
-------
reduced_potentials : list of float
The unit-less reduced potentials, which can be considered
to have units of kT.
Raises
------
ValueError
If the thermodynamic states are not compatible to each other.
"""
# Isolate first thermodynamic state.
if len(thermodynamic_states) == 1:
thermodynamic_states[0].apply_to_context(context)
return [thermodynamic_states[0].reduced_potential(context)]
# Check that the states are compatible.
for state_idx, state in enumerate(thermodynamic_states[:-1]):
if not state.is_state_compatible(thermodynamic_states[state_idx + 1]):
                raise ValueError('State {} is not compatible.'.format(state_idx))
# In NPT, we'll need also the volume.
is_npt = thermodynamic_states[0].pressure is not None
is_npgammat = thermodynamic_states[0].surface_tension is not None
volume = None
area_xy = None
energy_by_force_group = {force.getForceGroup(): 0.0*unit.kilocalories_per_mole
for force in context.getSystem().getForces()}
# Create new cache for memoization.
memo = {}
# Go through thermodynamic states and compute only the energy of the
# force groups that changed. Compute all the groups the first pass.
force_groups_to_compute = set(energy_by_force_group)
reduced_potentials = [0.0 for _ in range(len(thermodynamic_states))]
for state_idx, state in enumerate(thermodynamic_states):
if state_idx == 0:
state.apply_to_context(context)
else:
state._apply_to_context_in_state(context, thermodynamic_states[state_idx - 1])
# Compute the energy of all the groups to update.
for force_group_idx in force_groups_to_compute:
openmm_state = context.getState(getEnergy=True, groups=2**force_group_idx)
energy_by_force_group[force_group_idx] = openmm_state.getPotentialEnergy()
# Compute volume if this is the first time we obtain a state.
if is_npt and volume is None:
volume = openmm_state.getPeriodicBoxVolume()
if is_npgammat and area_xy is None:
area_xy = _box_vectors_area_xy(openmm_state.getPeriodicBoxVectors())
# Compute the new total reduced potential.
potential_energy = unit.sum(list(energy_by_force_group.values()))
reduced_potential = cls._compute_reduced_potential(potential_energy, state.temperature,
volume, state.pressure, area_xy, state.surface_tension)
reduced_potentials[state_idx] = reduced_potential
# Update groups to compute for next states.
if state_idx < len(thermodynamic_states) - 1:
next_state = thermodynamic_states[state_idx + 1]
force_groups_to_compute = next_state._find_force_groups_to_update(context, state, memo)
return reduced_potentials
def is_state_compatible(self, thermodynamic_state):
"""Check compatibility between ThermodynamicStates.
The state is compatible if Contexts created by thermodynamic_state
can be set to this ThermodynamicState through apply_to_context.
The property is symmetric and transitive.
This is faster than checking compatibility of a Context object
through is_context_compatible, and it should be preferred when
possible.
Parameters
----------
thermodynamic_state : ThermodynamicState
The thermodynamic state to test.
Returns
-------
is_compatible : bool
True if the context created by thermodynamic_state can be
converted to this state through apply_to_context().
See Also
--------
ThermodynamicState.apply_to_context
ThermodynamicState.is_context_compatible
Examples
--------
States in the same ensemble (NVT or NPT) are compatible.
>>> from openmm import unit
>>> from openmmtools import testsystems
>>> alanine = testsystems.AlanineDipeptideExplicit()
>>> state1 = ThermodynamicState(alanine.system, 273*unit.kelvin)
>>> state2 = ThermodynamicState(alanine.system, 310*unit.kelvin)
>>> state1.is_state_compatible(state2)
True
States in different ensembles are not compatible.
>>> state1.pressure = 1.0*unit.atmosphere
>>> state1.is_state_compatible(state2)
False
States that store different systems (that differ by more than
barostat and thermostat pressure and temperature) are also not
compatible.
>>> alanine_implicit = testsystems.AlanineDipeptideImplicit().system
>>> state_implicit = ThermodynamicState(alanine_implicit, 310*unit.kelvin)
>>> state2.is_state_compatible(state_implicit)
False
"""
state_system_hash = thermodynamic_state._standard_system_hash
return self._standard_system_hash == state_system_hash
def is_context_compatible(self, context):
"""Check compatibility of the given context.
        This is equivalent to is_state_compatible but slower, and it should
        be used only when the state that created the context is unknown. The
context is compatible if it can be set to this ThermodynamicState
through apply_to_context().
Parameters
----------
context : openmm.Context
The OpenMM context to test.
Returns
-------
is_compatible : bool
True if this ThermodynamicState can be applied to context.
See Also
--------
ThermodynamicState.apply_to_context
ThermodynamicState.is_state_compatible
"""
# Avoid modifying the context system during standardization.
context_system = copy.deepcopy(context.getSystem())
context_integrator = context.getIntegrator()
        # If the temperature is controlled by the integrator, the compatibility
        # is independent of the thermostat parameters, so we add a thermostat
        # identical to self._standard_system's. We don't care if the integrator's
        # temperature != self.temperature, so we set check_consistency=False.
if self._is_integrator_thermostated(context_integrator, check_consistency=False):
thermostat = self._find_thermostat(self._standard_system)
context_system.addForce(copy.deepcopy(thermostat))
# Compute and compare standard system hash.
self._standardize_system(context_system)
context_system_hash = self._compute_standard_system_hash(context_system)
is_compatible = self._standard_system_hash == context_system_hash
return is_compatible
def create_context(self, integrator, platform=None, platform_properties=None):
"""Create a context in this ThermodynamicState.
        The context contains a copy of the system. If the integrator
        is coupled to a heat bath (e.g. LangevinIntegrator), the system
        in the context will not have a thermostat; conversely, if the
        integrator is not thermostated, the system in the context will
        have a thermostat.
An integrator is considered thermostated if it exposes a method
getTemperature(). A CompoundIntegrator is considered coupled to
a heat bath if at least one of its integrators is. An exception
is raised if the integrator is thermostated at a temperature
different from the thermodynamic state's.
Parameters
----------
integrator : openmm.Integrator
The integrator to use for Context creation. The eventual
heat bath temperature must be consistent with the
thermodynamic state.
platform : openmm.Platform, optional
Platform to use. If None, OpenMM tries to select the fastest
available platform. Default is None.
platform_properties : dict, optional
A dictionary of platform properties. Requires platform to be
specified.
Returns
-------
context : openmm.Context
The created OpenMM Context object.
Raises
------
ThermodynamicsError
If the integrator has a temperature different from this
ThermodynamicState.
ValueError
If platform_properties is specified, but platform is None
Examples
--------
When passing an integrator that does not expose getter and setter
for the temperature, the context will be created with a thermostat.
>>> import openmm
>>> from openmm import unit
>>> from openmmtools import testsystems
>>> toluene = testsystems.TolueneVacuum()
>>> state = ThermodynamicState(toluene.system, 300*unit.kelvin)
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = state.create_context(integrator)
>>> system = context.getSystem()
>>> [force.__class__.__name__ for force in system.getForces()
... if 'Thermostat' in force.__class__.__name__]
['AndersenThermostat']
The thermostat is removed if we choose an integrator coupled
to a heat bath.
>>> del context # Delete previous context to free memory.
>>> integrator = openmm.LangevinIntegrator(300*unit.kelvin, 5.0/unit.picosecond,
... 2.0*unit.femtosecond)
>>> context = state.create_context(integrator)
>>> system = context.getSystem()
>>> [force.__class__.__name__ for force in system.getForces()
... if 'Thermostat' in force.__class__.__name__]
[]
"""
# Check that integrator is consistent and if it is thermostated.
# With CompoundIntegrator, at least one must be thermostated.
is_thermostated = self._is_integrator_thermostated(integrator)
# Get a copy of the system. If integrator is coupled
# to heat bath, remove the system thermostat.
system = self.get_system(remove_thermostat=is_thermostated)
# Create context.
if platform is None:
if platform_properties is not None:
raise ValueError("To set platform_properties, you need to also specify the platform.")
return openmm.Context(system, integrator)
elif platform_properties is None:
return openmm.Context(system, integrator, platform)
else:
return openmm.Context(system, integrator, platform, platform_properties)
def apply_to_context(self, context):
"""Apply this ThermodynamicState to the context.
The method apply_to_context does *not* check for the compatibility
of the context. The user is responsible for this. Depending on the
system size, is_context_compatible can be an expensive operation,
so is_state_compatible should be preferred when possible.
Parameters
----------
context : openmm.Context
The OpenMM Context to be set to this ThermodynamicState.
Raises
------
ThermodynamicsError
            If the context is in a different thermodynamic ensemble w.r.t.
            this state. This is just a quick check which does not substitute
            for is_state_compatible or is_context_compatible.
See Also
--------
ThermodynamicState.is_state_compatible
ThermodynamicState.is_context_compatible
Examples
--------
The method doesn't verify compatibility with the context, it is
the user's responsibility to do so, possibly with is_state_compatible
rather than is_context_compatible which is slower.
>>> import openmm
>>> from openmm import unit
>>> from openmmtools import testsystems
>>> toluene = testsystems.TolueneVacuum()
>>> state1 = ThermodynamicState(toluene.system, 273.0*unit.kelvin)
>>> state2 = ThermodynamicState(toluene.system, 310.0*unit.kelvin)
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = state1.create_context(integrator)
>>> if state2.is_state_compatible(state1):
... state2.apply_to_context(context)
>>> context.getParameter(openmm.AndersenThermostat.Temperature())
310.0
"""
self._set_context_barostat(context, update_pressure=True, update_temperature=True, update_surface_tension=True)
self._set_context_thermostat(context)
# -------------------------------------------------------------------------
# Magic methods
# -------------------------------------------------------------------------
def __copy__(self):
"""Overwrite normal implementation to share standard system."""
cls = self.__class__
new_state = cls.__new__(cls)
new_state.__dict__.update({k: v for k, v in self.__dict__.items()
if k != '_standard_system'})
new_state.__dict__['_standard_system'] = self._standard_system
return new_state
def __deepcopy__(self, memo):
"""Overwrite normal implementation to share standard system."""
cls = self.__class__
new_state = cls.__new__(cls)
memo[id(self)] = new_state
for k, v in self.__dict__.items():
if k != '_standard_system':
new_state.__dict__[k] = copy.deepcopy(v, memo)
new_state.__dict__['_standard_system'] = self._standard_system
return new_state
_ENCODING = 'utf-8'
def __getstate__(self, skip_system=False):
"""Return a dictionary representation of the state.
        Zlib compresses the serialized system after it's created. Many
        alchemical systems have very long serializations, so this method
        helps reduce space in memory and on disk. The compression forces
        the encoding for compatibility between separate Python installs
        (utf-8 by default).
Parameters
----------
skip_system: bool, Default: False
Choose whether or not to get the serialized system as the part
of the return. If False, then the serialized system is computed
and included in the serialization. If True, then ``None`` is
returned for the ``'standard_system'`` field of the serialization.
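        Examples
        --------
        A serialization round-trip sketch relying on this method and
        ``__setstate__``.
        >>> import pickle
        >>> from openmmtools import testsystems
        >>> state = ThermodynamicState(testsystems.TolueneVacuum().system,
        ...                            temperature=300.0*unit.kelvin)
        >>> restored = pickle.loads(pickle.dumps(state))
        >>> restored.temperature
        Quantity(value=300.0, unit=kelvin)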
"""
serialized_system = None
if not skip_system:
serialized_system = openmm.XmlSerializer.serialize(self._standard_system)
serialized_system = zlib.compress(serialized_system.encode(self._ENCODING))
return dict(standard_system=serialized_system, temperature=self.temperature,
pressure=self.pressure, surface_tension=self._surface_tension)
def __setstate__(self, serialization):
"""Set the state from a dictionary representation."""
self._temperature = serialization['temperature']
self._pressure = serialization['pressure']
self._surface_tension = serialization['surface_tension']
serialized_system = serialization['standard_system']
# Decompress system, if need be
try:
serialized_system = zlib.decompress(serialized_system)
# Py2 returns the string, Py3 returns a byte string to decode, but if we
# decode the string in Py2 we get a unicode object that OpenMM can't parse.
if sys.version_info > (3, 0):
serialized_system = serialized_system.decode(self._ENCODING)
except (TypeError, zlib.error): # Py3/2 throws different error types
# Catch the "serialization is not compressed" error, do nothing to string.
# Preserves backwards compatibility
pass
self._standard_system_hash = serialized_system.__hash__()
# Check first if we have already the system in the cache.
try:
self._standard_system = self._standard_system_cache[self._standard_system_hash]
except KeyError:
system = openmm.XmlSerializer.deserialize(serialized_system)
self._standard_system_cache[self._standard_system_hash] = system
self._standard_system = system
# -------------------------------------------------------------------------
# Internal-usage: initialization
# -------------------------------------------------------------------------
def _initialize(self, system, temperature=None, pressure=None, surface_tension=None):
"""Initialize the thermodynamic state."""
# Avoid modifying the original system when setting temperature and pressure.
system = copy.deepcopy(system)
# If pressure is None, we try to infer the pressure from the barostat.
barostat = self._find_barostat(system)
if pressure is None and barostat is not None:
self._pressure = self._get_barostat_pressure(barostat)
else:
self._pressure = pressure # Pressure here can also be None.
# If surface tension is None, we try to infer the surface tension from the barostat.
barostat_type = type(barostat)
if surface_tension is None and barostat_type == openmm.MonteCarloMembraneBarostat:
self._surface_tension = barostat.getDefaultSurfaceTension()
elif surface_tension is not None and barostat_type != openmm.MonteCarloMembraneBarostat:
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
else:
self._surface_tension = surface_tension
# If temperature is None, we infer the temperature from a thermostat.
if temperature is None:
thermostat = self._find_thermostat(system)
if thermostat is None:
raise ThermodynamicsError(ThermodynamicsError.NO_THERMOSTAT)
self._temperature = thermostat.getDefaultTemperature()
else:
self._temperature = temperature
# Fix system temperature/pressure if requested.
if temperature is not None:
self._set_system_temperature(system, temperature)
if pressure is not None:
self._set_system_pressure(system, pressure)
if surface_tension is not None:
self._set_system_surface_tension(system, surface_tension)
# We can use the unsafe set_system since the system has been copied.
self._unsafe_set_system(system, fix_state=False)
# -------------------------------------------------------------------------
# Internal-usage: system handling
# -------------------------------------------------------------------------
# Standard values are not standard in a physical sense, they are
# just consistent between ThermodynamicStates to make comparison
# of standard system hashes possible. We set this to round floats
# and use OpenMM units to avoid funniness due to precision errors
# caused by unit conversion.
_STANDARD_PRESSURE = 1.0*unit.bar
_STANDARD_TEMPERATURE = 273.0*unit.kelvin
_STANDARD_SURFACE_TENSION = 0.0*unit.nanometer*unit.bar
_NONPERIODIC_NONBONDED_METHODS = {openmm.NonbondedForce.NoCutoff,
openmm.NonbondedForce.CutoffNonPeriodic}
# Shared cache of standard systems to minimize memory consumption
# when simulating a lot of thermodynamic states. The cache holds
# only weak references so ThermodynamicState objects must keep the
# system as an internal variable.
_standard_system_cache = weakref.WeakValueDictionary()
def _unsafe_set_system(self, system, fix_state):
"""This implements self.set_system but modifies the passed system."""
# Configure temperature and pressure.
if fix_state:
# We just need to add/remove the barostat according to the ensemble.
# Temperature, pressure, surface tension of thermostat and barostat will be set
# to their standard value afterwards.
self._set_system_pressure(system, self.pressure)
self._set_system_surface_tension(system, self.surface_tension)
else:
# If the flag is deactivated, we check that temperature
# pressure, and surface tension of the system are correct.
self._check_system_consistency(system)
# Update standard system.
self._standardize_system(system)
self._update_standard_system(system)
def _check_system_consistency(self, system):
"""Check system consistency with this ThermodynamicState.
Raise an error if the system is inconsistent. Currently checks
that there's 1 and only 1 thermostat at the correct temperature,
that there's only 1 barostat (or none in case this is in NVT),
that the barostat is supported, has the correct temperature and
pressure, and that it is not associated to a non-periodic system.
Parameters
----------
system : openmm.System
The system to test.
Raises
------
ThermodynamicsError
If the system is inconsistent with this state.
"""
TE = ThermodynamicsError # shortcut
# This raises MULTIPLE_THERMOSTATS
thermostat = self._find_thermostat(system)
# When system is self._system, we check the presence of a
# thermostat before the barostat to avoid crashes when
# checking the barostat temperature.
if thermostat is None:
raise TE(TE.NO_THERMOSTAT)
elif not utils.is_quantity_close(thermostat.getDefaultTemperature(),
self.temperature):
raise TE(TE.INCONSISTENT_THERMOSTAT)
# This line raises MULTIPLE_BAROSTATS and UNSUPPORTED_BAROSTAT.
barostat = self._find_barostat(system)
if barostat is not None:
# Check that barostat is not added to non-periodic system. We
# cannot use System.usesPeriodicBoundaryConditions() because
# in OpenMM < 7.1 that returns True when a barostat is added.
# TODO just use usesPeriodicBoundaryConditions when drop openmm7.0
for force in system.getForces():
if isinstance(force, openmm.NonbondedForce):
nonbonded_method = force.getNonbondedMethod()
if nonbonded_method in self._NONPERIODIC_NONBONDED_METHODS:
raise TE(TE.BAROSTATED_NONPERIODIC)
if not self._is_barostat_consistent(barostat):
raise TE(TE.INCONSISTENT_BAROSTAT)
elif self.pressure is not None:
raise TE(TE.NO_BAROSTAT)
def _standardize_system(self, system):
"""Return a copy of the system in a standard representation.
This effectively defines which ThermodynamicStates are compatible
between each other. Compatible ThermodynamicStates have the same
standard systems, and is_state_compatible will return True if
the (cached) serialization of the standard systems are identical.
If no thermostat is present, an AndersenThermostat is added. The
        presence or absence of a barostat determines whether this system is
        in the NPT or NVT ensemble. Pressure and temperature of the barostat (if
        any) and thermostat are set to _STANDARD_PRESSURE/TEMPERATURE.
        If present, the barostat force is pushed to the end so that the
        order of the two forces won't matter.
        Effectively this means that only identical systems in the same ensemble
        (NPT or NVT) are compatible with each other.
Parameters
----------
system : openmm.System
The system to standardize.
See Also
--------
ThermodynamicState.apply_to_context
ThermodynamicState.is_state_compatible
ThermodynamicState.is_context_compatible
"""
        # This adds a thermostat if it doesn't exist already. This way
        # the comparison between systems using thermostats with different
        # parameters (e.g. collision frequency) will fail as expected.
self._set_system_temperature(system, self._STANDARD_TEMPERATURE)
# We need to be sure that thermostat and barostat always are
# in the same order, as the hash depends on the Forces order.
# Here we push the barostat at the end.
barostat = self._pop_barostat(system)
if barostat is not None:
self._set_barostat_pressure(barostat, self._STANDARD_PRESSURE)
if isinstance(barostat, openmm.MonteCarloMembraneBarostat):
self._set_barostat_surface_tension(barostat, self._STANDARD_SURFACE_TENSION)
system.addForce(barostat)
def _compute_standard_system_hash(self, standard_system):
"""Compute the standard system hash."""
system_serialization = openmm.XmlSerializer.serialize(standard_system)
return system_serialization.__hash__()
def _update_standard_system(self, standard_system):
"""Update the standard system, its hash and the standard system cache."""
self._standard_system_hash = self._compute_standard_system_hash(standard_system)
try:
self._standard_system = self._standard_system_cache[self._standard_system_hash]
except KeyError:
self._standard_system_cache[self._standard_system_hash] = standard_system
self._standard_system = standard_system
# -------------------------------------------------------------------------
# Internal-usage: context handling
# -------------------------------------------------------------------------
def _set_context_barostat(self, context, update_pressure, update_temperature, update_surface_tension):
"""Set the barostat parameters in the Context."""
barostat = self._find_barostat(context.getSystem())
# Check if we are in the same ensemble.
if (barostat is None) != (self._pressure is None):
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
if (type(barostat) is openmm.MonteCarloMembraneBarostat) == (self._surface_tension is None):
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
# No need to set the barostat if we are in NVT.
if self._pressure is None:
return
# Apply pressure, surface tension, and temperature to barostat.
if update_pressure:
self._set_barostat_pressure(barostat, self.pressure)
self._set_barostat_pressure_in_context(barostat, self.pressure, context)
if self.surface_tension is not None and update_surface_tension:
self._set_barostat_surface_tension(barostat, self.surface_tension)
self._set_barostat_surface_tension_in_context(barostat, self.surface_tension, context)
if update_temperature:
self._set_barostat_temperature(barostat, self.temperature)
# TODO remove try except when drop openmm7.0 support
try:
context.setParameter(barostat.Temperature(), self.temperature)
except AttributeError: # OpenMM < 7.1
openmm_state = context.getState(getPositions=True, getVelocities=True,
getParameters=True)
context.reinitialize()
context.setState(openmm_state)
def _set_context_thermostat(self, context):
"""Set the thermostat parameters in the Context."""
# First try to set the integrator (most common case).
# If this fails retrieve the Andersen thermostat.
is_thermostated = self._set_integrator_temperature(context.getIntegrator())
if not is_thermostated:
thermostat = self._find_thermostat(context.getSystem())
thermostat.setDefaultTemperature(self.temperature)
context.setParameter(thermostat.Temperature(), self.temperature)
def _apply_to_context_in_state(self, context, thermodynamic_state):
"""Apply this ThermodynamicState to the context.
        When we know the thermodynamic state of the context, this is much faster
        than apply_to_context(). The given thermodynamic state is assumed to be
compatible.
Parameters
----------
context : openmm.Context
The OpenMM Context to be set to this ThermodynamicState.
thermodynamic_state : ThermodynamicState
The ThermodynamicState of this context.
"""
update_pressure = self.pressure != thermodynamic_state.pressure
update_temperature = self.temperature != thermodynamic_state.temperature
update_surface_tension = self.surface_tension != thermodynamic_state.surface_tension
if update_pressure or update_temperature or update_surface_tension:
self._set_context_barostat(context, update_pressure, update_temperature, update_surface_tension)
if update_temperature:
self._set_context_thermostat(context)
# -------------------------------------------------------------------------
# Internal-usage: integrator handling
# -------------------------------------------------------------------------
@staticmethod
def _loop_over_integrators(integrator):
"""Unify manipulation of normal, compound and thermostated integrators."""
if isinstance(integrator, openmm.CompoundIntegrator):
for integrator_id in range(integrator.getNumIntegrators()):
_integrator = integrator.getIntegrator(integrator_id)
integrators.ThermostatedIntegrator.restore_interface(_integrator)
yield _integrator
else:
integrators.ThermostatedIntegrator.restore_interface(integrator)
yield integrator
def _is_integrator_thermostated(self, integrator, check_consistency=True):
"""True if integrator is coupled to a heat bath.
If integrator is a CompoundIntegrator, it returns true if at least
one of its integrators is coupled to a heat bath.
Raises
------
ThermodynamicsError
If check_consistency is True and the integrator is
coupled to a heat bath at a different temperature
than this thermodynamic state.
"""
# Loop over integrators to handle CompoundIntegrators.
is_thermostated = False
for _integrator in self._loop_over_integrators(integrator):
try:
temperature = _integrator.getTemperature()
except AttributeError:
pass
else:
# Raise exception if the heat bath is at the wrong temperature.
if (check_consistency and
not utils.is_quantity_close(temperature, self.temperature)):
err_code = ThermodynamicsError.INCONSISTENT_INTEGRATOR
raise ThermodynamicsError(err_code)
is_thermostated = True
# We still need to loop over every integrator to make sure
# that the temperature is consistent for all of them.
return is_thermostated
def _set_integrator_temperature(self, integrator):
"""Set heat bath temperature of the integrator.
If integrator is a CompoundIntegrator, it sets the temperature
of every sub-integrator.
Returns
-------
is_thermostated : bool
True if the integrator is thermostated.
"""
def set_temp(_integrator):
try:
_integrator.setTemperature(self.temperature)
return True
except AttributeError:
return False
# Loop over integrators to handle CompoundIntegrators.
is_thermostated = False
for _integrator in self._loop_over_integrators(integrator):
is_thermostated = is_thermostated or set_temp(_integrator)
return is_thermostated
# -------------------------------------------------------------------------
# Internal-usage: barostat handling
# -------------------------------------------------------------------------
_SUPPORTED_BAROSTATS = {'MonteCarloBarostat', 'MonteCarloAnisotropicBarostat', 'MonteCarloMembraneBarostat'}
@classmethod
def _find_barostat(cls, system, get_index=False):
"""Return the first barostat found in the system.
Returns
-------
        force_idx : int or None, optional
            The force index of the barostat. Returned only if get_index
            is True.
        barostat : OpenMM Force object or None
            The barostat found in the system, or None if no barostat is found.
Raises
------
ThermodynamicsError
If the system contains unsupported barostats.
"""
try:
force_idx, barostat = forces.find_forces(system, '.*Barostat.*', only_one=True)
except forces.MultipleForcesError:
raise ThermodynamicsError(ThermodynamicsError.MULTIPLE_BAROSTATS)
except forces.NoForceFoundError:
force_idx, barostat = None, None
else:
if barostat.__class__.__name__ not in cls._SUPPORTED_BAROSTATS:
raise ThermodynamicsError(ThermodynamicsError.UNSUPPORTED_BAROSTAT,
barostat.__class__.__name__)
elif isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
# support only if pressure in all scaled directions is equal
pressures = barostat.getDefaultPressure().value_in_unit(unit.bar)
                scaled = [barostat.getScaleX(), barostat.getScaleY(), barostat.getScaleZ()]
if sum(scaled) == 0:
raise ThermodynamicsError(ThermodynamicsError.UNSUPPORTED_ANISOTROPIC_BAROSTAT)
active_pressures = [pressure for pressure, active in zip(pressures, scaled) if active]
if any(abs(pressure - active_pressures[0]) > 0 for pressure in active_pressures):
raise ThermodynamicsError(ThermodynamicsError.UNSUPPORTED_ANISOTROPIC_BAROSTAT)
if get_index:
return force_idx, barostat
return barostat
@classmethod
def _pop_barostat(cls, system):
"""Remove the system barostat.
Returns
-------
The removed barostat if it was found, None otherwise.
"""
barostat_idx, barostat = cls._find_barostat(system, get_index=True)
if barostat_idx is not None:
# We need to copy the barostat since we don't own
# its memory (i.e. we can't add it back to the system).
barostat = copy.deepcopy(barostat)
system.removeForce(barostat_idx)
return barostat
return None
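    # Illustrative sketch (hypothetical usage): because _pop_barostat() returns
    # a deep copy, the returned force can safely be re-added to a system later.
    #
    #     barostat = ThermodynamicState._pop_barostat(system)  # system is now NVT
    #     if barostat is not None:
    #         system.addForce(barostat)  # safe: we own the copied force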
def _is_barostat_type_consistent(self, barostat):
# during initialization (standard system not set), any barostat type is OK
if not hasattr(self, "_standard_system"):
return True
system_barostat = self._find_barostat(self._standard_system)
return type(barostat) == type(system_barostat)
def _is_barostat_consistent(self, barostat):
"""Check the barostat's temperature, pressure, and surface_tension."""
try:
barostat_temperature = barostat.getDefaultTemperature()
except AttributeError: # versions previous to OpenMM 7.1
barostat_temperature = barostat.getTemperature()
barostat_pressure = self._get_barostat_pressure(barostat)
barostat_surface_tension = self._get_barostat_surface_tension(barostat)
is_consistent = self._is_barostat_type_consistent(barostat)
is_consistent = is_consistent and utils.is_quantity_close(barostat_temperature, self.temperature)
is_consistent = is_consistent and utils.is_quantity_close(barostat_pressure, self.pressure)
if barostat is not None and self._surface_tension is not None:
is_consistent = is_consistent and utils.is_quantity_close(barostat_surface_tension, self._surface_tension)
else:
is_consistent = is_consistent and (barostat_surface_tension == self._surface_tension) # both None
return is_consistent
def _set_system_pressure(self, system, pressure):
"""Add or configure the system barostat to the given pressure.
If a new barostat is added, its temperature is set to
self.temperature.
Parameters
----------
system : openmm.System
The system's barostat will be added/configured.
pressure : openmm.unit.Quantity or None
The pressure with units compatible to bars. If None, the
barostat of the system is removed.
Raises
------
ThermodynamicsError
If pressure needs to be set for a non-periodic system.
"""
if pressure is None: # If new pressure is None, remove barostat.
self._pop_barostat(system)
return
if not system.usesPeriodicBoundaryConditions():
raise ThermodynamicsError(ThermodynamicsError.BAROSTATED_NONPERIODIC)
barostat = self._find_barostat(system)
if barostat is None: # Add barostat
barostat = openmm.MonteCarloBarostat(pressure, self.temperature)
system.addForce(barostat)
else: # Set existing barostat
self._set_barostat_pressure(barostat, pressure)
@staticmethod
def _set_barostat_pressure(barostat, pressure):
"""Set barostat pressure."""
if isinstance(pressure, unit.Quantity):
pressure = pressure.value_in_unit(unit.bar)
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
barostat.setDefaultPressure(openmm.Vec3(pressure, pressure, pressure)*unit.bar)
else:
barostat.setDefaultPressure(pressure*unit.bar)
@staticmethod
def _set_barostat_pressure_in_context(barostat, pressure, context):
"""Set barostat pressure."""
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
p = pressure.value_in_unit(unit.bar)
context.setParameter(barostat.Pressure(), openmm.Vec3(p, p, p)*unit.bar)
else:
context.setParameter(barostat.Pressure(), pressure)
@staticmethod
def _get_barostat_pressure(barostat):
"""Set barostat pressure."""
if isinstance(barostat, openmm.MonteCarloAnisotropicBarostat):
scaled = [barostat.getScaleX(), barostat.getScaleY(), barostat.getScaleZ()]
first_scaled_axis = scaled.index(True)
return barostat.getDefaultPressure()[first_scaled_axis]
else:
return barostat.getDefaultPressure()
@staticmethod
def _set_barostat_temperature(barostat, temperature):
"""Set barostat temperature."""
barostat.setDefaultTemperature(temperature)
def _set_system_surface_tension(self, system, gamma):
"""Set system surface tension"""
if gamma is not None and not system.usesPeriodicBoundaryConditions():
raise ThermodynamicsError(ThermodynamicsError.BAROSTATED_NONPERIODIC)
barostat = self._find_barostat(system)
if (gamma is None) == isinstance(barostat, openmm.MonteCarloMembraneBarostat):
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
self._set_barostat_surface_tension(barostat, gamma)
def _set_barostat_surface_tension(self, barostat, gamma):
# working around a bug in the unit conversion https://github.com/openmm/openmm/issues/2406
if isinstance(gamma, unit.Quantity):
gamma = gamma.value_in_unit(unit.bar * unit.nanometer)
if isinstance(barostat, openmm.MonteCarloMembraneBarostat):
barostat.setDefaultSurfaceTension(gamma)
elif gamma is not None:
raise ThermodynamicsError(ThermodynamicsError.SURFACE_TENSION_NOT_SUPPORTED)
def _get_barostat_surface_tension(self, barostat):
if isinstance(barostat, openmm.MonteCarloMembraneBarostat):
return barostat.getDefaultSurfaceTension()
else:
return None
@staticmethod
def _set_barostat_surface_tension_in_context(barostat, surface_tension, context):
"""Set barostat surface tension."""
# work around a unit conversion issue in openmm
if isinstance(surface_tension, unit.Quantity):
surface_tension = surface_tension.value_in_unit(unit.nanometer*unit.bar)
try:
context.getParameter(barostat.SurfaceTension())
except Exception:
raise ThermodynamicsError(ThermodynamicsError.INCOMPATIBLE_ENSEMBLE)
context.setParameter(barostat.SurfaceTension(), surface_tension)
# -------------------------------------------------------------------------
# Internal-usage: thermostat handling
# -------------------------------------------------------------------------
@classmethod
def _find_thermostat(cls, system, get_index=False):
"""Return the first thermostat in the system.
Returns
-------
        force_idx : int or None, optional
            The force index of the thermostat. Returned only if get_index
            is True.
        thermostat : OpenMM Force object or None
            The thermostat found in the system, or None if no thermostat is found.
"""
try:
force_idx, thermostat = forces.find_forces(system, '.*Thermostat.*', only_one=True)
except forces.MultipleForcesError:
raise ThermodynamicsError(ThermodynamicsError.MULTIPLE_THERMOSTATS)
except forces.NoForceFoundError:
force_idx, thermostat = None, None
if get_index:
return force_idx, thermostat
return thermostat
@classmethod
def _remove_thermostat(cls, system):
"""Remove the system thermostat."""
thermostat_idx, thermostat = cls._find_thermostat(system, get_index=True)
if thermostat_idx is not None:
system.removeForce(thermostat_idx)
@classmethod
def _set_system_temperature(cls, system, temperature):
"""Configure thermostat and barostat to the given temperature.
The thermostat temperature is set, or a new AndersenThermostat
is added if it doesn't exist.
Parameters
----------
system : openmm.System
The system to modify.
temperature : openmm.unit.Quantity
The temperature for the thermostat.
"""
thermostat = cls._find_thermostat(system)
if thermostat is None:
thermostat = openmm.AndersenThermostat(temperature, 1.0/unit.picosecond)
system.addForce(thermostat)
else:
thermostat.setDefaultTemperature(temperature)
barostat = cls._find_barostat(system)
if barostat is not None:
cls._set_barostat_temperature(barostat, temperature)
# -------------------------------------------------------------------------
# Internal-usage: initialization
# -------------------------------------------------------------------------
@staticmethod
def _compute_reduced_potential(potential_energy, temperature, volume, pressure, area_xy=None, surface_tension=None):
"""Convert potential energy into reduced potential."""
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * temperature)
reduced_potential = potential_energy / unit.AVOGADRO_CONSTANT_NA
if pressure is not None:
reduced_potential += pressure * volume
if area_xy is not None and surface_tension is not None:
reduced_potential -= surface_tension * area_xy
return beta * reduced_potential
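    # The reduced (dimensionless) potential computed above follows
    #     u(x) = beta * [U(x) + p * V(x) - gamma * A_xy(x)],  beta = 1/(kB*T),
    # where the pV and surface-tension terms enter only when defined.
    # Illustrative numeric sketch (hypothetical values):
    #
    #     u = ThermodynamicState._compute_reduced_potential(
    #         potential_energy=-500.0*unit.kilojoules_per_mole,
    #         temperature=300.0*unit.kelvin,
    #         volume=27.0*unit.nanometer**3,
    #         pressure=1.0*unit.bar)  # dimensionless float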
def _find_force_groups_to_update(self, context, thermodynamic_state, memo):
"""Find the force groups to be recomputed when moving to the given state.
With the current implementation of ThermodynamicState, no force group has
to be recomputed as only temperature and pressure change between compatible
states, but this method becomes essential in CompoundThermodynamicState.
"""
return set()
# =============================================================================
# SAMPLER STATE
# =============================================================================
class SamplerState(object):
"""State carrying the configurational properties of a system.
    Represents the portion of the state of a Context that changes with
    integration. When initialized through the normal constructor, the
    object is only partially defined, as the energy attributes are None
    until the SamplerState is updated with update_from_context. The
    state can still be applied to a newly created context to set its
    positions, velocities and box vectors. To initialize all attributes,
    use the alternative constructor from_context.
Parameters
----------
positions : Nx3 openmm.unit.Quantity
Position vectors for N particles (length units).
velocities : Nx3 openmm.unit.Quantity, optional
Velocity vectors for N particles (velocity units).
box_vectors : 3x3 openmm.unit.Quantity
Current box vectors (length units).
Attributes
----------
positions
velocities
box_vectors : 3x3 openmm.unit.Quantity.
Current box vectors (length units).
potential_energy
kinetic_energy
total_energy
volume
n_particles
collective_variables
Examples
--------
>>> from openmmtools import testsystems
>>> toluene_test = testsystems.TolueneVacuum()
>>> sampler_state = SamplerState(toluene_test.positions)
At this point only the positions are defined
>>> sampler_state.velocities is None
True
>>> sampler_state.total_energy is None
True
but it can still be used to set up a context
>>> temperature = 300.0*unit.kelvin
>>> thermodynamic_state = ThermodynamicState(toluene_test.system, temperature)
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = thermodynamic_state.create_context(integrator)
>>> sampler_state.apply_to_context(context) # Set initial positions.
    A SamplerState cannot be updated from an incompatible context,
    which here means a context with a different number of particles
>>> hostguest_test = testsystems.HostGuestVacuum()
>>> incompatible_state = ThermodynamicState(hostguest_test.system, temperature)
>>> integrator2 = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> incompatible_context = incompatible_state.create_context(integrator2)
>>> incompatible_context.setPositions(hostguest_test.positions)
>>> sampler_state.is_context_compatible(incompatible_context)
False
>>> sampler_state.update_from_context(incompatible_context)
Traceback (most recent call last):
...
openmmtools.states.SamplerStateError: Specified positions with inconsistent number of particles.
Create a new SamplerState instead
>>> sampler_state2 = SamplerState.from_context(context)
>>> sampler_state2.potential_energy is not None
True
It is possible to slice a sampler state to obtain positions and
particles of a subset of atoms
>>> sliced_sampler_state = sampler_state[:10]
>>> sliced_sampler_state.n_particles
10
"""
# -------------------------------------------------------------------------
# Public interface
# -------------------------------------------------------------------------
def __init__(self, positions, velocities=None, box_vectors=None):
# Allocate variables, they get set in _initialize
self._positions = None
self._velocities = None
self._box_vectors = None
self._collective_variables = None
self._kinetic_energy = None
self._potential_energy = None
        args = []
        for value in [positions, velocities, box_vectors]:
            if isinstance(value, unit.Quantity) and not isinstance(value._value, np.ndarray):
                args.append(np.array(value / value.unit) * value.unit)
            else:
                args.append(copy.deepcopy(value))
        self._initialize(*args)
@classmethod
def from_context(cls, context_state, ignore_collective_variables=False):
"""Alternative constructor.
Read all the configurational properties from a Context object or
an OpenMM State object. This guarantees that all attributes
(including energy attributes) are initialized.
Parameters
----------
context_state : openmm.Context or openmm.State
The object to read. If a State object, it must contain information
about positions, velocities and energy.
        ignore_collective_variables : bool, optional
            If True, the collective variables are not read from the
            Context and are left invalidated. This must be True when a
            State object is passed in, because a State cannot distinguish
            a System with collective variables from one without.
Returns
-------
sampler_state : SamplerState
A new SamplerState object.
"""
sampler_state = cls([])
sampler_state._read_context_state(context_state, check_consistency=False,
ignore_positions=False,
ignore_velocities=False,
ignore_collective_variables=ignore_collective_variables)
return sampler_state
@property
def positions(self):
"""Particle positions.
An Nx3 openmm.unit.Quantity object, where N is the number of
particles.
Raises
------
SamplerStateError
If set to an array with a number of particles different
than n_particles.
"""
return self._positions
@positions.setter
def positions(self, value):
self._set_positions(value, from_context=False, check_consistency=True)
@property
def velocities(self):
"""Particle velocities.
An Nx3 openmm.unit.Quantity object, where N is the number of
particles.
Raises
------
SamplerStateError
If set to an array with a number of particles different
than n_particles.
"""
return self._velocities
@velocities.setter
def velocities(self, value):
self._set_velocities(value, from_context=False)
@property
def box_vectors(self):
"""Box vectors.
An 3x3 openmm.unit.Quantity object.
"""
return self._box_vectors
@box_vectors.setter
def box_vectors(self, value):
        # Make sure this is a Quantity. For example,
        # System.getDefaultPeriodicBoxVectors returns a list of
        # Quantity objects instead of a single Quantity.
if value is not None and not isinstance(value, unit.Quantity):
value = unit.Quantity(value)
self._box_vectors = value
# Derived properties
@property
def potential_energy(self):
"""openmm.unit.Quantity or None: Potential energy of this configuration."""
        if self._are_positions_invalid:
            return None
return self._potential_energy
@potential_energy.setter
def potential_energy(self, new_value):
if new_value is not None:
raise AttributeError("Cannot set potential energy as it is a function of Context")
self._potential_energy = None
@property
def kinetic_energy(self):
"""openmm.unit.Quantity or None: Kinetic energy of this configuration."""
if self.velocities is None or self.velocities.has_changed:
return None
return self._kinetic_energy
@kinetic_energy.setter
def kinetic_energy(self, new_value):
if new_value is not None:
raise AttributeError("Cannot set kinetic energy as it is a function of Context")
self._kinetic_energy = None
@property
def collective_variables(self):
"""dict or None: Collective variables for this configuration if present in Context"""
        if self._are_positions_invalid:
            return None
return self._collective_variables
@collective_variables.setter
def collective_variables(self, new_value):
if new_value is not None:
raise AttributeError("Cannot set collective variables as it is a function of Context")
        self._collective_variables = None
@property
def total_energy(self):
"""The sum of potential and kinetic energy (read-only)."""
if self.potential_energy is None or self.kinetic_energy is None:
return None
return self.potential_energy + self.kinetic_energy
@property
def volume(self):
"""The volume of the box (read-only)"""
return _box_vectors_volume(self.box_vectors)
@property
def area_xy(self):
"""The xy-area of the box (read-only)"""
return _box_vectors_area_xy(self.box_vectors)
@property
def n_particles(self):
"""Number of particles (read-only)."""
return len(self.positions)
def is_context_compatible(self, context):
"""Check compatibility of the given context.
The context is compatible if this SamplerState can be applied
through apply_to_context.
Parameters
----------
context : openmm.Context
The context to test.
Returns
-------
is_compatible : bool
True if this SamplerState can be applied to context.
See Also
--------
SamplerState.apply_to_context
"""
is_compatible = self.n_particles == context.getSystem().getNumParticles()
return is_compatible
def update_from_context(self, context_state, ignore_positions=False, ignore_velocities=False,
ignore_collective_variables=False):
"""Read the state from the given Context or State object.
        The context must be compatible. Use SamplerState.from_context
        if you want to build a new sampler state from an incompatible context.
Parameters
----------
context_state : openmm.Context or openmm.State
The object to read. If a State, it must contain information
on positions, velocities and energies. Collective
variables can only be updated from a Context, NOT a State
at the moment.
ignore_positions : bool, optional
If True, the positions (and potential energy) are not updated from the
Context. This can cause the SamplerState to no longer be consistent between
its variables, so the defaults err on the side of updating everything,
if possible. Only use if you know what you are doing.
ignore_velocities : bool, optional
If True, the velocities (and kinetic energy) are not updated from the
Context. This can cause the SamplerState to no longer be consistent between
its variables, so the defaults err on the side of updating everything,
if possible. Only use if you know what you are doing.
        ignore_collective_variables : bool, optional
            If True, the collective variables are not updated from the
            Context. This must be True when a State object is passed in,
            because a State cannot distinguish a System with collective
            variables from one without.
Raises
------
SamplerStateError
If the given context is not compatible, or if a State is given without
setting ignore_collective_variables
"""
self._read_context_state(context_state, check_consistency=True,
ignore_positions=ignore_positions,
ignore_velocities=ignore_velocities,
ignore_collective_variables=ignore_collective_variables)
def apply_to_context(self, context, ignore_velocities=False):
"""Set the context state.
If velocities and box vectors have not been specified in the
constructor, they are not set.
Parameters
----------
context : openmm.Context
The context to set.
        ignore_velocities : bool, optional
            If True, velocities are not set in the Context even if they
            are defined. This can be useful if you only need the Context
            to compute energies.
"""
# NOTE: Box vectors MUST be updated before positions are set.
if self.box_vectors is not None:
context.setPeriodicBoxVectors(*self.box_vectors)
context.setPositions(self._unitless_positions)
if self._velocities is not None and not ignore_velocities:
context.setVelocities(self._unitless_velocities)
def has_nan(self):
"""Check that energies and positions are finite.
Returns
-------
True if the potential energy or any of the generalized coordinates
are nan.
"""
if (self.potential_energy is not None and
np.isnan(self.potential_energy.value_in_unit(self.potential_energy.unit))):
return True
if np.any(np.isnan(self._positions)):
return True
return False
def __getitem__(self, item):
sampler_state = self.__class__([])
# Handle single index.
if np.issubdtype(type(item), np.integer):
# Here we don't need to copy since we instantiate a new array.
pos_value = self._positions[item].value_in_unit(self._positions.unit)
new_positions = unit.Quantity(np.array([pos_value]), self._positions.unit)
sampler_state._set_positions(new_positions, from_context=False, check_consistency=False)
if self._velocities is not None:
vel_value = self._velocities[item].value_in_unit(self._velocities.unit)
new_velocities = unit.Quantity(np.array([vel_value]), self._velocities.unit)
sampler_state._set_velocities(new_velocities, from_context=False)
else: # Assume slice or sequence.
# Copy original values to avoid side effects.
sampler_state._set_positions(copy.deepcopy(self._positions[item]),
from_context=False, check_consistency=False)
if self._velocities is not None:
                sampler_state._set_velocities(copy.deepcopy(self._velocities[item]),
from_context=False)
# Copy box vectors.
sampler_state.box_vectors = copy.deepcopy(self.box_vectors)
        # Energies/CVs are undefined for a subset of atoms.
sampler_state._potential_energy = None
sampler_state._kinetic_energy = None
sampler_state._collective_variables = None
return sampler_state
def __getstate__(self, ignore_velocities=False):
"""Return a dictionary representation of the state.
Parameters
----------
ignore_velocities : bool, optional
If True, velocities are not serialized. This can be useful for
example to save bandwidth when sending a ``SamplerState`` over
the network and velocities are not required (default is False).
"""
velocities = None if ignore_velocities else self.velocities
serialization = dict(
positions=self.positions, velocities=velocities,
box_vectors=self.box_vectors, potential_energy=self.potential_energy,
kinetic_energy=self.kinetic_energy,
collective_variables=self.collective_variables
)
return serialization
def __setstate__(self, serialization, ignore_velocities=False):
"""Set the state from a dictionary representation.
Parameters
----------
ignore_velocities : bool, optional
If True and the ``SamplerState`` has already velocities
defined, this does not overwrite the velocities.
"""
if ignore_velocities and '_velocities' in self.__dict__:
serialization['velocities'] = self.velocities
self._initialize(**serialization)
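    # Illustrative sketch (hypothetical usage): __getstate__/__setstate__ make
    # SamplerState picklable, which is how it travels between processes.
    #
    #     import pickle
    #     restored = pickle.loads(pickle.dumps(sampler_state))
    #     assert restored.n_particles == sampler_state.n_particles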
# -------------------------------------------------------------------------
# Internal-usage
# -------------------------------------------------------------------------
def _initialize(self, positions, velocities, box_vectors,
potential_energy=None, kinetic_energy=None, collective_variables=None):
"""Initialize the sampler state."""
self._set_positions(positions, from_context=False, check_consistency=False)
self.velocities = velocities # Checks consistency and units.
self.box_vectors = box_vectors # Make sure box vectors is Quantity.
self._potential_energy = potential_energy
self._kinetic_energy = kinetic_energy
self._collective_variables = collective_variables
def _set_positions(self, new_positions, from_context, check_consistency):
"""Set the positions without checking for consistency."""
if check_consistency and (new_positions is None or len(new_positions) != self.n_particles):
raise SamplerStateError(SamplerStateError.INCONSISTENT_POSITIONS)
if from_context:
self._unitless_positions_cache = new_positions._value
assert new_positions.unit == unit.nanometer
else:
self._unitless_positions_cache = None
self._positions = utils.TrackedQuantity(new_positions)
# The potential energy changes with different positions.
self._potential_energy = None
# The CVs change with different positions too
self._collective_variables = None
def _set_velocities(self, new_velocities, from_context):
"""Set the velocities."""
if from_context:
self._unitless_velocities_cache = new_velocities._value
assert new_velocities.unit == unit.nanometer/unit.picoseconds
else:
if new_velocities is not None and self.n_particles != len(new_velocities):
raise SamplerStateError(SamplerStateError.INCONSISTENT_VELOCITIES)
self._unitless_velocities_cache = None
if new_velocities is not None:
new_velocities = utils.TrackedQuantity(new_velocities)
self._velocities = new_velocities
        # The kinetic energy changes with different velocities.
self._kinetic_energy = None
@property
def _unitless_positions(self):
"""Keeps a cache of unitless positions."""
if self._unitless_positions_cache is None or self._positions.has_changed:
self._unitless_positions_cache = self.positions.value_in_unit_system(unit.md_unit_system)
if self._positions.has_changed:
self._positions.has_changed = False
self._potential_energy = None
return self._unitless_positions_cache
@property
def _unitless_velocities(self):
"""Keeps a cache of unitless velocities."""
if self._velocities is None:
return None
if self._unitless_velocities_cache is None or self._velocities.has_changed:
self._unitless_velocities_cache = self._velocities.value_in_unit_system(unit.md_unit_system)
if self._velocities.has_changed:
self._velocities.has_changed = False
self._kinetic_energy = None
return self._unitless_velocities_cache
def _read_context_state(self, context_state, check_consistency,
ignore_positions,
ignore_velocities,
ignore_collective_variables):
"""Read the Context state.
Parameters
----------
context_state : openmm.Context or openmm.State
The object to read.
        check_consistency : bool
            If True, raise an error if the context system has a
            different number of particles than the current state.
ignore_positions : bool
If True, the positions and potential energy are not updated from the
Context.
ignore_velocities : bool
If True, the velocities and kinetic energy are not updated from the
Context.
        ignore_collective_variables : bool
            If True, the collective variables are not updated from the
            Context. This must be True when a State object is passed in,
            because a State cannot distinguish a System with collective
            variables from one without.
Raises
------
        SamplerStateError
            If the context system has a different number of particles
            than the current state.
"""
if isinstance(context_state, openmm.Context):
system = context_state.getSystem()
openmm_state = context_state.getState(getPositions=not ignore_positions,
getVelocities=not ignore_velocities,
getEnergy=not (ignore_velocities and ignore_positions),
enforcePeriodicBox=system.usesPeriodicBoundaryConditions())
else:
            if not ignore_collective_variables:
                raise SamplerStateError("State objects must be read with ignore_collective_variables=True "
                                        "because they don't track CVs, so a System with collective "
                                        "variables would be indistinguishable from one without.")
openmm_state = context_state
# We assign positions first, since the velocities
# property will check its length for consistency.
# Potential energy and kinetic energy must be updated
# after positions and velocities or they'll be reset.
if not ignore_positions:
positions = openmm_state.getPositions(asNumpy=True)
self._set_positions(positions, from_context=True, check_consistency=check_consistency)
self._potential_energy = openmm_state.getPotentialEnergy()
if not ignore_velocities:
velocities = openmm_state.getVelocities(asNumpy=True)
self._set_velocities(velocities, from_context=True)
self._kinetic_energy = openmm_state.getKineticEnergy()
self.box_vectors = openmm_state.getPeriodicBoxVectors(asNumpy=True)
if not ignore_collective_variables:
self._read_collective_variables(context_state)
def _read_collective_variables(self, context_state):
"""
Update the collective variables from the context object
Parameters
----------
        context_state : openmm.Context
            The object to read. This only works with Contexts for now,
            but in the future this may support OpenMM State objects as well.
"""
# Allows direct key assignment without initializing each key:dict pair
collective_variables = collections.defaultdict(dict)
system = context_state.getSystem()
for force_index, force in enumerate(system.getForces()):
try:
cv_values = force.getCollectiveVariableValues(context_state)
for cv_index in range(force.getNumCollectiveVariables()):
cv_name = force.getCollectiveVariableName(cv_index)
collective_variables[cv_name][force_index] = cv_values[cv_index]
except AttributeError:
pass
# Trap no variables found (empty dict), return None
# Cast defaultdict back to dict
self._collective_variables = dict(collective_variables) if collective_variables else None
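    # Illustrative sketch: only forces exposing getCollectiveVariableValues()
    # (e.g. openmm.CustomCVForce) contribute entries above; all other forces
    # hit the AttributeError branch. A hypothetical system with a single CV
    # named 'r0' defined by the force at index 3 would yield
    #     collective_variables == {'r0': {3: value}}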
    @property
    def _are_positions_invalid(self):
        """True if positions are undefined or have changed since the last update."""
        return self.positions is None or self.positions.has_changed
# =============================================================================
# COMPOUND THERMODYNAMIC STATE
# =============================================================================
class ComposableStateError(Exception):
"""Error raised by a ComposableState."""
pass
class IComposableState(utils.SubhookedABCMeta):
"""A state composable through CompoundThermodynamicState.
Define the interface that needs to be implemented to extend a
ThermodynamicState through CompoundThermodynamicState.
See Also
--------
CompoundThermodynamicState
"""
@abc.abstractmethod
def apply_to_system(self, system):
"""Set the system to be in this state.
This method is called in three situations:
1) On initialization, before standardizing the system.
2) When a new system is set and the argument ``fix_state`` is
set to ``True``.
3) When the system is retrieved to convert the standard system
into a system in the correct thermodynamic state for the
simulation.
Parameters
----------
system : openmm.System
The system to modify.
Raises
------
ComposableStateError
If the system is not compatible with the state.
"""
pass
@abc.abstractmethod
def check_system_consistency(self, system):
"""Check if the system is in this state.
It raises a ComposableStateError if the system is not in
this state. This is called when the ThermodynamicState's
system is set with the ``fix_state`` argument set to False.
Parameters
----------
system : openmm.System
The system to test.
Raises
------
ComposableStateError
If the system is not consistent with this state.
"""
pass
@abc.abstractmethod
def apply_to_context(self, context):
"""Set the context to be in this state.
Parameters
----------
context : openmm.Context
The context to set.
Raises
------
ComposableStateError
If the context is not compatible with the state.
"""
pass
@abc.abstractmethod
def _standardize_system(self, system):
"""Standardize the given system.
ThermodynamicState relies on this method to create a standard
system that defines compatibility with another state or context.
The definition of a standard system is tied to the implementation
of apply_to_context. For example, if apply_to_context sets a
global parameter of the context, _standardize_system should
set the default value of the parameter in the system to a
standard value.
Parameters
----------
system : openmm.System
The system to standardize.
Raises
------
ComposableStateError
If the system is not compatible with the state.
"""
pass
@abc.abstractmethod
def _on_setattr(self, standard_system, attribute_name, old_composable_state):
"""Check if standard system needs to be updated after a state attribute is set.
This callback function is called after an attribute is set (i.e.
after __setattr__ is called on this state) or if an attribute whose
name starts with "set_" is requested (i.e. if a setter is retrieved
from this state through __getattr__).
Parameters
----------
standard_system : openmm.System
The standard system before setting the attribute.
attribute_name : str
The name of the attribute that has just been set or retrieved.
old_composable_state : IComposableState
A copy of the composable state before the attribute was set.
Returns
-------
need_changes : bool
True if the standard system has to be updated, False if no change
occurred.
Raises
------
        ComposableStateError
            If the attribute change puts the system in an inconsistent state.
"""
pass
@abc.abstractmethod
def _find_force_groups_to_update(self, context, current_context_state, memo):
"""Find the force groups whose energy must be recomputed after applying self.
This is used to compute efficiently the potential energy of the
same configuration in multiple thermodynamic states to minimize
the number of force evaluations.
Parameters
----------
context : Context
The context, currently in `current_context_state`, that will
be moved to this state.
current_context_state : ThermodynamicState
The full thermodynamic state of the given context. This is
guaranteed to be compatible with self.
memo : dict
A dictionary that can be used by the state for memoization
to speed up consecutive calls on the same context.
Returns
-------
force_groups_to_update : set of int
The indices of the force groups whose energy must be computed
again after applying this state, assuming the context to be in
`current_context_state`.
"""
pass
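# Illustrative sketch (hypothetical class, not part of the library): a minimal
# duck-typed composable state controlling a single global parameter 'lambda'.
# IComposableState uses subhook-based ABCs, so explicit inheritance is optional
# as long as all the abstract methods above are implemented.
#
#     class LambdaState(object):
#         def __init__(self, lambda_value=1.0):
#             self.lambda_value = lambda_value
#         def apply_to_system(self, system):
#             ...  # set the default value of the 'lambda' global parameter
#         def check_system_consistency(self, system):
#             ...  # raise ComposableStateError on mismatch
#         def apply_to_context(self, context):
#             context.setParameter('lambda', self.lambda_value)
#         def _standardize_system(self, system):
#             ...  # reset the default 'lambda' to a standard value (e.g. 1.0)
#         def _on_setattr(self, standard_system, attribute_name, old_composable_state):
#             return False  # 'lambda' changes never alter the standard system
#         def _find_force_groups_to_update(self, context, current_context_state, memo):
#             return set()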
class CompoundThermodynamicState(ThermodynamicState):
"""Thermodynamic state composed by multiple states.
    Allows extending a ThermodynamicState through composition rather
    than inheritance.
The class dynamically inherits from the ThermodynamicState object
given in the constructor, and it preserves direct access to all its
methods and attributes. It is compatible also with subclasses of
ThermodynamicState, but it does not support objects which make use
of __slots__.
    It is the user's responsibility to check that IComposableStates are
    compatible with each other (i.e. that they do not depend on and/or
    modify the same properties of the system). If this is not the case,
    consider merging them into a single IComposableState. If an
    IComposableState needs to access properties of ThermodynamicState
    (e.g. temperature, pressure) consider extending it through normal
    inheritance.
    It is not necessary to explicitly inherit from IComposableState for
    compatibility as long as all abstract methods are implemented. All
    its attributes and methods will still be directly accessible unless
    they are masked by the main ThermodynamicState or by an IComposableState
    that appears earlier in the constructor argument composable_states.
After construction, changing the original thermodynamic_state or
any of the composable_states changes the state of the compound state.
Parameters
----------
thermodynamic_state : ThermodynamicState
The main ThermodynamicState which holds the OpenMM system.
    composable_states : list of IComposableState
        Each element represents a portion of the overall thermodynamic
        state.
Examples
--------
Create an alchemically modified system.
>>> from openmmtools import testsystems, alchemy
>>> factory = alchemy.AbsoluteAlchemicalFactory(consistent_exceptions=False)
>>> alanine_vacuum = testsystems.AlanineDipeptideVacuum().system
>>> alchemical_region = alchemy.AlchemicalRegion(alchemical_atoms=range(22))
>>> alanine_alchemical_system = factory.create_alchemical_system(reference_system=alanine_vacuum,
... alchemical_regions=alchemical_region)
>>> alchemical_state = alchemy.AlchemicalState.from_system(alanine_alchemical_system)
    AlchemicalState implements the IComposableState interface, so it can be
    used with CompoundThermodynamicState. All the alchemical parameters are
    accessible through the compound state.
>>> import openmm
>>> from openmm import unit
>>> thermodynamic_state = ThermodynamicState(system=alanine_alchemical_system,
... temperature=300*unit.kelvin)
>>> compound_state = CompoundThermodynamicState(thermodynamic_state=thermodynamic_state,
... composable_states=[alchemical_state])
>>> compound_state.lambda_sterics
1.0
>>> compound_state.lambda_electrostatics
1.0
You can control the parameters in the OpenMM Context in this state by
setting the state attributes.
>>> compound_state.lambda_sterics = 0.5
>>> integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
>>> context = compound_state.create_context(integrator)
>>> context.getParameter('lambda_sterics')
0.5
>>> compound_state.lambda_sterics = 1.0
>>> compound_state.apply_to_context(context)
>>> context.getParameter('lambda_sterics')
1.0
"""
def __init__(self, thermodynamic_state, composable_states):
# Check that composable states expose the correct interface.
for composable_state in composable_states:
assert isinstance(composable_state, IComposableState)
# Copy internal attributes of thermodynamic state.
thermodynamic_state = copy.deepcopy(thermodynamic_state)
self.__dict__ = thermodynamic_state.__dict__
# Setting self._composable_states signals __setattr__ to start
# searching in composable states as well, so this must be the
# last new attribute set in the constructor.
composable_states = copy.deepcopy(composable_states)
self._composable_states = composable_states
# This call causes the thermodynamic state standard system
# to be standardized also w.r.t. all the composable states.
self.set_system(self._standard_system, fix_state=True)
def get_system(self, **kwargs):
"""Manipulate and return the system.
With default arguments, this is equivalent as the system property.
By setting the arguments it is possible to obtain a modified copy
of the system without the thermostat or the barostat.
Parameters
----------
remove_thermostat : bool
If True, the system thermostat is removed.
remove_barostat : bool
If True, the system barostat is removed.
Returns
-------
system : openmm.System
The system of this ThermodynamicState.
"""
system = super(CompoundThermodynamicState, self).get_system(**kwargs)
# The system returned by ThermodynamicState has standard parameters,
# so we need to set them to the actual value of the composable states.
for s in self._composable_states:
s.apply_to_system(system)
return system
def set_system(self, system, fix_state=False):
"""Allow to set the system and fix its thermodynamic state.
With default arguments, this is equivalent to assign the
system property, which raise an error if the system is in
a different thermodynamic state.
Parameters
----------
system : openmm.System
The system to set.
        fix_state : bool, optional
            If True, the thermodynamic state of the system will be fixed
            by all the composable states. Default is False.
See Also
--------
ThermodynamicState.set_system
"""
system = copy.deepcopy(system)
for s in self._composable_states:
if fix_state:
s.apply_to_system(system)
else:
s.check_system_consistency(system)
super(CompoundThermodynamicState, self)._unsafe_set_system(system, fix_state)
def is_context_compatible(self, context):
"""Check compatibility of the given context.
Parameters
----------
context : openmm.Context
The OpenMM context to test.
Returns
-------
is_compatible : bool
True if this ThermodynamicState can be applied to context.
See Also
--------
ThermodynamicState.is_context_compatible
"""
# We override ThermodynamicState.is_context_compatible to
# handle the case in which one of the composable states
# raises ComposableStateError when standardizing the context system.
try:
return super(CompoundThermodynamicState, self).is_context_compatible(context)
except ComposableStateError:
return False
def apply_to_context(self, context):
"""Apply this compound thermodynamic state to the context.
See Also
--------
ThermodynamicState.apply_to_context
"""
super(CompoundThermodynamicState, self).apply_to_context(context)
for s in self._composable_states:
s.apply_to_context(context)
def __getattr__(self, name):
def setter_decorator(funcs, composable_states):
def _setter_decorator(*args, **kwargs):
for func, composable_state in zip(funcs, composable_states):
old_state = copy.deepcopy(composable_state)
func(*args, **kwargs)
self._on_setattr_callback(composable_state, name, old_state)
return _setter_decorator
# Called only if the attribute couldn't be found in __dict__.
# In this case we fall back to composable state, in the given order.
attrs = []
composable_states = []
for s in self._composable_states:
try:
attr = getattr(s, name)
except AttributeError:
pass
else:
attrs.append(attr)
composable_states.append(s)
if len(attrs) > 0:
# If this is a setter, we need to set the attribute in all states
# and ensure that the callback is called in each of them.
if name.startswith('set_'):
                # Decorate the setter so that _on_setattr is called after the
                # attribute is modified. This also collapses the calls to the
                # multiple setters into a single function.
attr = setter_decorator(attrs, composable_states)
else:
if len(attrs) > 1 and not all(np.isclose(attrs[0], a) for a in attrs[1:]):
raise RuntimeError('The composable states of {} expose the same '
'attribute with different values: {}'.format(
self.__class__.__name__, set(attrs)))
attr = attrs[0]
return attr
# Attribute not found, fall back to normal behavior.
return super(CompoundThermodynamicState, self).__getattribute__(name)
def __setattr__(self, name, value):
# Add new attribute to CompoundThermodynamicState.
if '_composable_states' not in self.__dict__:
super(CompoundThermodynamicState, self).__setattr__(name, value)
# Update existing ThermodynamicState attribute (check ancestors).
# We can't use hasattr here because it calls __getattr__, which
# search in all composable states as well. This means that this
# will catch only properties and methods.
elif any(name in C.__dict__ for C in self.__class__.__mro__):
super(CompoundThermodynamicState, self).__setattr__(name, value)
# Update composable states attributes. This catches also normal
# attributes besides properties and methods.
else:
old_state = None
for s in self._composable_states:
try:
getattr(s, name)
except AttributeError:
pass
else:
old_state = copy.deepcopy(s)
s.__setattr__(name, value)
self._on_setattr_callback(s, name, old_state)
# No attribute found. This is monkey patching.
if old_state is None:
super(CompoundThermodynamicState, self).__setattr__(name, value)
def __getstate__(self, **kwargs):
"""Return a dictionary representation of the state."""
# Create original ThermodynamicState to serialize.
thermodynamic_state = object.__new__(self.__class__.__bases__[0])
thermodynamic_state.__dict__ = self.__dict__
# Set the instance _standardize_system method to CompoundState._standardize_system
# so that the composable states standardization will be called during serialization.
thermodynamic_state._standardize_system = self._standardize_system
serialized_thermodynamic_state = utils.serialize(thermodynamic_state, **kwargs)
# Serialize composable states.
serialized_composable_states = [utils.serialize(state)
for state in self._composable_states]
return dict(thermodynamic_state=serialized_thermodynamic_state,
composable_states=serialized_composable_states)
def __setstate__(self, serialization):
"""Set the state from a dictionary representation."""
serialized_thermodynamic_state = serialization['thermodynamic_state']
serialized_composable_states = serialization['composable_states']
thermodynamic_state = utils.deserialize(serialized_thermodynamic_state)
self.__dict__ = thermodynamic_state.__dict__
self._composable_states = [utils.deserialize(state)
for state in serialized_composable_states]
# -------------------------------------------------------------------------
# Internal-usage
# -------------------------------------------------------------------------
def _standardize_system(self, system):
"""Standardize the system.
Override ThermodynamicState._standardize_system to standardize
the system also with respect to all other composable states.
Raises
------
ComposableStateError
If it is impossible to standardize the system.
See Also
--------
ThermodynamicState._standardize_system
"""
super(CompoundThermodynamicState, self)._standardize_system(system)
for composable_state in self._composable_states:
composable_state._standardize_system(system)
def _on_setattr_callback(self, composable_state, attribute_name, old_composable_state):
"""Updates the standard system (and hash) after __setattr__."""
try:
change_standard_system = composable_state._on_setattr(self._standard_system, attribute_name, old_composable_state)
except TypeError:
change_standard_system = composable_state._on_setattr(self._standard_system, attribute_name)
# TODO Drop support for the old signature and remove deprecation warning from 0.17 on.
import warnings
old_signature = '_on_setattr(self, standard_system, attribute_name)'
new_signature = old_signature[:-1] + ', old_composable_state)'
warnings.warn('The signature IComposableState.{} has been deprecated, '
'and future versions of openmmtools will support only the '
'new one: {}.'.format(old_signature, new_signature))
if change_standard_system:
new_standard_system = copy.deepcopy(self._standard_system)
composable_state.apply_to_system(new_standard_system)
composable_state._standardize_system(new_standard_system)
self._update_standard_system(new_standard_system)
def _apply_to_context_in_state(self, context, thermodynamic_state):
super(CompoundThermodynamicState, self)._apply_to_context_in_state(context, thermodynamic_state)
for s in self._composable_states:
s.apply_to_context(context)
def _find_force_groups_to_update(self, context, current_context_state, memo):
"""Find the force groups to be recomputed when moving to the given state.
Override ThermodynamicState._find_force_groups_to_update to find
groups to update for changes of composable states.
"""
# Initialize memo: create new cache for each composable state.
if len(memo) == 0:
memo.update({i: {} for i in range(len(self._composable_states))})
# Find force group to update for parent class.
force_groups = super(CompoundThermodynamicState, self)._find_force_groups_to_update(
context, current_context_state, memo)
# Find force group to update for composable states.
for composable_state_idx, composable_state in enumerate(self._composable_states):
force_groups.update(composable_state._find_force_groups_to_update(
context, current_context_state, memo[composable_state_idx]))
return force_groups
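    # Illustrative sketch (hypothetical usage): callers can restrict the energy
    # evaluation to the returned force groups via OpenMM's group bitmask.
    #
    #     groups = compound_state._find_force_groups_to_update(context, current_state, memo={})
    #     mask = sum(1 << g for g in groups)
    #     energy = context.getState(getEnergy=True, groups=mask).getPotentialEnergy()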
# =============================================================================
# GLOBAL PARAMETER STATE
# =============================================================================
class GlobalParameterError(ComposableStateError):
"""Exception raised by ``GlobalParameterState``."""
pass
class GlobalParameterFunction(object):
"""A function of global parameters.
All the functions supported by ``openmmtools.utils.math_eval``
are supported.
Parameters
----------
expression : str
A mathematical expression involving global parameters.
See Also
--------
openmmtools.utils.math_eval
Examples
--------
>>> class MyComposableState(GlobalParameterState):
... gamma = GlobalParameterState.GlobalParameter('gamma', standard_value=1.0)
... lambda_angles = GlobalParameterState.GlobalParameter('lambda_angles', standard_value=1.0)
...
>>> composable_state = MyComposableState(gamma=1.0, lambda_angles=0.5)
>>> composable_state.set_function_variable('lambda', 0.5)
>>> composable_state.set_function_variable('lambda2', 1.0)
>>> composable_state.gamma = GlobalParameterFunction('lambda**2')
>>> composable_state.gamma
0.25
>>> composable_state.lambda_angles = GlobalParameterFunction('(lambda + lambda2) / 2')
>>> composable_state.lambda_angles
0.75
>>> composable_state.set_function_variable('lambda2', 0.5)
>>> composable_state.lambda_angles
0.5
"""
def __init__(self, expression):
self._expression = expression
def __call__(self, variables):
return utils.math_eval(self._expression, variables)
class GlobalParameterState(object):
"""A composable state controlling one or more OpenMM ``Force``'s global parameters.
This is a partially abstract class that provides facilities to implement
composable states that control one or more global parameters defined in
OpenMM ``Force`` objects. Global parameters are implemented through the
use of the ``GlobalParameterState.GlobalParameter`` descriptor.
A ``Force`` object can use one or more global parameters that are
controlled by the same state. Conversely, multiple ``Force``s can use
the same global parameter (i.e. with the same name) controlled by the
state object.
It is possible to enslave the global parameters to one or more arbitrary
variables entering a mathematical expression through the use of
``GlobalParameterFunction``. Global parameters that are associated to a
global parameter function are validated on get rather than on set.
Parameters
----------
parameters_name_suffix : str, optional
If specified, the state will control a modified version of the global
parameters with the name ``parameter_name + '_' + parameters_name_suffix``.
When this is the case, the normal parameters are not accessible.
**kwargs
The value of the parameters controlled by this state. Parameters
that are not passed here are left undefined.
Notes
-----
    The class automatically implements the static constructor ``from_system``
    that reads and creates a state object from an OpenMM ``System``. The function
    calls ``__init__`` and passes the parameter name suffix string as the
    first positional argument, so it is possible to overwrite ``__init__``
    and rename ``parameters_name_suffix`` as long as it is the first parameter
    of the constructor.
See Also
--------
GlobalParameterFunction
Examples
--------
Assume we have a ``System`` with a custom force object whose energy
function is controlled by two global variables called ``lambda_bonds``
and ``gamma``.
>>> import openmm
>>> from openmm import unit
>>> # Define a diatomic molecule.
>>> system = openmm.System()
>>> particle_idx = system.addParticle(40.0*unit.amu)
>>> particle_idx = system.addParticle(40.0*unit.amu)
>>> custom_force = openmm.CustomBondForce('lambda_bonds^gamma*60000*(r-0.15)^2;')
>>> parameter_idx = custom_force.addGlobalParameter('lambda_bonds', 1.0) # Default value is 1.0.
>>> parameter_idx = custom_force.addGlobalParameter('gamma', 1.0) # Default value is 1.0.
>>> bond_idx = custom_force.addBond(0, 1, [])
>>> force_index = system.addForce(custom_force)
>>> # Create a thermodynamic state object controlling the temperature of the system.
>>> thermodynamic_state = ThermodynamicState(system, temperature=300.0*unit.kelvin)
Define a composable state controlling the two global parameters ``gamma``
and ``lambda_bonds``, both with standard state value 0.0. Parameters that
are not specified in the constructor are left undefined.
>>> class MyComposableState(GlobalParameterState):
... gamma = GlobalParameterState.GlobalParameter('gamma', standard_value=1.0)
... lambda_bonds = GlobalParameterState.GlobalParameter('lambda_bonds', standard_value=1.0)
...
>>> my_composable_state = MyComposableState(gamma=1.0)
>>> my_composable_state.gamma
1.0
>>> my_composable_state.lambda_bonds is None
True
There is a second static constructor you can use to read the state
of an OpenMM ``System`` from the default values of its force parameters.
>>> my_composable_state = MyComposableState.from_system(system)
>>> my_composable_state.lambda_bonds
1.0
>>> my_composable_state.gamma
1.0
Optionally, you can limit the values that a global parameter can assume.
In this case, ``lambda_bonds`` is forced to be between 0.0 and 1.0.
>>> class MyComposableState(GlobalParameterState):
... gamma = GlobalParameterState.GlobalParameter('gamma', standard_value=0.0)
... lambda_bonds = GlobalParameterState.GlobalParameter('lambda_bonds', standard_value=0.0)
... @lambda_bonds.validator
... def lambda_bonds(self, instance, new_value):
... if new_value is not None and not (0.0 <= new_value <= 1.0):
... raise ValueError('lambda_bonds must be between 0.0 and 1.0')
... return new_value
...
>>> my_composable_state = MyComposableState(gamma=1.0)
>>> my_composable_state.lambda_bonds = 2.0
Traceback (most recent call last):
...
ValueError: lambda_bonds must be between 0.0 and 1.0
You can then add it to a ``CompoundThermodynamicState`` to manipulate
OpenMM ``System`` and ``Context`` objects.
>>> my_composable_state.lambda_bonds = 1.0
>>> compound_state = CompoundThermodynamicState(thermodynamic_state, composable_states=[my_composable_state])
>>> state_system = compound_state.get_system()
>>> state_system.getForce(0).getGlobalParameterDefaultValue(0) # lambda_bonds global parameter.
1.0
>>> compound_state.lambda_bonds = 0.0
>>> state_system = compound_state.get_system()
>>> state_system.getForce(0).getGlobalParameterDefaultValue(0) # lambda_bonds global parameter.
0.0
>>> context = compound_state.create_context(openmm.VerletIntegrator(1.0*unit.femtoseconds))
>>> context.getParameter('lambda_bonds')
0.0
>>> compound_state.lambda_bonds = 1.0
>>> compound_state.apply_to_context(context)
>>> context.getParameter('lambda_bonds')
1.0
    You can enslave global parameters to a mathematical expression
    involving arbitrary variables.
>>> compound_state.set_function_variable('lambda', 1.0)
>>> compound_state.lambda_bonds = GlobalParameterFunction('2*(lambda - 0.5) * step(lambda - 0.5)')
>>> for l in [0.5, 0.75, 1.0]:
... compound_state.set_function_variable('lambda', l)
... print(compound_state.lambda_bonds)
0.0
0.5
1.0
    If you need to control similar forces with the same logic, this
    parent class provides a suffix mechanism so that different global
    variables can be controlled by separate instances of the same
    state class. This allows reusing the same logic to control multiple forces.
>>> # Add a second custom force using similar global parameters.
>>> custom_force = openmm.CustomBondForce('lambda_bonds_mysuffix*20000*(r-0.15)^2;')
>>> parameter_idx = custom_force.addGlobalParameter('lambda_bonds_mysuffix', 1.0) # Default value is 1.0.
>>> bond_idx = custom_force.addBond(0, 1, [])
>>> force_idx = system.addForce(custom_force)
>>> # Create a state controlling the modified global parameter.
>>> my_composable_state = MyComposableState(parameters_name_suffix='mysuffix', lambda_bonds=0.0)
>>> my_composable_state.lambda_bonds_mysuffix = 1.0
>>> my_composable_state.gamma_mysuffix is None
True
>>> my_composable_state.apply_to_system(system)
    >>> # The unmodified parameter becomes inaccessible.
>>> my_composable_state.lambda_bonds
Traceback (most recent call last):
...
AttributeError: This state does not control lambda_bonds but lambda_bonds_mysuffix.
    Note also in the example above that the forces don't need to define
    all the global parameters controlled by the state. The state object
    performs checks to ensure that you won't try to set an undefined
    parameter.
>>> my_composable_state.gamma_mysuffix = 2
>>> my_composable_state.apply_to_system(system)
Traceback (most recent call last):
...
openmmtools.states.GlobalParameterError: Could not find global parameter gamma_mysuffix in the system.
"""
# This constant can be overwritten by inheriting classes to
# raise a custom exception class when an error is encountered.
_GLOBAL_PARAMETER_ERROR = GlobalParameterError
def __init__(self, parameters_name_suffix=None, **kwargs):
self._initialize(parameters_name_suffix=parameters_name_suffix, **kwargs)
@classmethod
def from_system(cls, system, parameters_name_suffix=None):
"""Static constructor reading the state from an OpenMM System.
Parameters
----------
system : openmm.System
An OpenMM ``System`` object defining a non-empty subset
of global parameters controlled by this state.
parameters_name_suffix : str, optional
If specified, the state will search for a modified
version of the global parameters with the name
``parameter_name + '_' + parameters_name_suffix``.
Returns
-------
The GlobalParameterState object representing the state of the system.
Raises
------
GlobalParameterError
If the same parameter has different values in the system, or
if the system has no global parameters controlled by this state.
"""
state_parameters = {}
for force, parameter_name, parameter_id in cls._get_system_controlled_parameters(
system, parameters_name_suffix):
if parameter_id >= force.getNumGlobalParameters():
raise cls._GLOBAL_PARAMETER_ERROR('Attempted to access system parameter {} (id {}) that does not exist in {}'.format(parameter_name, parameter_id, force.__class__.__name__))
parameter_value = force.getGlobalParameterDefaultValue(parameter_id)
# Check that we haven't already found
# the parameter with a different value.
if parameter_name in state_parameters:
if state_parameters[parameter_name] != parameter_value:
err_msg = ('Parameter {} has been found twice (Force {}) with two values: '
'{} and {}').format(parameter_name, force.__class__.__name__,
parameter_value, state_parameters[parameter_name])
raise cls._GLOBAL_PARAMETER_ERROR(err_msg)
else:
state_parameters[parameter_name] = parameter_value
# Check that the system can be controlled by this state.
if len(state_parameters) == 0:
err_msg = 'System has no global parameters controlled by this state.'
raise cls._GLOBAL_PARAMETER_ERROR(err_msg)
# Create and return the GlobalParameterState. The constructor of
# GlobalParameterState takes the parameters without the suffix, so
# we leave them undefined in the constructor and assign the attributes afterwards.
state = cls(parameters_name_suffix)
for parameter_name, parameter_value in state_parameters.items():
setattr(state, parameter_name, parameter_value)
return state
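# Example (hypothetical sketch, reusing the ``system`` and MyComposableState
# from the class docstring above): the state can be reconstructed directly
# from the default values stored in the system's custom forces.
#
#     state = MyComposableState.from_system(system)
#     state.lambda_bonds  # value read from the custom force's default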
# -------------------------------------------------------------------------
# Function variables
# -------------------------------------------------------------------------
def get_function_variable(self, variable_name):
"""Return the value of the function variable.
Function variables are _not_ global parameters but rather variables
entering mathematical expressions specified with ``GlobalParameterFunction``,
which can be used to enslave global parameters to arbitrary variables.
Parameters
----------
variable_name : str
The name of the function variable.
Returns
-------
variable_value : float
The value of the function variable.
"""
try:
variable_value = self._function_variables[variable_name]
except KeyError:
err_msg = 'Unknown function variable {}'.format(variable_name)
raise self._GLOBAL_PARAMETER_ERROR(err_msg)
return variable_value
def set_function_variable(self, variable_name, new_value):
"""Set the value of the function variable.
Function variables are _not_ global parameters but rather variables
entering mathematical expressions specified with ``GlobalParameterFunction``,
which can be used to enslave global parameters to arbitrary variables.
Parameters
----------
variable_name : str
The name of the function variable.
new_value : float
The new value for the variable.
"""
forbidden_variable_names = set(self._parameters)
if variable_name in forbidden_variable_names:
err_msg = ('Cannot have a function variable with the same name '
'as the predefined global parameter {}.'.format(variable_name))
raise self._GLOBAL_PARAMETER_ERROR(err_msg)
# Check that the new value is a scalar.
if not (np.isreal(new_value) and np.isscalar(new_value)):
err_msg = 'Only integers and floats can be assigned to a function variable.'
raise self._GLOBAL_PARAMETER_ERROR(err_msg)
self._function_variables[variable_name] = new_value
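# Example (sketch, reusing ``my_composable_state`` from the class docstring):
# function variables are plain scalars stored on the state, consumed by
# GlobalParameterFunction expressions rather than by OpenMM forces.
#
#     my_composable_state.set_function_variable('lambda', 0.5)
#     my_composable_state.get_function_variable('lambda')  # -> 0.5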
# -------------------------------------------------------------------------
# Operators
# -------------------------------------------------------------------------
def __eq__(self, other):
"""Equality operator.
Two GlobalParameterState objects are equal if they control the same global
parameters and all of them have the same values. This way the operator
preserves the commutative property.
"""
# Check if other is a global parameter state.
if not isinstance(other, GlobalParameterState):
return False
# Check that they define the same parameters.
if not set(self._parameters) == set(other._parameters):
return False
# Check that all values are the same
is_equal = True
for parameter_name in self._parameters:
self_value = getattr(self, parameter_name)
other_value = getattr(other, parameter_name)
is_equal = is_equal and self_value == other_value
return is_equal
def __ne__(self, other):
# TODO: we can safely remove this when dropping support for Python 2
return not self == other
def __str__(self):
return str(self._parameters)
# -------------------------------------------------------------------------
# Global parameters descriptor class.
# -------------------------------------------------------------------------
# The global parameter descriptor makes it easy for the user to
# create their own state classes. The set of controlled parameters is
# dynamically discovered by _get_controlled_parameters() by checking
# which descriptors are GlobalParameter objects.
class GlobalParameter(object):
"""Descriptor for a global parameter.
Parameters
----------
parameter_name : str
The name of the global parameter.
standard_value : float
The value of the global parameter in the standard state. This
is used to define the concept of compatible states (i.e. whether
a ``System`` or ``Context`` can be transformed from one state
to another).
validator : callable, optional
A function to call before setting a new value with signature
``validator(self, instance, new_value) -> validated_value``.
It is also possible to define this through the ``validator``
decorator.
"""
def __init__(self, parameter_name, standard_value, validator=None):
self.parameter_name = parameter_name
self.standard_value = standard_value
self.validator_func = validator
def __get__(self, instance, owner_class=None):
self._check_controlled(instance)
return instance._get_global_parameter_value(self.parameter_name, self)
def __set__(self, instance, new_value):
self._check_controlled(instance)
instance._set_global_parameter_value(self.parameter_name, new_value, self)
def validator(self, validator):
return self.__class__(self.parameter_name, self.standard_value, validator)
def _check_controlled(self, instance):
"""Raise GlobalParameterError if the parameter is not controlled by the state.
If the state uses a parameter name suffix, the normal parameter
name is not accessible.
"""
if instance._parameters_name_suffix is not None:
suffixed_parameter_name = self.parameter_name + '_' + instance._parameters_name_suffix
err_msg = 'This state does not control {} but {}.'.format(
self.parameter_name, suffixed_parameter_name)
raise AttributeError(err_msg)
# -------------------------------------------------------------------------
# Internal usage: IComposableState interface
# -------------------------------------------------------------------------
def apply_to_system(self, system):
"""Set the state of the system to this.
Parameters
----------
system : openmm.System
The system to modify.
Raises
------
GlobalParameterError
If the system does not define some of the global parameters
controlled by this state.
"""
parameters_applied = set()
for force, parameter_name, parameter_id in self._get_system_controlled_parameters(
system, self._parameters_name_suffix):
parameter_value = getattr(self, parameter_name)
if parameter_value is None:
err_msg = 'The system parameter {} is not defined in this state.'
raise self._GLOBAL_PARAMETER_ERROR(err_msg.format(parameter_name))
else:
if parameter_id >= force.getNumGlobalParameters():
raise self._GLOBAL_PARAMETER_ERROR('Attempted to access system parameter {} (id {}) that does not exist in {}'.format(parameter_name, parameter_id, force.__class__.__name__))
parameters_applied.add(parameter_name)
force.setGlobalParameterDefaultValue(parameter_id, parameter_value)
# Check that we set all the defined parameters.
for parameter_name in self._get_controlled_parameters(self._parameters_name_suffix):
if (self._parameters[parameter_name] is not None and
parameter_name not in parameters_applied):
err_msg = 'Could not find global parameter {} in the system.'
raise self._GLOBAL_PARAMETER_ERROR(err_msg.format(parameter_name))
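# Example (hypothetical sketch, reusing names from the class docstring):
# push the state's parameter values into the forces' default values. A
# defined parameter that is missing from the system, or an undefined one
# that the system does define, raises GlobalParameterError.
#
#     state = MyComposableState(lambda_bonds=0.5)
#     state.apply_to_system(system)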
def check_system_consistency(self, system):
"""Check if the system is in this state.
It raises a GlobalParameterError if the system is not consistent
with this state.
Parameters
----------
system : openmm.System
The system to test.
Raises
------
GlobalParameterError
If the system is not consistent with this state.
"""
system_state = self.__class__.from_system(system, self._parameters_name_suffix)
# Check if parameters are all the same.
if self != system_state:
err_msg = ('Consistency check failed:\n'
'\tSystem parameters {}\n'
'\t{} parameters {}')
class_name = self.__class__.__name__
raise self._GLOBAL_PARAMETER_ERROR(err_msg.format(system_state, class_name, self))
def apply_to_context(self, context):
"""Put the Context into this state.
Parameters
----------
context : openmm.Context
The context to set.
Raises
------
GlobalParameterError
If the context does not have the required global parameters.
"""
context_parameters = context.getParameters()
# Set the global parameters in Context.
for parameter_name in self._parameters:
parameter_value = getattr(self, parameter_name)
if parameter_value is None:
# Check that Context does not have this parameter.
if parameter_name in context_parameters:
err_msg = 'Context has parameter {} which is undefined in this state.'
raise self._GLOBAL_PARAMETER_ERROR(err_msg.format(parameter_name))
continue
try:
context.setParameter(parameter_name, parameter_value)
except Exception:
err_msg = 'Could not find parameter {} in context'
raise self._GLOBAL_PARAMETER_ERROR(err_msg.format(parameter_name))
def _standardize_system(self, system):
"""Standardize the given system.
Set all global parameters of the system to their standard value.
Parameters
----------
system : openmm.System
The system to standardize.
Raises
------
GlobalParameterError
If the system is not consistent with this state.
"""
# Collect all the global parameters' standard values.
standard_values = {}
controlled_parameters = self._get_controlled_parameters(self._parameters_name_suffix)
for parameter_name, parameter_descriptor in controlled_parameters.items():
standard_values[parameter_name] = parameter_descriptor.standard_value
# Create a standard state.
standard_state = copy.deepcopy(self)
for parameter_name, standard_value in standard_values.items():
# Skip undefined parameters.
if getattr(standard_state, parameter_name) is not None:
setattr(standard_state, parameter_name, standard_value)
# Standardize the system.
standard_state.apply_to_system(system)
def _on_setattr(self, standard_system, attribute_name, old_global_parameter_state):
"""Check if the standard system needs changes after a state attribute is set.
Parameters
----------
standard_system : openmm.System
The standard system before setting the attribute.
attribute_name : str
The name of the attribute that has just been set or retrieved.
old_global_parameter_state : GlobalParameterState
A copy of the composable state before the attribute was set.
Returns
-------
need_changes : bool
True if the standard system has to be updated, False if no change
occurred.
"""
# There are no attributes that can be set that can alter the standard system,
# but if a parameter goes from defined to undefined, we should raise an error.
old_attribute_value = getattr(old_global_parameter_state, attribute_name)
new_attribute_value = getattr(self, attribute_name)
if (old_attribute_value is None) != (new_attribute_value is None):
err_msg = 'Cannot set the parameter {} in the system from {} to {}'.format(
attribute_name, old_attribute_value, new_attribute_value)
# Set back the old value to maintain a consistent state in case the exception
# is caught. If this attribute was associated to a GlobalParameterFunction,
# we need to retrieve the original function object before setting.
old_attribute_value = old_global_parameter_state._get_global_parameter_value(
attribute_name, resolve_function=False)
setattr(self, attribute_name, old_attribute_value)
raise self._GLOBAL_PARAMETER_ERROR(err_msg)
return False
def _find_force_groups_to_update(self, context, current_context_state, memo):
"""Find the force groups whose energy must be recomputed after applying self.
Parameters
----------
context : Context
The context, currently in `current_context_state`, that will
be moved to this state.
current_context_state : ThermodynamicState
The full thermodynamic state of the given context. This is
guaranteed to be compatible with self.
memo : dict
A dictionary that can be used by the state for memoization
to speed up consecutive calls on the same context.
Returns
-------
force_groups_to_update : set of int
The indices of the force groups whose energy must be computed
again after applying this state, assuming the context to be in
`current_context_state`.
"""
# Cache information about system force groups.
# We create a dictionary "memo" mapping parameter_name -> list of force groups to update.
if len(memo) == 0:
system = context.getSystem()
system_parameters = self._get_system_controlled_parameters(system, self._parameters_name_suffix)
for force, parameter_name, _ in system_parameters:
# Keep track of valid parameters only.
if self._parameters[parameter_name] is not None:
try:
memo[parameter_name].append(force.getForceGroup())
except KeyError:
memo[parameter_name] = [force.getForceGroup()]
# Find lambda parameters that will change.
force_groups_to_update = set()
for parameter_name, force_groups in memo.items():
self_parameter_value = getattr(self, parameter_name)
current_parameter_value = getattr(current_context_state, parameter_name)
if self_parameter_value != current_parameter_value:
force_groups_to_update.update(force_groups)
return force_groups_to_update
# -------------------------------------------------------------------------
# Internal usage: Attributes handling
# -------------------------------------------------------------------------
@classmethod
def _get_controlled_parameters(cls, parameters_name_suffix=None):
"""Return a set of the global parameters controlled by the state class.
This is constructed dynamically by introspection gathering all the
descriptors that belong to the GlobalParameter class.
Parameters
----------
parameters_name_suffix : str, optional
If specified, this returns the set of parameter names with the
name suffix.
Returns
-------
controlled_parameters : dict of str -> GlobalParameter
A map from the name of the controlled parameter to the
GlobalParameter descriptor handling it.
"""
if parameters_name_suffix is None:
suffix = ''
else:
suffix = '_' + parameters_name_suffix
# TODO just use inspect.getmembers when dropping Python 2 which automatically resolves the MRO.
# controlled_parameters = {name + suffix: descriptor for name, descriptor in inspect.getmembers(cls)
# if isinstance(descriptor, cls.GlobalParameter)}
controlled_parameters = {name + suffix: descriptor for c in inspect.getmro(cls)
for name, descriptor in c.__dict__.items()
if isinstance(descriptor, cls.GlobalParameter)}
return controlled_parameters
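# Example (sketch): for the MyComposableState class in the docstring above,
# introspection discovers one descriptor per GlobalParameter attribute.
#
#     MyComposableState._get_controlled_parameters()
#     # -> {'gamma': <GlobalParameter>, 'lambda_bonds': <GlobalParameter>}
#     MyComposableState._get_controlled_parameters('mysuffix')
#     # -> {'gamma_mysuffix': ..., 'lambda_bonds_mysuffix': ...}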
def _validate_global_parameter(self, parameter_name, parameter_value, descriptor=None):
"""Return the validated parameter value using the descriptor validator.
Parameters
----------
parameter_name : str
Parameter name (with optional suffix) to validate.
parameter_value : float
Parameter value to validate. If a GlobalParameterFunction is associated
to the parameter, this must be evaluated before calling this.
descriptor : GlobalParameterState.GlobalParameter, optional
If None, the function automatically looks for the descriptor associated
with this parameter and calls its validator (if any). This search is
skipped if this argument is provided.
Returns
-------
validated_value : float
The validated value.
Raises
------
KeyError
If parameter_name is not controlled by this state.
"""
if descriptor is None:
# Get the descriptors of all controlled parameters.
controlled_parameters = self._get_controlled_parameters(self._parameters_name_suffix)
# Call validator, before setting the parameter. This raises KeyError.
descriptor = controlled_parameters[parameter_name]
if descriptor.validator_func is not None:
parameter_value = descriptor.validator_func(descriptor, self, parameter_value)
return parameter_value
def _get_global_parameter_value(self, parameter_name, descriptor=None, resolve_function=True):
"""Retrieve the current value of a global parameter.
Parameters
----------
parameter_name : str
Parameter name (with optional suffix) to validate.
descriptor : GlobalParameterState.GlobalParameter, optional
If None, and the parameter is associated with a GlobalParameterFunction,
the function automatically looks for the descriptor associated with this
parameter and calls its validator (if any). This search is skipped if
this argument is provided. Default is None.
resolve_function : bool, optional
If False and the parameter is associated to a GlobalParameterFunction,
the function is not evaluated (and its result is not validated), and
the GlobalParameterFunction object is returned instead. Default is True.
Returns
-------
parameter_value : float
The parameter value.
Raises
------
KeyError
If parameter_name is not controlled by this state.
"""
parameter_value = self._parameters[parameter_name]
if resolve_function and isinstance(parameter_value, GlobalParameterFunction):
parameter_value = parameter_value(self._function_variables)
# If the value is generated through a mathematical expression,
# we validate the value after the expression is evaluated rather
# than on setting.
parameter_value = self._validate_global_parameter(parameter_name, parameter_value, descriptor)
return parameter_value
def _set_global_parameter_value(self, parameter_name, new_value, descriptor=None):
"""Set the value of a global parameter.
Parameters
----------
parameter_name : str
Parameter name (with optional suffix) to validate.
new_value : float or GlobalParameterFunction
The new parameter value.
descriptor : GlobalParameterState.GlobalParameter, optional
If None, and the parameter is not a GlobalParameterFunction, the function
automatically looks for the descriptor associated with this parameter and
calls its validator (if any). This search is skipped if this argument is
provided.
Raises
------
KeyError
If parameter_name is not controlled by this state.
"""
# Check if the parameter is defined and raise KeyError otherwise.
if parameter_name not in self._parameters:
raise KeyError(parameter_name)
# If the value is generated through a mathematical expression,
# we validate the value after the expression is evaluated rather
# than on setting.
if not isinstance(new_value, GlobalParameterFunction):
new_value = self._validate_global_parameter(parameter_name, new_value, descriptor)
self._parameters[parameter_name] = new_value
def __getattr__(self, key):
"""Handles global parameters with a suffix."""
# __getattr__ is called only if the item is not found in the
# usual ways, so we don't need to handle GlobalParameter here.
try:
parameter_value = self._get_global_parameter_value(key)
except KeyError:
# Parameter not found, fall back to normal behavior.
parameter_value = super(GlobalParameterState, self).__getattribute__(key)
return parameter_value
def __setattr__(self, key, value):
"""Handles global parameters with a suffix."""
# Check if the object has been initialized and we can
# start resolving dynamically suffixed parameters.
if '_parameters_name_suffix' in self.__dict__ and self._parameters_name_suffix is not None:
try:
self._set_global_parameter_value(key, value)
except KeyError:
pass
else:
return
# This is not a "suffixed" parameter. Fallback to normal behavior.
super(GlobalParameterState, self).__setattr__(key, value)
@classmethod
def _get_system_controlled_parameters(cls, system, parameters_name_suffix):
"""Yields the controlled global parameters defined in the System.
Yields
------
A tuple force, parameter_name, parameter_index for each supported
lambda parameter.
"""
searched_parameters = cls._get_controlled_parameters(parameters_name_suffix)
# Retrieve all the forces with global supported parameters.
for force_index in range(system.getNumForces()):
force = system.getForce(force_index)
try:
n_global_parameters = force.getNumGlobalParameters()
except AttributeError:
continue
for parameter_id in range(n_global_parameters):
parameter_name = force.getGlobalParameterName(parameter_id)
if parameter_name in searched_parameters:
yield force, parameter_name, parameter_id
def __getstate__(self):
"""Return a dictionary representation of the state."""
serialization = dict(
parameters={},
function_variables=self._function_variables.copy(),
parameters_name_suffix=self._parameters_name_suffix
)
# Copy parameters. We serialize the parameters with their original name
# (i.e., without suffix) because we'll pass them to _initialize().
if self._parameters_name_suffix is None:
suffix = ''
else:
suffix = '_' + self._parameters_name_suffix
for parameter_name in self._get_controlled_parameters():
parameter_value = self._parameters[parameter_name + suffix]
# Convert global parameter function into string expressions.
if isinstance(parameter_value, GlobalParameterFunction):
parameter_value = parameter_value._expression
serialization['parameters'][parameter_name] = parameter_value
return serialization
def __setstate__(self, serialization):
"""Set the state from a dictionary representation."""
parameters = serialization['parameters']
# parameters_name_suffix is optional for backwards compatibility since openmmtools 0.16.0.
parameters_name_suffix = serialization.get('parameters_name_suffix', None)
# Global parameter functions have been added in openmmtools 0.17.0.
function_variables = serialization.get('function_variables', {})
# Temporarily store global parameter functions.
global_parameter_functions = {}
for parameter_name, value in parameters.items():
if isinstance(value, str):
global_parameter_functions[parameter_name] = value
parameters[parameter_name] = None
# Initialize parameters and add all function variables.
self._initialize(parameters_name_suffix=parameters_name_suffix, **parameters)
for variable_name, value in function_variables.items():
self.set_function_variable(variable_name, value)
# Add global parameter functions.
if parameters_name_suffix is not None:
parameters_name_suffix = '_' + parameters_name_suffix
else:
parameters_name_suffix = ''
for parameter_name, expression in global_parameter_functions.items():
setattr(self, parameter_name + parameters_name_suffix, GlobalParameterFunction(expression))
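# Example (sketch): __getstate__/__setstate__ make states serializable, with
# GlobalParameterFunction expressions stored as plain string expressions.
#
#     import pickle
#     state_copy = pickle.loads(pickle.dumps(my_composable_state))
#     assert state_copy == my_composable_state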
# -------------------------------------------------------------------------
# Internal usage: Initialization
# -------------------------------------------------------------------------
def _initialize(self, parameters_name_suffix=None, **kwargs):
"""Initialize the state.
It takes the global parameters and their values as keyword arguments.
Controlled parameters that are not passed are left undefined (i.e. are
set to None).
"""
self._function_variables = {}
# Get controlled parameters from introspection.
controlled_parameters = set(self._get_controlled_parameters())
# Check for unknown parameters
unknown_parameters = set(kwargs) - controlled_parameters
if len(unknown_parameters) > 0:
err_msg = "Unknown parameters {}".format(unknown_parameters)
raise self._GLOBAL_PARAMETER_ERROR(err_msg)
# Append suffix to parameters before storing them internally.
if parameters_name_suffix is not None:
kwargs = {key + '_' + parameters_name_suffix: value for key, value in kwargs.items()}
controlled_parameters = {key + '_' + parameters_name_suffix for key in controlled_parameters}
# Default value for all parameters is None.
self._parameters = dict.fromkeys(controlled_parameters, None)
# This signals to __setattr__ that we can start resolving dynamically
# suffixed parameters so it should be the last direct assignment.
self._parameters_name_suffix = parameters_name_suffix
# Update parameters with constructor arguments.
for parameter_name, value in kwargs.items():
setattr(self, parameter_name, value)
if __name__ == '__main__':
import doctest
doctest.testmod()
# doctest.run_docstring_examples(CompoundThermodynamicState, globals())
|
choderalab/openmmtools
|
openmmtools/states.py
|
Python
|
mit
| 165,224
|
[
"OpenMM"
] |
325486bbdd1534d4e113365f95c1c49e225f72437cc1e513b4914ba557ab8e04
|
# -*- coding: utf-8 -*-
# Utility functions for moose.
import types
import token
import symbol
import math
from datetime import datetime
from collections import defaultdict
import re
import logging
logger_ = logging.getLogger("moose.utils")
import moose
from moose.moose_constants import *
from moose.print_utils import *
try:
from moose.network_utils import *
except Exception as e:
logger_.warn("Netowrk utilities are not loaded due to %s" % e)
# Print and Plot utilities.
try:
from moose.plot_utils import *
except Exception as e:
logger_.warn("Plot utilities are not loaded due to '%s'" % e)
def create_table_path(model, graph, element, field):
field = field[0].upper() + field[1:]
tablePathSuffix = element.path.partition(model.path)[-1]
if tablePathSuffix.startswith("/"):
tablePathSuffix = tablePathSuffix[1:]
tablePathSuffix = tablePathSuffix.replace("/", "_") + "." + field
# tablePathSuffix = re.sub(
# ".", lambda m: {"[": "_", "]": "_"}.get(m.group(), m.group()), tablePathSuffix
# )
#if tablePathSuffix.startswith("_0__"):
# tablePathSuffix = tablePathSuffix[4:]
# tablePath = dataroot + '/' +tablePath
tablePath = graph.path + "/" + tablePathSuffix
return tablePath
def create_table(tablePath, element, field, tableType):
"""Create table to record `field` from element `element`
Tables are created under `dataRoot`, the names are generally
created by removing `/model` in the beginning of `elementPath`
and replacing `/` with `_`. If this conflicts with an existing
table, the id value of the target element (elementPath) is
appended to the name.
"""
if moose.exists(tablePath):
table = moose.element(tablePath)
else:
if tableType == "Table2":
table = moose.Table2(tablePath)
elif tableType == "Table":
table = moose.Table(tablePath)
moose.connect(table, "requestOut", element, "get%s" % (field))
return table
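# Example (hypothetical sketch, assuming a compartment at '/model/soma' and a
# data neutral at '/data'): record the membrane potential into a Table.
#
#     soma = moose.element('/model/soma')
#     vm_table = create_table('/data/soma_Vm', soma, 'Vm', 'Table')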
def readtable(table, filename, separator=None):
"""Reads the file specified by filename to fill the MOOSE table object.
The file can either have one float on each line, in that case the
table will be filled with values sequentially.
Or, the file can have
index value
on each line. If the separator between index and value is anything other than
white space, you can specify it in the separator argument."""
in_file = open(filename)
ii = 0
line_no = 0
for line in in_file:
line_no = line_no + 1
tokens = line.split(separator)
if not tokens:
continue
elif len(tokens) == 1:
table[ii] = float(tokens[0])
elif len(tokens) == 2:
table[int(tokens[0])] = float(tokens[1])
else:
print(
"pymoose.readTable(",
table,
",",
filename,
",",
separator,
") - line#",
line_no,
" does not fit.",
)
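# Example (hypothetical sketch, assuming 'vm.dat' holds either one float per
# line or "index value" pairs):
#
#     tab = moose.Table('/data/vm_in')
#     readtable(tab, 'vm.dat')                 # whitespace-separated
#     readtable(tab, 'vm.csv', separator=',')  # comma-separated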
def getfields(moose_object):
"""Returns a dictionary of the fields and values in this object."""
field_names = moose_object.getFieldNames("valueFinfo")
fields = {}
for name in field_names:
fields[name] = moose_object.getField(name)
return fields
def findAllBut(moose_wildcard, stringToExclude):
allValidObjects = moose.wildcardFind(moose_wildcard)
refinedList = []
for validObject in allValidObjects:
if validObject.path.find(stringToExclude) == -1:
refinedList.append(validObject)
return refinedList
def apply_to_tree(moose_wildcard, python_filter=None, value=None):
"""
Select objects by a moose/genesis wildcard, apply a python filter on them and apply a value on them.
moose_wildcard - this follows GENESIS convention.
{path}/#[{condition}] returns all elements directly under {path} that satisfy condition. For example:
'/mynetwork/mycell_0/#[TYPE=Compartment]'
will return all Compartment objects directly under mycell_0 in mynetwork.
'{path}/##[{condition}]' will recursively go through all the
objects that are under {path} (i.e. children, grandchildren,
great-grandchildren and so on up to the leaf level) and a list of
the ones that meet {condition} will be obtained.
Thus, '/mynetwork/##[TYPE=Compartment]' will return all
compartments under mynetwork or its children, or children thereof
and so on.
python_filter - if a single string, it will be taken as a
fieldname, and value will be assigned to this field. It can also
be a lambda function returning True or False which will be applied
to each id in the id list returned by moose wildcard
search. Remember, the argument to the lambda will be an Id, so it
is up to you to wrap it into a moose object of appropriate type. An example is:
lambda moose_id: Compartment(moose_id).diameter < 2e-6
If your moose_wildcard selected objects of Compartment class, then
this lambda function will select only those with diameter less
than 2 um.
value - can be a lambda function to apply arbitrary operations on
the selected objects.
If python_filter is a string, the return value of applying the
lambda value() will be assigned to the field specified by
python_filter.
But if value is a plain data object and {python_filter} is a
string, then {value} will be assigned to the field named
{python_filter}.
If you want to assign Rm = 1e6 for each compartment in mycell
whose name match 'axon_*':
apply_to_tree('/mycell/##[Class=Compartment]',
lambda x: 'axon_' in Neutral(x).name,
lambda x: setattr(Compartment(x), 'Rm', 1e6))
[you must use setattr to assign value to a field because lambda
functions don't allow assignments].
"""
if not isinstance(moose_wildcard, str):
raise TypeError("moose_wildcard must be a string.")
id_list = moose.getWildcardList(moose_wildcard, True)
if isinstance(python_filter, types.LambdaType):
id_list = [moose_id for moose_id in id_list if python_filter(moose_id)]
elif isinstance(python_filter, str):
id_list = [
moose_id
for moose_id in id_list
if hasattr(
eval("moose.%s(moose_id)" % (moose.Neutral(moose_id).className)),
python_filter,
)
]
else:
pass
if isinstance(value, types.LambdaType):
if isinstance(python_filter, str):
for moose_id in id_list:
moose_obj = eval(
"moose.%s(moose_id)" % (moose.Neutral(moose_id).className)
)
setattr(moose_obj, python_filter, value(moose_id))
else:
for moose_id in id_list:
value(moose_id)
else:
if isinstance(python_filter, str):
for moose_id in id_list:
moose_obj = eval(
"moose.%s(moose_id)" % (moose.Neutral(moose_id).className)
)
setattr(moose_obj, python_filter, value)
else:
raise TypeError(
"Second argument must be a string specifying a field to assign to when third argument is a value"
)
# 2012-01-11 19:20:39 (+0530) Subha: checked for compatibility with dh_branch
def printtree(root, vchar="|", hchar="__", vcount=1, depth=0, prefix="", is_last=False):
"""Pretty-print a MOOSE tree.
root - the root element of the MOOSE tree, must be some derivative of Neutral.
vchar - the character printed to indicate vertical continuation of
a parent child relationship.
hchar - the character printed just before the node name
vcount - determines how many lines will be printed between two
successive nodes.
depth - for internal use - should not be explicitly passed.
prefix - for internal use - should not be explicitly passed.
is_last - for internal use - should not be explicitly passed.
"""
root = moose.element(root)
# print('%s: "%s"' % (root, root.children))
for i in range(vcount):
print(prefix)
if depth != 0:
print(prefix + hchar, end=" ")
if is_last:
index = prefix.rfind(vchar)
prefix = prefix[:index] + " " * (len(hchar) + len(vchar)) + vchar
else:
prefix = prefix + " " * len(hchar) + vchar
else:
prefix = prefix + vchar
print(root.name)
children = []
for child_vec in root.children:
try:
child = moose.element(child_vec)
children.append(child)
except TypeError:
pass
# print 'TypeError:', child_vec, 'when converting to element.'
for i in range(0, len(children) - 1):
printtree(children[i], vchar, hchar, vcount, depth + 1, prefix, False)
if len(children) > 0:
printtree(children[-1], vchar, hchar, vcount, depth + 1, prefix, True)
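# Example (sketch): pretty-print the element tree rooted at the model,
# e.g. printtree('/model') or printtree(moose.element('/model')).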
def df_traverse(root, operation, *args):
"""Traverse the tree in a depth-first manner and apply the
operation using *args. The first argument is the root object by
default."""
if hasattr(root, "_visited"):
return
operation(root, *args)
for child in root.children:
childNode = moose.Neutral(child)
df_traverse(childNode, operation, *args)
root._visited = True
def autoposition(root):
"""Automatically set the positions of the endpoints of all the
compartments under `root`.
This keeps x and y constant and extends the positions in
z-direction only. This of course puts everything in a single line
but suffices for keeping electrical properties intact.
TODO: in future we may want to create the positions for nice
visual layout as well. My last attempt resulted in some
compartments overlapping in space.
"""
compartments = moose.wildcardFind("%s/##[TYPE=Compartment]" % (root.path))
stack = [
compartment
for compartment in map(moose.element, compartments)
if len(compartment.neighbors["axial"]) == 0
]
assert len(stack) == 1, (
"There must be one and only one top level compartment. Found %d"
% len(stack)
)
ret = stack[0]
while len(stack) > 0:
comp = stack.pop()
parentlist = comp.neighbors["axial"]
parent = None
if len(parentlist) > 0:
parent = parentlist[0]
comp.x0, comp.y0, comp.z0, = parent.x, parent.y, parent.z
else:
comp.x0, comp.y0, comp.z0, = (0, 0, 0)
if comp.length > 0:
comp.x, comp.y, comp.z, = comp.x0, comp.y0, comp.z0 + comp.length
else:
# for spherical compartments x0, y0, z0 are centre
# position and x, y, z are on the surface
comp.x, comp.y, comp.z, = comp.x0, comp.y0, comp.z0 + comp.diameter / 2.0
# We take z == 0 as an indicator that this compartment has not
# been processed before - saves against inadvertent loops.
stack.extend(
[
childcomp
for childcomp in map(moose.element, comp.neighbors["raxial"])
if childcomp.z == 0
]
)
return ret
def loadModel(filename, target, method="ee"):
moose.loadModel(filename, target)
moose.mooseAddChemSolver(target, method)
if moose.exists(target + "/kinetics/info"):
moose.element(target + "/kinetics/info").solver = method
def readcell_scrambled(filename, target, method="ee"):
"""A special version for handling cases where a .p file has a line
with specified parent yet to be defined.
It creates a temporary file with a sorted version based on
connectivity, so that parent is always defined before child."""
pfile = open(filename, "r")
tmpfilename = filename + ".tmp"
graph = defaultdict(list)
data = {}
error = None
root = None
ccomment_started = False
current_compt_params = []
for line in pfile:
tmpline = line.strip()
if not tmpline or tmpline.startswith("//"):
continue
elif tmpline.startswith("/*"):
ccomment_started = True
if tmpline.endswith("*/"):
ccomment_started = False
if ccomment_started:
continue
if tmpline.startswith("*set_compt_param"):
current_compt_params.append(tmpline)
continue
node, parent = tmpline.split()[:2]
if parent == "none":
if root is None:
root = node
else:
raise ValueError(
"Duplicate root elements: ",
root,
node,
"> Cannot process any further.",
)
break
graph[parent].append(node)
data[node] = "\n".join(current_compt_params)
tmpfile = open(tmpfilename, "w")
stack = [root]
while stack:
current = stack.pop()
children = graph[current]
stack.extend(children)
tmpfile.write(data[current])
tmpfile.close()
ret = moose.loadModel(tmpfilename, target, method)
return ret
## Subha: In many scenarios resetSim is too rigid and focussed on
## neuronal simulation. The setDefaultDt and
## assignTicks/assignDefaultTicks keep the process of assigning dt to
## ticks and assigning ticks to objects separate. reinit() should be
## explicitly called by user just before running a simulation, and not
## when setting it up.
def updateTicks(tickDtMap):
"""Try to assign dt values to ticks.
Parameters
----------
tickDtMap: dict
map from tick-no. to dt value. if it is empty, then default dt
values are assigned to the ticks.
"""
for tickNo, dt in list(tickDtMap.items()):
if tickNo >= 0 and dt > 0.0:
moose.setClock(tickNo, dt)
if all([(v == 0) for v in list(tickDtMap.values())]):
setDefaultDt()
def assignTicks(tickTargetMap):
"""
Assign ticks to target elements.
Parameters
----------
tickTargetMap:
Map from tick no. to target path and method. The path can be wildcard expression also.
"""
if len(tickTargetMap) == 0:
assignDefaultTicks()
for tickNo, target in list(tickTargetMap.items()):
if not isinstance(target, str):
if len(target) == 1:
moose.useClock(tickNo, target[0], "process")
elif len(target) == 2:
moose.useClock(tickNo, target[0], target[1])
else:
moose.useClock(tickNo, target, "process")
# # This is a hack, we need saner way of scheduling
# ticks = moose.vec('/clock/tick')
# valid = []
# for ii in range(ticks[0].localNumField):
# if ticks[ii].dt > 0:
# valid.append(ii)
# if len(valid) == 0:
# assignDefaultTicks()
def setDefaultDt(elecdt=1e-5, chemdt=0.01, tabdt=1e-5, plotdt1=1.0, plotdt2=0.25e-3):
"""Setup the ticks with dt values.
Parameters
----------
elecdt: dt for ticks used in computing electrical biophysics, like
neuronal compartments, ion channels, synapses, etc.
chemdt: dt for chemical computations like enzymatic reactions.
tabdt: dt for lookup tables
plotdt1: dt for chemical kinetics plotting
plotdt2: dt for electrical simulations
"""
moose.setClock(0, elecdt)
moose.setClock(1, elecdt)
moose.setClock(2, elecdt)
moose.setClock(3, elecdt)
moose.setClock(4, chemdt)
moose.setClock(5, chemdt)
moose.setClock(6, tabdt)
moose.setClock(7, tabdt)
moose.setClock(8, plotdt1) # kinetics sim
moose.setClock(9, plotdt2) # electrical sim
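# Example (sketch): a finer electrical dt with the default chemical dt,
# followed by the default tick-to-target assignment.
#
#     setDefaultDt(elecdt=5e-6)
#     assignDefaultTicks(modelRoot='/model', dataRoot='/data')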
def assignDefaultTicks(modelRoot="/model", dataRoot="/data", solver="hsolve"):
if not isinstance(modelRoot, str):
modelRoot = modelRoot.path
if not isinstance(dataRoot, str):
dataRoot = dataRoot.path
if (
solver != "hsolve"
or len(moose.wildcardFind("%s/##[ISA=HSolve]" % (modelRoot))) == 0
):
moose.useClock(0, "%s/##[ISA=Compartment]" % (modelRoot), "init")
moose.useClock(1, "%s/##[ISA=Compartment]" % (modelRoot), "process")
moose.useClock(2, "%s/##[ISA=HHChannel]" % (modelRoot), "process")
# moose.useClock(2, '%s/##[ISA=ChanBase]' % (modelRoot), 'process')
moose.useClock(0, "%s/##[ISA=IzhikevichNrn]" % (modelRoot), "process")
moose.useClock(0, "%s/##[ISA=GapJunction]" % (modelRoot), "process")
moose.useClock(0, "%s/##[ISA=HSolve]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=LeakyIaF]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=IntFire]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=SpikeGen]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=PulseGen]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=StimulusTable]" % (modelRoot), "process")
moose.useClock(1, "%s/##[ISA=TimeTable]" % (modelRoot), "process")
moose.useClock(2, "%s/##[ISA=HHChannel2D]" % (modelRoot), "process")
moose.useClock(2, "%s/##[ISA=SynChan]" % (modelRoot), "process")
moose.useClock(2, "%s/##[ISA=MgBlock]" % (modelRoot), "process")
moose.useClock(3, "%s/##[ISA=CaConc]" % (modelRoot), "process")
moose.useClock(3, "%s/##[ISA=Func]" % (modelRoot), "process")
# The voltage clamp circuit depends critically on the dt used for
# computing soma Vm and need to be on a clock with dt=elecdt.
moose.useClock(0, "%s/##[ISA=DiffAmp]" % (modelRoot), "process")
moose.useClock(0, "%s/##[ISA=VClamp]" % (modelRoot), "process")
moose.useClock(0, "%s/##[ISA=PIDController]" % (modelRoot), "process")
moose.useClock(0, "%s/##[ISA=RC]" % (modelRoot), "process")
# Special case for kinetics models
kinetics = moose.wildcardFind("%s/##[FIELD(name)=kinetics]" % modelRoot)
if len(kinetics) > 0:
# Do nothing for kinetics models - until multiple scheduling issue is fixed.
moose.useClock(4, "%s/##[ISA!=PoolBase]" % (kinetics[0].path), "process")
moose.useClock(5, "%s/##[ISA==PoolBase]" % (kinetics[0].path), "process")
moose.useClock(18, "%s/##[ISA=Table2]" % (dataRoot), "process")
else:
# input() function is called in Table. process() which gets
# called at each timestep. When a message is connected
# explicitly to input() dest field, it is driven by the sender
# and process() adds garbage value to the vector. Hence not to
# be scheduled.
for tab in moose.wildcardFind("%s/##[ISA=Table]" % (dataRoot)):
if len(tab.neighbors["input"]) == 0:
moose.useClock(9, tab.path, "process")
def stepRun(simtime, steptime, verbose=True, logger=None):
"""Run the simulation in steps of `steptime` for `simtime`."""
global logger_
if logger is None:
logger = logger_
clock = moose.element("/clock")
if verbose:
msg = "Starting simulation for %g" % (simtime)
logger_.info(msg)
ts = datetime.now()
while clock.currentTime < simtime - steptime:
ts1 = datetime.now()
moose.start(steptime)
te = datetime.now()
td = te - ts1
if verbose:
msg = "Simulated till %g. Left: %g. %g of simulation took: %g s" % (
clock.currentTime,
simtime - clock.currentTime,
steptime,
td.days * 86400 + td.seconds + 1e-6 * td.microseconds,
)
logger_.info(msg)
remaining = simtime - clock.currentTime
if remaining > 0:
if verbose:
msg = "Running the remaining %g." % (remaining)
logger_.info(msg)
moose.start(remaining)
te = datetime.now()
td = te - ts
dt = min([t for t in moose.element("/clock").dts if t > 0.0])
if verbose:
msg = "Finished simulation of %g with minimum dt=%g in %g s" % (
simtime,
dt,
td.days * 86400 + td.seconds + 1e-6 * td.microseconds,
)
logger_.info(msg)
############# added by Aditya Gilra -- begin ################
def resetSim(simpaths, simdt, plotdt, simmethod="hsolve"):
""" For each of the MOOSE paths in simpaths, this sets the clocks and finally resets MOOSE.
If simmethod=='hsolve', it sets up hsolve-s for each Neuron under simpaths, and clocks for hsolve-s too. """
print("Solver:", simmethod)
moose.setClock(INITCLOCK, simdt)
moose.setClock(ELECCLOCK, simdt) # The hsolve and ee methods use clock 1
moose.setClock(
CHANCLOCK, simdt
) # hsolve uses clock 2 for mg_block, nmdachan and others.
moose.setClock(POOLCLOCK, simdt) # Ca/ion pools & funcs use clock 3
moose.setClock(STIMCLOCK, simdt) # DiffAmp, VClamp, PIDController, RC and TimeTable use this clock
moose.setClock(PLOTCLOCK, plotdt) # for tables
for simpath in simpaths:
## User can connect [qty]Out of an element to input of Table or
## requestOut of Table to get[qty] of the element.
## Scheduling the Table to a clock tick, will call process() of the Table
## which will send a requestOut and overwrite any value set by input(),
## thus adding garbage value to the vector. Hence schedule only if
## input message is not connected to the Table.
for table in moose.wildcardFind(simpath + "/##[TYPE=Table]"):
if len(table.neighbors["input"]) == 0:
moose.useClock(PLOTCLOCK, table.path, "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=PulseGen]", "process")
moose.useClock(STIMCLOCK, simpath + "/##[TYPE=DiffAmp]", "process")
moose.useClock(STIMCLOCK, simpath + "/##[TYPE=VClamp]", "process")
moose.useClock(STIMCLOCK, simpath + "/##[TYPE=PIDController]", "process")
moose.useClock(STIMCLOCK, simpath + "/##[TYPE=RC]", "process")
moose.useClock(STIMCLOCK, simpath + "/##[TYPE=TimeTable]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=LeakyIaF]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=IntFire]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=IzhikevichNrn]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=SpikeGen]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=Interpol]", "process")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=Interpol2D]", "process")
moose.useClock(CHANCLOCK, simpath + "/##[TYPE=HHChannel2D]", "process")
moose.useClock(CHANCLOCK, simpath + "/##[TYPE=SynChan]", "process")
## If simmethod is not hsolve, set clocks for the biophysics,
## else just put a clock on the hsolve:
## hsolve takes care of the clocks for the biophysics
if "hsolve" not in simmethod.lower():
print("Using exp euler")
moose.useClock(INITCLOCK, simpath + "/##[TYPE=Compartment]", "init")
moose.useClock(ELECCLOCK, simpath + "/##[TYPE=Compartment]", "process")
moose.useClock(CHANCLOCK, simpath + "/##[TYPE=HHChannel]", "process")
moose.useClock(POOLCLOCK, simpath + "/##[TYPE=CaConc]", "process")
moose.useClock(POOLCLOCK, simpath + "/##[TYPE=Func]", "process")
else: # use hsolve, one hsolve for each Neuron
print("Using hsolve")
element = moose.Neutral(simpath)
for childid in element.children:
childobj = moose.element(childid)
classname = childobj.className
if classname in ["Neuron"]:
neuronpath = childobj.path
h = moose.HSolve(neuronpath + "/solve")
h.dt = simdt
h.target = neuronpath
moose.useClock(INITCLOCK, h.path, "process")
moose.reinit()
def setupTable(name, obj, qtyname, tables_path=None, threshold=None, spikegen=None):
""" Sets up a table with 'name' which stores 'qtyname' field from 'obj'.
The table is created under tables_path if not None, else under obj.path + '/data'. """
if tables_path is None:
tables_path = obj.path + "/data"
## in case tables_path does not exist, below wrapper will create it
tables_path_obj = moose.Neutral(tables_path)
qtyTable = moose.Table(tables_path_obj.path + "/" + name)
assert qtyTable, "%s not found" % qtyTable
## stepMode no longer supported, connect to 'input'/'spike' message dest to record Vm/spike times
# qtyTable.stepMode = TAB_BUF
if spikegen is None:
if threshold is None:
## below is wrong! reads qty twice every clock tick!
# moose.connect( obj, qtyname+'Out', qtyTable, "input")
## this is the correct method
moose.connect(qtyTable, "requestOut", obj, "get" + qtyname)
else:
## create new spikegen
spikegen = moose.SpikeGen(tables_path_obj.path + "/" + name + "_spikegen")
## connect the compartment Vm to the spikegen
moose.connect(obj, "VmOut", spikegen, "Vm")
## spikegens for different synapse_types can have different thresholds
spikegen.threshold = threshold
spikegen.edgeTriggered = (
1 # This ensures that spike is generated only on leading edge.
)
else:
moose.connect(
spikegen, "spikeOut", qtyTable, "input"
) ## spikeGen gives spiketimes
return qtyTable
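# Example (hypothetical sketch, assuming a soma compartment): record Vm into
# a table created under '/data'.
#
#     vm_tab = setupTable('soma_Vm', soma, 'Vm', tables_path='/data')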
def connectSynapse(compartment, synname, gbar_factor):
"""
Creates a synname synapse under compartment, sets Gbar*gbar_factor, and attaches to compartment.
synname must be a synapse in /library of MOOSE.
"""
synapseid = moose.copy(moose.SynChan("/library/" + synname), compartment, synname)
synapse = moose.SynChan(synapseid)
synapse.Gbar = synapse.Gbar * gbar_factor
synapse_mgblock = moose.Mstring(synapse.path + "/mgblockStr")
if (
synapse_mgblock.value == "True"
): # If NMDA synapse based on mgblock, connect to mgblock
mgblock = moose.Mg_block(synapse.path + "/mgblock")
compartment.connect("channel", mgblock, "channel")
else:
compartment.connect("channel", synapse, "channel")
return synapse
def printNetTree():
""" Prints all the cells under /, and recursive prints the cell tree for each cell. """
root = moose.Neutral("/")
for id in root.children: # all subelements of 'root'
if moose.Neutral(id).className == "Cell":
cell = moose.Cell(id)
print(
"-------------------- CELL : ",
cell.name,
" ---------------------------",
)
printCellTree(cell)
def printCellTree(cell):
"""
Prints the tree under MOOSE object 'cell'.
Assumes cells have all their compartments one level below;
there should be nothing other than compartments at that level.
Apart from compartment properties and messages,
it displays the same for subelements of compartments only one level below the compartments.
Thus NMDA synapses' mgblock-s will be left out.
FIXME: no length found on compartment.
"""
for compartmentid in cell.children: # compartments
comp = moose.Compartment(compartmentid)
print(
" |-",
comp.path,
"l=",
comp.length,
"d=",
comp.diameter,
"Rm=",
comp.Rm,
"Ra=",
comp.Ra,
"Cm=",
comp.Cm,
"EM=",
comp.Em,
)
# for inmsg in comp.inMessages():
# print " |---", inmsg
# for outmsg in comp.outMessages():
# print " |---", outmsg
printRecursiveTree(
compartmentid, level=2
) # for channels and synapses and recursively lower levels
## Use printCellTree which calls this
def printRecursiveTree(elementid, level):
""" Recursive helper function for printCellTree,
specify depth/'level' to recurse and print subelements under MOOSE 'elementid'. """
spacefill = " " * level
element = moose.Neutral(elementid)
for childid in element.children:
childobj = moose.Neutral(childid)
classname = childobj.className
if classname in ["SynChan", "KinSynChan"]:
childobj = moose.SynChan(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"Gbar=",
childobj.Gbar,
"numSynapses=",
childobj.numSynapses,
)
return # Have yet to figure out the children of SynChan, currently not going deeper
elif classname in ["HHChannel", "HHChannel2D"]:
childobj = moose.HHChannel(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"Gbar=",
childobj.Gbar,
"Ek=",
childobj.Ek,
)
elif classname in ["CaConc"]:
childobj = moose.CaConc(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"thick=",
childobj.thick,
"B=",
childobj.B,
)
elif classname in ["Mg_block"]:
childobj = moose.Mg_block(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"CMg",
childobj.CMg,
"KMg_A",
childobj.KMg_A,
"KMg_B",
childobj.KMg_B,
)
elif classname in ["SpikeGen"]:
childobj = moose.SpikeGen(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"threshold",
childobj.threshold,
)
elif classname in ["Func"]:
childobj = moose.Func(childid)
print(
spacefill + "|--",
childobj.name,
childobj.className,
"expr",
childobj.expr,
)
elif classname in [
"Table"
]: # Table gives segfault if printRecursiveTree is called on it
return # so go no deeper
# for inmsg in childobj.inMessages():
# print spacefill+" |---", inmsg
# for outmsg in childobj.outMessages():
# print spacefill+" |---", outmsg
if len(childobj.children) > 0:
printRecursiveTree(childid, level + 1)
def setup_vclamp(compartment, name, delay1, width1, level1, gain=0.5e-5):
"""
Sets up a voltage clamp with 'name' on MOOSE 'compartment' object:
adapted from squid.g in DEMOS (moose/genesis)
Specify the 'delay1', 'width1' and 'level1' of the voltage to be applied to the compartment.
Typically you need to adjust the PID 'gain'
For e.g. the Davison 4-compartment mitral or the Davison granule cell:
0.5e-5 is the optimal gain - too high (0.5e-4) drives it to oscillate at high frequency,
while too low (0.5e-6) gives an initial overshoot (due to Na channels?).
Returns a MOOSE table with the PID output.
"""
## If /elec doesn't exist it creates /elec and returns a reference to it.
## If it does, it just returns its reference.
moose.Neutral("/elec")
pulsegen = moose.PulseGen("/elec/pulsegen" + name)
vclamp = moose.DiffAmp("/elec/vclamp" + name)
vclamp.saturation = 999.0
vclamp.gain = 1.0
lowpass = moose.RC("/elec/lowpass" + name)
lowpass.R = 1.0
lowpass.C = 50e-6 # 50 microseconds tau
PID = moose.PIDController("/elec/PID" + name)
PID.gain = gain
PID.tau_i = 20e-6
PID.tau_d = 5e-6
PID.saturation = 999.0
# All connections should be written as source.connect('',destination,'')
pulsegen.connect("outputSrc", lowpass, "injectMsg")
lowpass.connect("outputSrc", vclamp, "plusDest")
vclamp.connect("outputSrc", PID, "commandDest")
PID.connect("outputSrc", compartment, "injectMsg")
compartment.connect("VmSrc", PID, "sensedDest")
pulsegen.trigMode = 0 # free run
pulsegen.baseLevel = -70e-3
pulsegen.firstDelay = delay1
pulsegen.firstWidth = width1
pulsegen.firstLevel = level1
pulsegen.secondDelay = 1e6
pulsegen.secondLevel = -70e-3
pulsegen.secondWidth = 0.0
vclamp_I = moose.Table("/elec/vClampITable" + name)
vclamp_I.stepMode = TAB_BUF # TAB_BUF: table acts as a buffer.
vclamp_I.connect("inputRequest", PID, "output")
vclamp_I.useClock(PLOTCLOCK)
return vclamp_I
def setup_iclamp(compartment, name, delay1, width1, level1):
"""
Sets up a current clamp with 'name' on MOOSE 'compartment' object:
Specify the 'delay1', 'width1' and 'level1' of the current pulse to be applied to the compartment.
Returns the MOOSE pulsegen that sends the current pulse.
"""
## If /elec doesn't exist it creates /elec and returns a reference to it.
## If it does, it just returns its reference.
moose.Neutral("/elec")
pulsegen = moose.PulseGen("/elec/pulsegen" + name)
iclamp = moose.DiffAmp("/elec/iclamp" + name)
iclamp.saturation = 1e6
iclamp.gain = 1.0
pulsegen.trigMode = 0 # free run
pulsegen.baseLevel = 0.0
pulsegen.firstDelay = delay1
pulsegen.firstWidth = width1
pulsegen.firstLevel = level1
pulsegen.secondDelay = 1e6 # to avoid repeat
pulsegen.secondLevel = 0.0
pulsegen.secondWidth = 0.0
pulsegen.connect("output", iclamp, "plusIn")
iclamp.connect("output", compartment, "injectMsg")
return pulsegen
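# Example (hypothetical sketch, assuming a soma compartment): inject a
# 100 pA current pulse starting at t=20 ms and lasting 100 ms.
#
#     soma = moose.Compartment('/model/cell/soma')
#     pg = setup_iclamp(soma, '_soma', delay1=20e-3, width1=100e-3,
#                       level1=100e-12)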
def get_matching_children(parent, names):
""" Returns non-recursive children of 'parent' MOOSE object
with their names containing any of the strings in list 'names'. """
matchlist = []
for childID in parent.children:
child = moose.Neutral(childID)
for name in names:
if name in child.name:
matchlist.append(childID)
return matchlist
def underscorize(path):
""" Returns: / replaced by underscores in 'path'.
But async13 branch has indices in the path like [0],
so just replacing / by _ is not enough,
should replace [ and ] also by _ """
path = path.replace("/", "_")
# remove [0] from path.
path = path.replace('[0]', '')
# now remove [\d+] with -\d-'
return re.sub(r'\[(\d+)\]', '-\1-', path)
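# Example (worked through the steps above):
#     underscorize('/model/cell[0]/dend[12]')  # -> '_model_cell_dend-12-'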
def blockChannels(cell, channel_list):
"""
Sets gmax to zero for channels of the 'cell' specified in 'channel_list'
Substring matches in channel_list are allowed
e.g. 'K' should block all K channels (ensure that you don't use capital K elsewhere in your channel name!)
"""
for compartmentid in cell.children: # compartments
comp = moose.Compartment(compartmentid)
for childid in comp.children:
child = moose.Neutral(childid)
if child.className in ["HHChannel", "HHChannel2D"]:
chan = moose.HHChannel(childid)
for channame in channel_list:
if channame in chan.name:
chan.Gbar = 0.0
def get_child_Mstring(mooseobject, mstring):
for child in mooseobject.children:
if child.className == "Mstring" and child.name == mstring:
child = moose.element(child)
return child
return None
def connect_CaConc(compartment_list, temperature=None):
""" Connect the Ca pools and channels within each of the compartments in compartment_list
Ca channels should have a child Mstring named 'ion' with value set in MOOSE.
Ca dependent channels like KCa should have a child Mstring called 'ionDependency' with value set in MOOSE.
Call this only after instantiating cell so that all channels and pools have been created. """
for compartment in compartment_list:
caconc = None
for child in compartment.children:
if child.className == "CaConc":
caconc = moose.element(child)
break
if caconc is not None:
child = get_child_Mstring(caconc, "phi")
if child is not None:
caconc.B = float(
child.value
) # B = phi by definition -- see neuroml 1.8.1 defn
else:
## B has to be set for caconc based on thickness of Ca shell and compartment l and dia,
## OR based on the Mstring phi under CaConc path.
## I am using a translation from Neuron for mitral cell, hence this method.
## In Genesis, gmax / (surfacearea*thick) is set as value of B!
caconc.B = (
1
/ (2 * FARADAY)
/ (
math.pi
* compartment.diameter
* compartment.length
* caconc.thick
)
)
for neutralwrap in compartment.children:
if neutralwrap.className == "HHChannel":
channel = moose.HHChannel(neutralwrap)
## If child Mstring 'ion' is present and is Ca, connect channel current to caconc
for childid in channel.children:
# in async13, gates which have not been created still 'exist'
# i.e. show up as a child, but cannot be wrapped.
try:
child = moose.element(childid)
if child.className == "Mstring":
child = moose.Mstring(child)
assert child
if child.name == "ion":
if child.value in ["Ca", "ca"]:
moose.connect(
channel, "IkOut", caconc, "current"
)
# print 'Connected IkOut of',channel.path,'to current of',caconc.path
## temperature is used only by Nernst part here...
if child.name == "nernst_str":
nernst = moose.Nernst(channel.path + "/nernst")
nernst_params = child.value.split(",")
nernst.Cout = float(nernst_params[0])
nernst.valence = int(nernst_params[1])
nernst.Temperature = temperature
moose.connect(nernst, "Eout", channel, "setEk")
moose.connect(caconc, "concOut", nernst, "ci")
# print 'Connected Nernst',nernst.path
except TypeError:
pass
if neutralwrap.className == "HHChannel2D":
                channel = moose.HHChannel2D(neutralwrap)
## If child Mstring 'ionDependency' is present, connect caconc Ca conc to channel
for childid in channel.children:
# in async13, gates which have not been created still 'exist'
# i.e. show up as a child, but cannot be wrapped.
try:
child = moose.element(childid)
if (
child.className == "Mstring"
and child.name == "ionDependency"
):
child = moose.Mstring(child)
if child.value in ["Ca", "ca"]:
moose.connect(caconc, "concOut", channel, "concen")
# print 'Connected concOut of',caconc.path,'to concen of',channel.path
except TypeError as e:
logger_.warning(e)
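# Usage sketch for connect_CaConc (hypothetical paths; assumes the cell and
# all of its channels/CaConc pools have already been instantiated, and that
# moose.wildcardFind is available in this MOOSE build):
#   comps = [moose.Compartment(x) for x in moose.wildcardFind('/model/cell/#[ISA=Compartment]')]
#   connect_CaConc(comps, temperature=32.0)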
|
dilawar/moose-core
|
python/moose/utils.py
|
Python
|
gpl-3.0
| 40,615
|
[
"MOOSE",
"NEURON"
] |
1123e3214367b379a319a927a95afd31a98e75d01051238554ecc880fdbedce6
|
#!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform, signal
class Debugger:
"""
The Debugger class is the entry point to our stack tracing capabilities.
    It determines which debugger to use based on parsed arguments and
platform specs.
"""
def __init__(self, arguments):
if arguments.debugger == 'lldb':
self.debugger = lldbAPI(arguments)
else:
self.debugger = DebugInterpreter(arguments)
def getProcess(self, pid):
return self.debugger.getProcess(pid)
def getStackTrace(self, getProcess_tuple):
return self.debugger.getStackTrace(getProcess_tuple)
class lldbAPI:
def __init__(self, arguments):
self.debugger = lldb.SBDebugger.Create()
self.debugger.SetAsync(True)
def __del__(self):
lldb.SBDebugger.Destroy(self.debugger)
def getProcess(self, pid):
# Create and attach to the pid and return our debugger as a tuple
target = self.debugger.CreateTargetWithFileAndArch(None, None)
return target, pid
def getStackTrace(self, process_tuple):
target, pid = process_tuple
lldb_results = []
# reuse the process object if available
        if target.process.id != 0:
process = target.Attach(lldb.SBAttachInfo(target.process.id), lldb.SBError())
else:
process = target.Attach(lldb.SBAttachInfo(int(pid)), lldb.SBError())
# test if we succeeded at attaching to PID process
if process:
# grab thread information
lldb_results.append(process.GetThreadAtIndex(0).__str__())
# iterate through all frames and collect back trace information
for i in xrange(process.GetThreadAtIndex(0).GetNumFrames()):
lldb_results.append(process.GetThreadAtIndex(0).GetFrameAtIndex(i).__str__())
# Unfortunately we must detach each time we perform a stack
# trace. This severely limits our sample rate. It _appears_ to
            # be a bug in LLDB's Python API. Otherwise we would be able to:
#
# process.Stop()
# ..collect back trace..
# process.Continue()
#
# instead we have to:
process.Detach()
return '\n'.join(lldb_results)
else:
return ''
class DebugInterpreter:
"""
    Currently, interfacing with LLDB via subprocess is impossible, because lldb does not print
    to stdout or stderr when displaying its prompt (which would inform the user that the debugger
    is ready to receive input). However, this class may someday be able to, which is why
the self.debugger variable is present.
"""
def __init__(self, arguments):
self.last_position = 0
self.debugger = arguments.debugger
def _parseStackTrace(self, gibberish):
        not_gibberish = re.findall(r'\(' + self.debugger + r'\) (#.*)\(' + self.debugger + r'\)', gibberish, re.DOTALL)
if len(not_gibberish) != 0:
return not_gibberish[0]
else:
            # Return a blank line, so as not to pollute the log. Gibberish here
# usually indicates a bunch of warnings or information about
# loading symbols
return ''
def _waitForResponse(self, dbg_stdout):
# Allow a maximum of 5 seconds to obtain a debugger prompt position.
# Otherwise we can hang indefinitely
end_queue = time.time() + float(5)
while time.time() < end_queue:
dbg_stdout.seek(self.last_position)
for line in dbg_stdout:
if line == '(' + self.debugger + ') ':
self.last_position = dbg_stdout.tell()
return True
time.sleep(0.01)
return False
def getProcess(self, pid):
# Create a temporary file the debugger can write stdout/err to
dbg_stdout = SpooledTemporaryFile()
        # Create and attach to the running process
process = subprocess.Popen([which(self.debugger)], stdin=subprocess.PIPE, stdout=dbg_stdout, stderr=dbg_stdout)
for command in [ 'attach ' + pid + '\n' ]:
if self._waitForResponse(dbg_stdout):
try:
process.stdin.write(command)
except:
return (False, self.debugger, 'quit unexpectedly')
else:
return (False, 'could not attach to process in allotted time')
return (process, dbg_stdout)
def getStackTrace(self, process_tuple):
process, dbg_stdout = process_tuple
# Store our current file position so we can return to it and read
# the eventual entire stack trace output
batch_position = dbg_stdout.tell()
# Loop through commands necessary to create a back trace
for command in ['ctrl-c', 'bt\n', 'c\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
if self._waitForResponse(dbg_stdout):
process.stdin.write(command)
else:
dbg_stdout.seek(batch_position)
return self.detachProcess(process_tuple)
# Return to previous file position so that we can return the entire
# stack trace
dbg_stdout.seek(batch_position)
return self._parseStackTrace(dbg_stdout.read())
def detachProcess(self, process):
process, dbg_stdout = process
# Offset the position due to ctrl-c not generating a newline event
tmp_position = (dbg_stdout.tell() - 1)
for command in ['ctrl-c', 'quit\n', 'y\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
                # When these two variables are not equal, it's a safe assumption the
# debugger is ready to receive input
if tmp_position != dbg_stdout.tell():
tmp_position = dbg_stdout.tell()
try:
process.stdin.write(command)
except:
# Because we are trying to detach and quit the debugger just pass
pass
# Always return True for a detach call. What would we do if it failed anyway?
# Why am I even leaving a comment about this?
return True
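# Usage sketch for the debugger classes above (hypothetical pid; in this
# script the Agent class below drives them via takeSample()/_getStack()):
#   debugger = Debugger(arguments)             # wraps lldbAPI or DebugInterpreter
#   proc_tuple = debugger.getProcess('1234')   # attach to a running process
#   trace = debugger.getStackTrace(proc_tuple)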
class Server:
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
                    # Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# if All Agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, \
'\nNodes:', ', '.join(nodes), \
'\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)', \
'\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
+ 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \
+ os.path.abspath(__file__) \
+ ' --call-back-host ' \
+ self.host + ' ' + str(self.port) \
+ '"'])
# remote into each node and execute another copy of memory_logger.py
        # with a call back argument to receive further instructions
for pbs_node in command:
subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the binary we intend to track
def _launchJob(self):
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log)
# A connection has been made from client to server
    # Capture that data, and determine what to do with it
def handleAgent(self):
# Loop through all client connections, and receive data if any
for agent_socket in self.client_connections:
# Completely ignore the server_socket object
if agent_socket == self.server_socket:
continue
# Assign an AgentConnector for the task of handling data between client and server
reporting_agent = AgentConnector(self.arguments, agent_socket)
# OK... get data from a client and begin
new_data = reporting_agent.readData()
if new_data != None:
                # There should be only one dictionary key (we're reading data from just one client at a time)
agent_uuid = new_data.keys()[0]
# Update our dictionary of an agents data
self.agent.agent_data[agent_uuid] = new_data[agent_uuid]
                # Modify the incoming Agent's timestamp to match the Server's time (because every node is a little bit off)
if self.arguments.recover:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta
else:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now
# update total usage for all known reporting agents
total_usage = 0
for one_agent in self.agent.agent_data.keys():
total_usage += self.agent.agent_data[one_agent]['MEMORY']
self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage)
                # Get any stdout that's happened thus far and apply it to whatever agent just sent us data
self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout()
# Write to our logfile
self.logfile.write(self.agent.agent_data[agent_uuid])
# Check for any agents sending a stop command. If we find one,
# set some zeroing values, and close that agent's socket.
if self.agent.agent_data[agent_uuid]['STOP']:
self.agent.agent_data[agent_uuid]['MEMORY'] = 0
agent_socket.close()
if agent_socket != self.server_socket:
self.client_connections.remove(agent_socket)
# Go ahead and set our server agent to STOP as well.
# The server will continue recording samples from agents
self.agent.agent_data['server']['STOP'] = True
# If an Agent has made a request for instructions, handle it here
update_client = False
if new_data[agent_uuid]['REQUEST'] != None:
for request in new_data[agent_uuid]['REQUEST'].iteritems():
if new_data[agent_uuid]['REQUEST'][request[0]] == '':
update_client = True
                            # We only support sending any arguments supplied to the server, back to the agent
for request_type in dir(self.arguments):
if request[0] == str(request_type):
self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0])
# If an Agent needed additional instructions, go ahead and re-send those instructions
if update_client:
reporting_agent.sendData(self.agent.agent_data[agent_uuid])
class Client:
def __init__(self, arguments):
self.arguments = arguments
# Initialize an Agent with a UUID based on our hostname
self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname())))
# Initialize an AgentConnector
self.remote_server = AgentConnector(self.arguments)
# If client will talk to a server (PBS)
if self.arguments.call_back_host:
# We know by initializing an agent, agent_data contains the necessary message asking for further instructions
self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data)
# Apply new instructions received from server (this basically updates our arguments)
for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems():
for request_type in dir(self.arguments):
if request[0] == str(request_type):
setattr(self.arguments, request[0], request[1])
# Requests have been satisfied, set to None
self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None
# Change to the same directory as the server was when initiated (needed for PBS stuff)
os.chdir(self.arguments.cwd)
# Client will not be talking to a server, save data to a file instead
else:
# Deal with --recover
if self.arguments.recover:
# Do not overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
# Overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], True)
# Lets begin!
self.startProcess()
    # This function handles the starting and stopping of the sampler process.
# We loop until an agent returns a stop command.
def startProcess(self):
AGENTS_ACTIVE = True
# If we know we are the only client, go ahead and start the process we want to track.
if self.arguments.call_back_host == None:
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log)
# Delay just a bit to keep from recording a possible zero memory usage as the binary starts up
time.sleep(self.arguments.sample_delay[0])
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to process data until an Agent reports a STOP command
while AGENTS_ACTIVE:
# Take a sample
current_data = self.my_agent.takeSample()
# Handle the data supplied by the Agent.
self._handleData(current_data)
# If an Agent reported a STOP command, go ahead and begin the shutdown phase
if current_data[current_data.keys()[0]]['STOP']:
AGENTS_ACTIVE = False
                # Sleep just a bit between samples, so as not to saturate the machine
time.sleep(self.arguments.repeat_rate[-1])
# An agent reported a stop command... so let everyone know where the log was saved, and exit!
if self.arguments.call_back_host == None:
print 'Binary has exited and a log file has been written. You can now attempt to view this file by running' \
'\nthe memory_logger with either the --plot or --read arguments:\n\n', sys.argv[0], '--plot', self.arguments.outfile[0], \
'\n\nSee --help for additional viewing options.'
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
self.logfile.close()
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Everything went smooth.
sys.exit(0)
# Figure out what to do with the sampled data
def _handleData(self, data):
# Sending the sampled data to a server
if self.arguments.call_back_host:
self.remote_server.sendData(data)
# Saving the sampled data to a file
else:
# Compute the TOTAL memory usage to be how much our one agent reported
            # Because we're the only client doing any work
data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY']
self.logfile.write(data[self.my_agent.my_uuid])
# If the agent has been told to stop, close the database file
if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True:
self.logfile.close()
class AgentConnector:
"""
Functions used to communicate to and from Client and Server.
Both Client and Server classes use this object.
readData()
sendData('message', socket_connection=None)
if sendData's socket_connection is None, it will create a new connection to the server
based on supplied arguments
"""
def __init__(self, arguments, connection=None):
self.arguments = arguments
self.connection = connection
self.CREATED_CONNECTION = False
# If the connection is None, meaning this object was instanced by a client,
# we must create a connection to the server first
if self.connection == None and self.arguments.call_back_host != None:
self.CREATED_CONNECTION = True
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1])))
# read all data sent by an agent
def readData(self):
# Get how much data there is to receive
# The first eight bytes is our data length
data_width = int(self.connection.recv(8))
tmp_received = ''
        # We need to receive precisely the amount of data the
# client is trying to send us.
while len(tmp_received) < data_width:
if data_width - len(tmp_received) > 1024:
tmp_received += self.connection.recv(1024)
else:
tmp_received += self.connection.recv(data_width - (len(tmp_received)))
# unpickle the received message
return self._unpickleMessage(tmp_received)
# send data to an agent
def sendData(self, message):
# pickle the data up, and send the message
self.connection.sendall(self._pickleMessage(message))
# If we had to create the socket (connection was none), and this client/agent is requesting
# instructions, go ahead and read the data that _better be there_ sent to us by the server.
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None:
return self.readData()
# The following two functions pickle up the data for easy socket transport
def _pickleMessage(self, message):
t = TemporaryFile()
pickle.dump(message, t)
t.seek(0)
str_msg = t.read()
str_len = len(str_msg)
message = "%-8d" % (str_len,) + str_msg
return message
def _unpickleMessage(self, message):
t = TemporaryFile()
t.write(message)
t.seek(0)
try:
return pickle.load(t)
except KeyError:
print 'Socket data was not pickled data: ', message
except:
raise
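# Wire format used by AgentConnector: an 8-byte, left-justified decimal length
# header ("%-8d") followed by the pickled payload. Illustrative frame:
#   '142     ' + <142 bytes of pickle data>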
class WriteCSV:
def __init__(self, logfile, overwrite):
if overwrite:
self.file_object = open(logfile, 'w', 1)
else:
self.file_object = open(logfile, 'a', 1)
csv.field_size_limit(sys.maxsize)
self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Close the logfile
def close(self):
self.file_object.close()
# Write a CSV row
def write(self, data):
formatted_string = self._formatString(data)
self.log_file.writerow(formatted_string)
# Format the CSV output
def _formatString(self, data):
# We will be saving this data in CSV format. Before we do, lets format it a bit here
format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY']
formatted_text = []
for item in format_order:
# We have to handle python's way of formatting floats to strings specially
if item == 'TIMESTAMP':
formatted_text.append('%.6f' % data[item])
else:
formatted_text.append(data[item])
return formatted_text
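# Illustrative row as written by WriteCSV, in the column order enforced by
# _formatString (TIMESTAMP, TOTAL, STDOUT, STACK, HOSTNAME, MEMORY); the
# values here are made up:
#   1400000000.123456,524288,,,node001,262144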
class Agent:
"""
Each agent object contains its own sampled log data. The Agent class is responsible for
collecting and storing data. machine_id is used to identify the agent.
machine_id is supplied by the client class. This allows for multiple agents if desired
"""
def __init__(self, arguments, machine_id):
self.arguments = arguments
self.my_uuid = machine_id
self.track_process = ''
self.process = None
# This log object is for stdout purposes
self.log = TemporaryFile()
self.log_position = 0
# Discover if --recover is being used. If so, we need to obtain the
# timestamp of the last entry in the outfile log... a little bulky
# to do... and not a very good place to do it.
if self.arguments.recover:
if os.path.exists(self.arguments.outfile[-1]):
memory_list = []
history_file = open(self.arguments.outfile[-1], 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Get last item in list. Unfortunately, no way to do this until
# we have read the entire file...? Lucky for us, most memory log
# files are in the single digit megabytes
for row in reader:
memory_list.append(row)
history_file.close()
last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1]
self.delta = (GetTime().now - last_entry)
else:
print 'Recovery options detected, but I could not find your previous memory log file.'
sys.exit(1)
else:
self.delta = 0
# Create the dictionary to which all sampled data will be stored
# NOTE: REQUEST dictionary items are instructions (arguments) we will
# ask the server to provide (if we are running with --pbs)
        # Simply add them here. We _can not_ make the arguments match the
        # server exactly; this would cause every agent launched to perform
        # like a server... bad stuff
# Example: We added repeat_rate (see dictionary below). Now every
# agent would update their repeat_rate according to what the user
# supplied as an argument (--repeat_rate 0.02)
self.agent_data = { self.my_uuid :
{ 'HOSTNAME' : socket.gethostname(),
'STDOUT' : '',
'STACK' : '',
'MEMORY' : 0,
'TIMESTAMP' : GetTime().now - self.delta,
'REQUEST' : { 'run' : '',
'pstack' : '',
'repeat_rate' : '',
'cwd' : '',
'debugger' : ''},
'STOP' : False,
'TOTAL' : 0,
'DEBUG_LOG' : ''
}
}
# we need to create a place holder for our debugger because when
# memory_logger is run via --pbs, this Agent will not know what
# kind of debugger to use until it has made contact with the server
self.stack_trace = None
# NOTE: This is the only function that should be called in this class
def takeSample(self):
if self.arguments.pstack:
if self.stack_trace is None:
self.stack_trace = Debugger(self.arguments)
self.agent_data[self.my_uuid]['STACK'] = self._getStack()
# Always do the following
self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory()
self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout()
if self.arguments.recover:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta
else:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now
# Return the data to whom ever asked for it
return self.agent_data
def _getStdout(self):
self.log.seek(self.log_position)
output = self.log.read()
self.log_position = self.log.tell()
sys.stdout.write(output)
return output
def _getMemory(self):
tmp_pids = self._getPIDs()
memory_usage = 0
if tmp_pids != {}:
for single_pid in tmp_pids.iteritems():
memory_usage += int(single_pid[1][0])
if memory_usage == 0:
                # Memory usage hit zero? Then assume the binary being tracked has exited, so let's begin doing the same.
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping'
self.agent_data[self.my_uuid]['STOP'] = True
return 0
return int(memory_usage)
        # No binary even detected? Let's assume it exited, so we should begin doing the same.
self.agent_data[self.my_uuid]['STOP'] = True
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping'
return 0
def _getStack(self):
# Create a process object if none already exists. Reuse the old one if it does.
if self.process is None:
tmp_pids = self._getPIDs()
# Check if we actually found any running processes
if tmp_pids != {}:
# Obtain a single process id, any process id will do. This will be the process we attach to and perform stack traces
one_pid = tmp_pids.keys()[0]
self.process = self.stack_trace.getProcess(str(one_pid))
return self.stack_trace.getStackTrace(self.process)
else:
return ''
else:
return self.stack_trace.getStackTrace(self.process)
def _getPIDs(self):
pid_list = {}
        # Determine the binary to sample and store it. Doing the findCommand is a little expensive.
if self.track_process == '':
self.track_process = self._findCommand(''.join(self.arguments.run))
# If we are tracking a binary
if self.arguments.run:
command = [which('ps'), '-e', '-o', 'pid,rss,user,args']
tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
all_pids = tmp_proc.communicate()[0].split('\n')
# Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc)
for single_pid in all_pids:
if single_pid.find(self.track_process) != -1 and \
single_pid.find(__file__) == -1 and \
single_pid.find('mpirun') == -1 and \
single_pid.find(os.getenv('USER')) != -1 and \
single_pid.find('mpiexec') == -1:
pid_list[int(single_pid.split()[0])] = []
pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]])
return pid_list
# Determine the command we are going to track
# A few things are happening here; first we strip off any MPI commands
# we then loop through the remaining items until we find a matching path
# exp: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6
# would first strip off mpiexec, check for the presence of -n in our
# current directory, then 12, then ../../../moose_test-opt <- found. It would
# stop and return the base name (moose_test-opt).
def _findCommand(self, command):
if command.find('mpiexec') == 0 or command.find('mpirun') == 0:
for binary in command.split():
if os.path.exists(binary):
return os.path.split(binary)[1]
elif os.path.exists(command.split()[0]):
return os.path.split(command.split()[0])[1]
class GetTime:
"""A simple formatted time object.
"""
def __init__(self, posix_time=None):
import datetime
if posix_time == None:
self.posix_time = datetime.datetime.now()
else:
self.posix_time = datetime.datetime.fromtimestamp(posix_time)
self.now = float(datetime.datetime.now().strftime('%s.%f'))
self.microsecond = self.posix_time.microsecond
self.second = self.posix_time.second
self.minute = self.posix_time.strftime('%M')
self.hour = self.posix_time.strftime('%H')
self.day = self.posix_time.strftime('%d')
self.month = self.posix_time.strftime('%m')
self.year = self.posix_time.year
self.dayname = self.posix_time.strftime('%a')
self.monthname = self.posix_time.strftime('%b')
class MemoryPlotter:
def __init__(self, arguments):
self.arguments = arguments
self.buildGraph()
def buildPlots(self):
plot_dictionary = {}
for log in self.arguments.plot:
memory_list = []
if os.path.exists(log):
log_file = open(log, 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
for row in reader:
memory_list.append(row)
log_file.close()
plot_dictionary[log.split('/')[-1:][0]] = memory_list
else:
print 'log not found:', log
sys.exit(1)
return plot_dictionary
def buildGraph(self):
try:
import matplotlib.pyplot as plt
except ImportError:
print 'Error importing matplotlib. Matplotlib not available on this system?'
sys.exit(1)
plot_dictionary = self.buildPlots()
fig = plt.figure()
plot_list = []
tmp_plot = []
tmp_legend = []
self.stdout_msgs = {}
self.pstack_msgs = {}
self.multiples = 1
self.memory_label = 'Memory in Bytes'
# Try and calculate memory sizes, so we can move annotations around a bit more accurately
largest_memory = []
for plot_name, value_list in plot_dictionary.iteritems():
for records in value_list:
largest_memory.append(int(records[1]))
largest_memory.sort()
        # Determine the scale of the graph. Logged samples are in kilobytes
        # (ps reports rss in KB), which is why the suffix chosen below is one
        # step above the divisor interpreted as bytes (suffixes[index-1]).
suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"]
        multiplier = 1 << 40
index = 0
while largest_memory[-1] < multiplier and multiplier >= 1:
multiplier = multiplier >> 10
index = index + 1
self.multiples = multiplier
self.memory_label = "Memory in " + suffixes[index-1]
# Loop through each log file
for plot_name, value_list in plot_dictionary.iteritems():
plot_list.append(fig.add_subplot(111))
tmp_memory = []
tmp_time = []
tmp_stdout_x = []
tmp_stdout_y = []
tmp_pstack_x = []
tmp_pstack_y = []
stdout_msg = []
pstack_msg = []
# Get the start time, and make this 0
try:
tmp_zero = decimal.Decimal(value_list[0][0])
except:
print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?'
sys.exit(1)
# Populate the graph
for records in value_list:
tmp_memory.append(decimal.Decimal(records[1]) / self.multiples)
tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero))
if len(records[2]) > 0 and self.arguments.stdout:
tmp_stdout_x.append(tmp_time[-1])
tmp_stdout_y.append(tmp_memory[-1])
stdout_msg.append(records[2])
if len(records[3]) > 0 and self.arguments.pstack:
tmp_pstack_x.append(tmp_time[-1])
tmp_pstack_y.append(tmp_memory[-1])
pstack_msg.append(records[3])
# Do the actual plotting:
f, = plot_list[-1].plot(tmp_time, tmp_memory)
tmp_plot.append(f)
tmp_legend.append(plot_name)
plot_list[-1].grid(True)
plot_list[-1].set_ylabel(self.memory_label)
plot_list[-1].set_xlabel('Time in Seconds')
# Enable dork mode
if self.arguments.darkmode:
fig.set_facecolor('0.1')
plot_list[-1].set_axis_bgcolor('0.1')
plot_list[-1].spines['bottom'].set_color('white')
plot_list[-1].spines['top'].set_color('white')
plot_list[-1].spines['right'].set_color('white')
plot_list[-1].spines['left'].set_color('white')
plot_list[-1].tick_params(axis='x', colors='white')
plot_list[-1].tick_params(axis='y', colors='white')
plot_list[-1].xaxis.label.set_color('white')
plot_list[-1].yaxis.label.set_color('white')
plot_list[-1].grid(color='0.6')
# Plot annotations
if self.arguments.stdout:
stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
stdout_line.set_gid('stdout' + next_index)
self.stdout_msgs[next_index] = stdout_msg
self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color())
if self.arguments.pstack:
pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
pstack_line.set_gid('pstack' + next_index)
self.pstack_msgs[next_index] = pstack_msg
# Make points clickable
fig.canvas.mpl_connect('pick_event', self)
# Create legend
legend = plt.legend(tmp_plot, tmp_legend, loc = 2)
legend.get_frame().set_alpha(0.7)
# More dork mode settings
if self.arguments.darkmode:
legend.get_frame().set_facecolor('0.2')
for text in legend.get_texts():
text.set_color('0.8')
plt.show()
def __call__(self, event):
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' }
line = event.artist
ind = event.ind
name = line.get_gid()[:-1]
index = line.get_gid()[-1]
if self.arguments.stdout and name == 'stdout':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "stdout -----------------------------------------------------\n"
for id in ind:
print self.stdout_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
if self.arguments.pstack and name == 'pstack':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "pstack -----------------------------------------------------\n"
for id in ind:
print self.pstack_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
def buildAnnotation(self,fig,x,y,msg,c):
for i in range(len(x)):
fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]),
xy=(x[i], y[i]),
rotation=self.arguments.rotate_text[-1],
xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])),
color=c, horizontalalignment='center', verticalalignment='bottom',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.5",
color=c
)
)
class ReadLog:
"""Read a memory_logger log file, and display the results to stdout in an easy to read form.
"""
def __init__(self, arguments):
self.arguments = arguments
history_file = open(self.arguments.read[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
self.memory_list = []
for row in reader:
self.memory_list.append(row)
history_file.close()
self.sorted_list = []
self.mem_list = []
self.use_nodes = False
self.printHistory()
def printHistory(self):
RESET = '\033[0m'
BOLD = '\033[1m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
CYAN = '\033[36m'
YELLOW = '\033[33m'
last_memory = 0.0
(terminal_width, terminal_height) = self.getTerminalSize()
for timestamp in self.memory_list:
to = GetTime(float(timestamp[0]))
total_memory = int(timestamp[1])
log = timestamp[2].split('\n')
pstack = timestamp[3].split('\n')
node_name = str(timestamp[4])
node_memory = int(timestamp[5])
self.mem_list.append(total_memory)
self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' + '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory])
largest_memory = decimal.Decimal(max(self.mem_list))
if len(set([x[4] for x in self.sorted_list])) > 1:
self.use_nodes = True
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
for item in self.sorted_list:
tmp_str = ''
if decimal.Decimal(item[1]) == largest_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width)
elif item[1] > last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width)
elif item[1] == last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width)
else:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width)
last_memory = item[1]
sys.stdout.write(tmp_str)
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width):
RESET = '\033[0m'
if decimal.Decimal(total_memory) == largest_memory:
percent = '100'
elif (decimal.Decimal(total_memory) / largest_memory) == 0:
percent = '0'
else:
percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6]
header = len(date) + 18
footer = len(percent) + 6
additional_correction = 0
max_length = decimal.Decimal(terminal_width - header) / largest_memory
total_position = total_memory * decimal.Decimal(max_length)
node_position = node_memory * decimal.Decimal(max_length)
tmp_log = ''
if self.arguments.stdout:
for single_log in log:
if single_log != '':
tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n'
if self.arguments.pstack:
for single_pstack in pstack:
if single_pstack != '':
tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n'
if self.arguments.separate and self.use_nodes != False:
message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >'
additional_correction = len(RESET) + len(color_code)
elif self.use_nodes:
message = '< >'
else:
node_position = 0
message = ''
return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log
def getTerminalSize(self):
"""Quicky to get terminal window size"""
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
# A simple which function to return path to program
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
print 'I could not find the following binary:', program
sys.exit(1)
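# Usage sketch: which() either returns a path or exits the script, so its
# result can be used directly (binary name is illustrative):
#   ps_path = which('ps')   # e.g. '/bin/ps'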
def verifyArgs(args):
option_count = 0
if args.read:
option_count += 1
if args.run:
option_count += 1
if args.plot:
option_count += 1
if option_count != 1 and args.pbs != True:
if args.call_back_host == None:
print 'You must use one of the following: run, read, or plot'
sys.exit(1)
args.cwd = os.getcwd()
# Work with --recover (a MOOSE application specific option)
args.recover = False
if args.run:
if args.run[0].find('--recover') != -1:
args.recover = True
if args.outfile == None and args.run:
# Attempt to build the output file based on input file
if re.findall(r'-i (\w+)', args.run[0]) != []:
args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log']
else:
args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log']
if args.pstack and (args.read is None and args.plot is None):
if args.debugger is not None:
if args.debugger == 'lldb':
if platform.platform().find('Darwin') != -1:
try:
import lldb
except ImportError:
lldbImportError()
sys.exit(1)
else:
results = which('lldb')
elif args.debugger == 'gdb':
results = which('gdb')
else:
print 'Invalid debugger selected. You must choose between gdb and lldb using the --debugger argument'
sys.exit(1)
return args
def parseArguments(args=None):
parser = argparse.ArgumentParser(description='Track and Display memory usage')
rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage')
rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command. You must encapsulate the command in quotes\n ')
rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ')
rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay as to when the memory_logger will launch the tracking agents\n ')
rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)')
rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ')
rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. (Defaults based on run command)\n ')
readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger')
readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ')
readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ')
readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ')
commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results')
commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ')
commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ')
commongroup.add_argument('--debugger', dest='debugger', metavar='gdb | lldb', nargs='?', help='Specify the debugger to use. Possible values: gdb or lldb\n ')
plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot')
    plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this amount (default 30)\n ')
    plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this amount (default 0 0)\n ')
plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ')
plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ')
plotgroup.add_argument('--darkmode', dest='darkmode', metavar='', action='store_const', const=True, help='When you want to be cool\n ')
internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.')
internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ')
return verifyArgs(parser.parse_args(args))
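# Illustrative invocations (binary and input file names are made up), using
# only the arguments defined above:
#   memory_logger.py --run "./moose_test-opt -i simple_diffusion.i"
#   memory_logger.py --run "mpiexec -n 2 ./moose_test-opt -i input.i" --pbs --pstack --debugger gdb
#   memory_logger.py --plot simple_diffusion_memory.log --stdout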
def lldbImportError():
print """
Unable to import lldb
The Python lldb API is now supplied by Xcode but not
automatically set in your PYTHONPATH. Please search
the internet for how to do this if you wish to use
--pstack on Mac OS X.
Note: If you installed Xcode to the default location of
/Applications, you should only have to perform the following:
export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python:$PYTHONPATH
###!! IMPORTANT !!###
It may also be necessary to unload the miniconda module.
If you receive a fatal Python error about PyThreadState
try using your system's version of Python instead.
"""
if __name__ == '__main__':
args = parseArguments()
if args.read:
ReadLog(args)
sys.exit(0)
if args.plot:
MemoryPlotter(args)
sys.exit(0)
Server(args)
|
giopastor/moose
|
scripts/memory_logger.py
|
Python
|
lgpl-2.1
| 47,922
|
[
"MOOSE"
] |
37a038c5a3e3046adb40f1c3bbe871cbed5e4b5992b7d96ca38074cbf92b0575
|
# Generated from STIXPattern.g4 by ANTLR 4.8
from antlr4 import *
# This class defines a complete generic visitor for a parse tree produced by STIXPatternParser.
class STIXPatternVisitor(ParseTreeVisitor):
# Visit a parse tree produced by STIXPatternParser#pattern.
def visitPattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressions.
def visitObservationExpressions(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionOr.
def visitObservationExpressionOr(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionAnd.
def visitObservationExpressionAnd(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionRepeated.
def visitObservationExpressionRepeated(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionSimple.
def visitObservationExpressionSimple(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionCompound.
def visitObservationExpressionCompound(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionWithin.
def visitObservationExpressionWithin(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#observationExpressionStartStop.
def visitObservationExpressionStartStop(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#comparisonExpression.
def visitComparisonExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#comparisonExpressionAnd.
def visitComparisonExpressionAnd(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestEqual.
def visitPropTestEqual(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestOrder.
def visitPropTestOrder(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestSet.
def visitPropTestSet(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestLike.
def visitPropTestLike(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestRegex.
def visitPropTestRegex(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestIsSubset.
def visitPropTestIsSubset(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestIsSuperset.
def visitPropTestIsSuperset(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#propTestParen.
def visitPropTestParen(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#startStopQualifier.
def visitStartStopQualifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#withinQualifier.
def visitWithinQualifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#repeatedQualifier.
def visitRepeatedQualifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#objectPath.
def visitObjectPath(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#objectType.
def visitObjectType(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#firstPathComponent.
def visitFirstPathComponent(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#indexPathStep.
def visitIndexPathStep(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#pathStep.
def visitPathStep(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#keyPathStep.
def visitKeyPathStep(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#setLiteral.
def visitSetLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#primitiveLiteral.
def visitPrimitiveLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by STIXPatternParser#orderableLiteral.
def visitOrderableLiteral(self, ctx):
return self.visitChildren(ctx)
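# Usage sketch (not part of the generated file): subclass the visitor and
# override only the rules of interest, e.g.:
#
#   class EqualityCollector(STIXPatternVisitor):
#       def visitPropTestEqual(self, ctx):
#           # inspect ctx here, then keep walking the subtree
#           return self.visitChildren(ctx)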
|
chisholm/cti-pattern-validator
|
stix2patterns/v20/grammars/STIXPatternVisitor.py
|
Python
|
bsd-3-clause
| 5,117
|
[
"VisIt"
] |
04164adf4bea47729c39af9179772a3f32d8f40cc27ff7163f50fbb8b0e17cdf
|
"""pybutton.py - a button for displaying Python code.
This module defines two classes, together they implement a button that
a module can use to display Python.
PyButton
--------
PyButton is a gkt.Button with the label 'Python'. When pressed, it
opens a PyWindow displaying some Python code, or an error message if
no Python code is ready.
The script is stored in the attribute .python, it is the
responsability of the owning object to keep this attribute up to date:
when pressing the Apply button would result in a sensible
configuration being created, the python attribute must be set to a
string creating this code. When pressing Apply would cause an error,
the python attribute must be set to None.
PyWindow
--------
Displays the Python code. This object is created by the PyButton
object when needed.
"""
import gtk
import time
from ase.gui.widgets import oops, pack
class PyButton(gtk.Button):
"A button for displaying Python code."
def __init__(self, title):
gtk.Button.__init__(self, "Python")
self.title = title
self.python = None
self.connect_after('clicked', self.run)
def run(self, *args):
"The method called when the button is click."
if self.python:
now = time.ctime()
win = PyWindow(self.title, now, self.python)
else:
oops("No Python code",
"You have not (yet) specified a consistent set of parameters.")
fr1_template = """
Title: %s
Time: %s
"""
class PyWindow(gtk.Window):
"A window displaying Python code."
def __init__(self, title, time, code):
gtk.Window.__init__(self)
self.set_title("ag: Python code")
vbox = gtk.VBox()
lbl = gtk.Label(fr1_template % (title, time))
lbl.set_alignment(0.0, 0.5)
fr = gtk.Frame("Information:")
fr.add(lbl)
pack(vbox, fr)
txtbuf = gtk.TextBuffer()
txtbuf.set_text(code)
txtview = gtk.TextView(txtbuf)
txtview.set_editable(False)
fr = gtk.Frame("Python code:")
fr.add(txtview)
fr.set_label_align(0.0, 0.5)
pack(vbox, fr)
but = gtk.Button(stock=gtk.STOCK_OK)
but.connect('clicked', lambda x: self.destroy())
pack(vbox, [but], end=True, bottom=True)
self.add(vbox)
self.show_all()
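# Usage sketch (hypothetical owner widget), following the contract in the
# module docstring: keep .python set to a script string, or None whenever the
# current settings would not produce a valid configuration.
#   button = PyButton('Relaxation')
#   button.python = "from ase import Atoms\n..."  # set when parameters are consistent
#   pack(vbox, [button])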
|
slabanja/ase
|
ase/gui/pybutton.py
|
Python
|
gpl-2.0
| 2,355
|
[
"ASE"
] |
b2d4b76ca165e1c41c401922297e22839a1d24951f728cecf31ff34e00812d5b
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode
~~~~~~~~~~~~~
Utilities parsing and analyzing Python code.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import re
import sys
from os import path
from six import iteritems, text_type, BytesIO, StringIO
from sphinx import package_dir
from sphinx.errors import PycodeError
from sphinx.pycode import nodes
from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
# load the Python grammar
_grammarfile = path.join(package_dir, 'pycode',
'Grammar-py%d.txt' % sys.version_info[0])
pygrammar = driver.load_grammar(_grammarfile)
pydriver = driver.Driver(pygrammar, convert=nodes.convert)
# an object with attributes corresponding to token and symbol names
class sym(object):
pass
for k, v in iteritems(pygrammar.symbol2number):
setattr(sym, k, v)
for k, v in iteritems(token.tok_name):
setattr(sym, v, k)
# a dict mapping terminal and nonterminal numbers to their names
number2name = pygrammar.number2symbol.copy()
number2name.update(token.tok_name)
_eq = nodes.Leaf(token.EQUAL, '=')
emptyline_re = re.compile(r'^\s*(#.*)?$')
class AttrDocVisitor(nodes.NodeVisitor):
"""
Visitor that collects docstrings for attribute assignments on toplevel and
in classes (class attributes and attributes set in __init__).
The docstrings can either be in special '#:' comments before the assignment
or in a docstring after it.
"""
def init(self, scope, encoding):
self.scope = scope
self.in_init = 0
self.encoding = encoding
self.namespace = []
self.collected = {}
self.tagnumber = 0
self.tagorder = {}
def add_tag(self, name):
name = '.'.join(self.namespace + [name])
self.tagorder[name] = self.tagnumber
self.tagnumber += 1
def visit_classdef(self, node):
"""Visit a class."""
self.add_tag(node[1].value)
self.namespace.append(node[1].value)
self.generic_visit(node)
self.namespace.pop()
def visit_funcdef(self, node):
"""Visit a function (or method)."""
# usually, don't descend into functions -- nothing interesting there
self.add_tag(node[1].value)
if node[1].value == '__init__':
# however, collect attributes set in __init__ methods
self.in_init += 1
self.generic_visit(node)
self.in_init -= 1
def visit_expr_stmt(self, node):
"""Visit an assignment which may have a special comment before (or
after) it.
"""
if _eq not in node.children:
# not an assignment (we don't care for augmented assignments)
return
# look *after* the node; there may be a comment prefixing the NEWLINE
# of the simple_stmt
parent = node.parent
idx = parent.children.index(node) + 1
while idx < len(parent):
if parent[idx].type == sym.SEMI:
idx += 1
continue # skip over semicolon
if parent[idx].type == sym.NEWLINE:
prefix = parent[idx].get_prefix()
if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
if docstring:
self.add_docstring(node, docstring)
return # don't allow docstrings both before and after
break
# now look *before* the node
pnode = node[0]
prefix = pnode.get_prefix()
# if the assignment is the first statement on a new indentation
# level, its preceding whitespace and comments are not assigned
# to that token, but the first INDENT or DEDENT token
while not prefix:
pnode = pnode.get_prev_leaf()
if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
break
prefix = pnode.get_prefix()
if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
docstring = prepare_commentdoc(prefix)
self.add_docstring(node, docstring)
def visit_simple_stmt(self, node):
"""Visit a docstring statement which may have an assignment before."""
if node[0].type != token.STRING:
# not a docstring; but still need to visit children
return self.generic_visit(node)
prev = node.get_prev_sibling()
if not prev:
return
if prev.type == sym.simple_stmt and \
prev[0].type == sym.expr_stmt and _eq in prev[0].children:
# need to "eval" the string because it's returned in its
# original form
docstring = literals.evalString(node[0].value, self.encoding)
docstring = prepare_docstring(docstring)
self.add_docstring(prev[0], docstring)
def add_docstring(self, node, docstring):
# add an item for each assignment target
for i in range(0, len(node) - 1, 2):
target = node[i]
if self.in_init and self.number2name[target.type] == 'power':
# maybe an attribute assignment -- check necessary conditions
if ( # node must have two children
len(target) != 2 or
# first child must be "self"
target[0].type != token.NAME or target[0].value != 'self' or
# second child must be a "trailer" with two children
self.number2name[target[1].type] != 'trailer' or
len(target[1]) != 2 or
# first child must be a dot, second child a name
target[1][0].type != token.DOT or
target[1][1].type != token.NAME):
continue
name = target[1][1].value
elif target.type != token.NAME:
# don't care about other complex targets
continue
else:
name = target.value
self.add_tag(name)
if docstring:
namespace = '.'.join(self.namespace)
if namespace.startswith(self.scope):
self.collected[namespace, name] = docstring
class ModuleAnalyzer(object):
# cache for analyzer objects -- caches both by module and file name
cache = {}
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
if isinstance(string, bytes):
return cls(BytesIO(string), modname, srcname)
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
fileobj = open(filename, 'rb')
except Exception as err:
raise PycodeError('error opening %r' % filename, err)
obj = cls(fileobj, modname, filename)
cls.cache['file', filename] = obj
return obj
@classmethod
def for_module(cls, modname):
if ('module', modname) in cls.cache:
entry = cls.cache['module', modname]
if isinstance(entry, PycodeError):
raise entry
return entry
try:
type, source = get_module_source(modname)
if type == 'string':
obj = cls.for_string(source, modname)
else:
obj = cls.for_file(source, modname)
except PycodeError as err:
cls.cache['module', modname] = err
raise
cls.cache['module', modname] = obj
return obj
def __init__(self, source, modname, srcname, decoded=False):
# name of the module
self.modname = modname
# name of the source file
self.srcname = srcname
# file-like object yielding source lines
self.source = source
# cache the source code as well
pos = self.source.tell()
if not decoded:
self.encoding = detect_encoding(self.source.readline)
self.source.seek(pos)
self.code = self.source.read().decode(self.encoding)
self.source.seek(pos)
self.source = TextIOWrapper(self.source, self.encoding)
else:
self.encoding = None
self.code = self.source.read()
self.source.seek(pos)
# will be filled by tokenize()
self.tokens = None
# will be filled by parse()
self.parsetree = None
# will be filled by find_attr_docs()
self.attr_docs = None
self.tagorder = None
# will be filled by find_tags()
self.tags = None
def tokenize(self):
"""Generate tokens from the source."""
if self.tokens is not None:
return
try:
self.tokens = list(tokenize.generate_tokens(self.source.readline))
except tokenize.TokenError as err:
raise PycodeError('tokenizing failed', err)
self.source.close()
def parse(self):
"""Parse the generated source tokens."""
if self.parsetree is not None:
return
self.tokenize()
try:
self.parsetree = pydriver.parse_tokens(self.tokens)
except parse.ParseError as err:
raise PycodeError('parsing failed', err)
def find_attr_docs(self, scope=''):
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is not None:
return self.attr_docs
self.parse()
attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
attr_visitor.visit(self.parsetree)
self.attr_docs = attr_visitor.collected
self.tagorder = attr_visitor.tagorder
# now that we found everything we could in the tree, throw it away
# (it takes quite a bit of memory for large modules)
self.parsetree = None
return attr_visitor.collected
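    # Usage sketch (module name is hypothetical):
    #
    #   analyzer = ModuleAnalyzer.for_module('mypkg.mymod')
    #   attr_docs = analyzer.find_attr_docs()
    #   # attr_docs maps (namespace, attribute name) -> docstring lines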
def find_tags(self):
"""Find class, function and method definitions and their location."""
if self.tags is not None:
return self.tags
self.tokenize()
result = {}
namespace = []
stack = []
indent = 0
defline = False
expect_indent = False
emptylines = 0
        def tokeniter(ignore=(token.COMMENT,)):
for tokentup in self.tokens:
if tokentup[0] not in ignore:
yield tokentup
tokeniter = tokeniter()
for type, tok, spos, epos, line in tokeniter:
if expect_indent and type != token.NL:
if type != token.INDENT:
# no suite -- one-line definition
assert stack
dtype, fullname, startline, _ = stack.pop()
endline = epos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline - emptylines)
expect_indent = False
if tok in ('def', 'class'):
name = next(tokeniter)[1]
namespace.append(name)
fullname = '.'.join(namespace)
stack.append((tok, fullname, spos[0], indent))
defline = True
elif type == token.INDENT:
expect_indent = False
indent += 1
elif type == token.DEDENT:
indent -= 1
# if the stacklevel is the same as it was before the last
# def/class block, this dedent closes that block
if stack and indent == stack[-1][3]:
dtype, fullname, startline, _ = stack.pop()
endline = spos[0]
namespace.pop()
result[fullname] = (dtype, startline, endline - emptylines)
elif type == token.NEWLINE:
# if this line contained a definition, expect an INDENT
# to start the suite; if there is no such INDENT
# it's a one-line definition
if defline:
defline = False
expect_indent = True
emptylines = 0
elif type == token.NL:
# count up if line is empty or comment only
if emptyline_re.match(line):
emptylines += 1
else:
emptylines = 0
self.tags = result
return result
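    # Shape of the returned mapping (values are illustrative only):
    #   {'MyClass': ('class', 12, 48), 'MyClass.method': ('def', 20, 31)}
    # i.e. dotted fullname -> (definition type, start line, end line).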
if __name__ == '__main__':
import time
import pprint
x0 = time.time()
# ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html')
ma = ModuleAnalyzer.for_file('sphinx/environment.py',
'sphinx.environment')
ma.tokenize()
x1 = time.time()
ma.parse()
x2 = time.time()
# for (ns, name), doc in iteritems(ma.find_attr_docs()):
# print '>>', ns, name
# print '\n'.join(doc)
pprint.pprint(ma.find_tags())
x3 = time.time()
# print nodes.nice_repr(ma.parsetree, number2name)
print("tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2))
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/pycode/__init__.py
|
Python
|
gpl-3.0
| 13,599
|
[
"VisIt"
] |
5ead088a6d090561951d8427d7cefde5d66656bf19d0269b7d5208cbbe64de4e
|
"""This module defines the Atom object."""
import numpy as np
from ase.data import atomic_numbers, chemical_symbols, atomic_masses
# key: singular name; value: (plural name, type, shape)
data = {'symbol': ('symbols', str, () ),
'number': ('numbers', int, () ),
'position': ('positions', float, (3,)),
'tag': ('tags', int, () ),
'momentum': ('momenta', float, (3,)),
'mass': ('masses', float, () ),
'magmom': ('magmoms', float, () ),
'charge': ('charges', float, () ),
}
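# Example: data['position'] == ('positions', float, (3,)), i.e. per-atom
# 'position' values live in the Atoms object's 'positions' array of float
# triplets, while scalar entries such as 'mass' have an empty shape ().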
class Atom(object):
"""Class for representing a single atom.
Parameters:
symbol: str or int
Can be a chemical symbol (str) or an atomic number (int).
position: sequence of 3 floats
        Atomic position.
tag: int
Special purpose tag.
momentum: sequence of 3 floats
Momentum for atom.
mass: float
Atomic mass in atomic units.
magmom: float or 3 floats
Magnetic moment.
charge: float
Atomic charge.
Examples:
>>> a = Atom('O', charge=-2)
>>> b = Atom(8, charge=-2)
>>> c = Atom('H', (1, 2, 3), magmom=1)
>>> print a.charge, a.position
-2 [ 0. 0. 0.]
>>> c.x = 0.0
>>> c.position
array([ 0., 2., 3.])
>>> b.symbol
'O'
>>> c.tag = 42
>>> c.number
1
>>> c.symbol = 'Li'
>>> c.number
3
If the atom object belongs to an Atoms object, then assigning
values to the atom attributes will change the corresponding
arrays of the atoms object:
>>> OH = Atoms('OH')
>>> OH[0].charge = -1
>>> OH.get_charges()
array([-1., 0.])
Another example:
>>> for atom in bulk:
    ...     if atom.symbol == 'Ni':
... atom.magmom = 0.7
"""
__slots__ = ['_number', '_symbol', '_position', '_tag', '_momentum',
'_mass', '_magmom', '_charge', 'atoms', 'index']
def __init__(self, symbol='X', position=(0, 0, 0),
tag=None, momentum=None, mass=None,
magmom=None, charge=None,
atoms=None, index=None):
if atoms is None:
# This atom is not part of any Atoms object:
if isinstance(symbol, str):
self._number = atomic_numbers[symbol]
self._symbol = symbol
else:
self._number = symbol
self._symbol = chemical_symbols[symbol]
self._position = np.array(position, float)
self._tag = tag
if momentum is not None:
momentum = np.array(momentum, float)
if magmom is not None:
magmom = np.array(magmom, float)
self._momentum = momentum
self._mass = mass
self._magmom = magmom
self._charge = charge
self.index = index
self.atoms = atoms
def __repr__(self):
s = "Atom('%s', %s" % (self.symbol, list(self.position))
for attr in ['tag', 'momentum', 'mass', 'magmom', 'charge']:
value = getattr(self, attr)
if value is not None:
if isinstance(value, np.ndarray):
value = value.tolist()
s += ', %s=%s' % (attr, value)
if self.atoms is None:
s += ')'
else:
s += ', index=%d)' % self.index
return s
def get_data(self):
"""Helper method."""
return (self.position, self.number,
self.tag, self.momentum, self.mass,
self.magmom, self.charge)
def cut_reference_to_atoms(self):
"""Cut reference to atoms object."""
data = self.get_data()
self.index = None
self.atoms = None
(self._position,
self._number,
self._tag,
self._momentum,
self._mass,
self._magmom,
self._charge) = data
self._symbol = chemical_symbols[self._number]
def _get(self, name, copy=False):
if self.atoms is None:
return getattr(self, '_' + name)
elif name == 'symbol':
return chemical_symbols[self.number]
else:
plural = data[name][0]
if plural in self.atoms.arrays:
value = self.atoms.arrays[plural][self.index]
if copy:
value = value.copy()
return value
else:
return None
def _set(self, name, value):
if self.atoms is None:
setattr(self, '_' + name, value)
if name == 'symbol':
self._number = atomic_numbers[value]
elif name == 'number':
self._symbol = chemical_symbols[value]
else:
plural, dtype, shape = data[name]
if plural in self.atoms.arrays:
array = self.atoms.arrays[plural]
if name == 'magmom' and array.ndim == 2:
assert len(value) == 3
array[self.index] = value
else:
if name == 'magmom' and np.asarray(value).ndim == 1:
shape = (3,)
array = np.zeros((len(self.atoms),) + shape, dtype)
array[self.index] = value
self.atoms.new_array(plural, array)
def get_symbol(self): return self._get('symbol')
def get_atomic_number(self): return self._get('number')
def get_position(self): return self._get('position', True)
def _get_position(self): return self._get('position')
def get_tag(self): return self._get('tag')
def get_momentum(self): return self._get('momentum', True)
def _get_momentum(self): return self._get('momentum')
def get_initial_magnetic_moment(self): return self._get('magmom', True)
def _get_initial_magnetic_moment(self): return self._get('magmom')
def get_charge(self): return self._get('charge')
def set_symbol(self, symbol): self._set('symbol', symbol)
def set_atomic_number(self, number): self._set('number', number)
def set_position(self, position):
self._set('position', np.array(position, float))
def set_tag(self, tag): self._set('tag', tag)
def set_momentum(self, momentum): self._set('momentum', momentum)
def set_initial_magnetic_moment(self, magmom): self._set('magmom', magmom)
def set_charge(self, charge): self._set('charge', charge)
def set_magmom(self, magmom):
"Deprecated, use set_initial_magnetic_moment instead."
import warnings
warnings.warn('set_magmom is deprecated. Please use set_initial_magnetic_moment' \
' instead.', DeprecationWarning, stacklevel=2)
return self.set_initial_magnetic_moment(magmom)
def get_number(self):
"Deprecated, use get_atomic_number instead."
import warnings
warnings.warn(
'get_number is deprecated. Please use get_atomic_number instead.',
DeprecationWarning, stacklevel=2)
return self.get_atomic_number()
def set_number(self, number):
"Deprecated, use set_atomic_number instead."
import warnings
warnings.warn(
'set_number is deprecated. Please use set_atomic_number instead.',
DeprecationWarning, stacklevel=2)
return self.set_atomic_number(number)
def get_mass(self):
"""Get the mass of the atom.
Returns the mass of the atom, if known. If unknown, returns the
atomic mass corresponding to the element.
"""
m = self._get('mass')
if m is None:
m = atomic_masses[self.get_atomic_number()]
return m
def set_mass(self, mass):
"""Sets the mass of the atom.
If the atom is part of a list of atoms, and the atoms do not yet
have masses, all other atoms are assigned their default masses.
"""
if self.atoms is None:
self._mass = mass
else:
if 'masses' not in self.atoms.arrays:
# Assign default masses to all atoms
self.atoms.set_masses(self.atoms.get_masses())
self.atoms.arrays['masses'][self.index] = mass
symbol = property(get_symbol, set_symbol, doc='Chemical symbol')
number = property(get_atomic_number, set_atomic_number, doc='Atomic number')
position = property(_get_position, set_position, doc='XYZ-coordinates')
tag = property(get_tag, set_tag, doc='Integer tag')
momentum = property(_get_momentum, set_momentum, doc='XYZ-momentum')
mass = property(get_mass, set_mass, doc='Atomic mass')
magmom = property(_get_initial_magnetic_moment, set_initial_magnetic_moment,
doc='Initial magnetic moment')
charge = property(get_charge, set_charge, doc='Atomic charge')
def get_x(self): return self.position[0]
def get_y(self): return self.position[1]
def get_z(self): return self.position[2]
def set_x(self, x): self.position[0] = x
def set_y(self, y): self.position[1] = y
def set_z(self, z): self.position[2] = z
    x = property(get_x, set_x, doc='X-coordinate')
    y = property(get_y, set_y, doc='Y-coordinate')
    z = property(get_z, set_z, doc='Z-coordinate')
|
slabanja/ase
|
ase/atom.py
|
Python
|
gpl-2.0
| 9,569
|
[
"ASE"
] |
1ba5b17c9b5e3e5f8c874dc5a4050b7e8d324ef4678520dade3fac94711978df
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import os
import sys
from datetime import datetime  # needed by _music_function below
import py_entitymatching as mg
p = mg.get_install_path()
datasets_path = os.sep.join([p, 'datasets', 'example_datasets'])
bb = mg.BlackBoxBlocker()
ob = mg.OverlapBlocker()
ab = mg.AttrEquivalenceBlocker()
def _restaurants_function(x, y):
# x, y will be of type pandas series
black_list = ['for', 'the', 'of', 'a', 'an', 'and', '&', 'on', 'cafe',
'restaurant', 'grill', 'pizza', 'pizzeria', 'pub', 'bar']
# get name attribute
x_name = x['NAME'].lower()
y_name = y['NAME'].lower()
    # tokenize names
    x_name = x_name.split(' ')
    y_name = y_name.split(' ')
    # check if any non-stopword name tokens match (exactly or fuzzily)
    for index in range(len(x_name)):
        x_name[index] = x_name[index].replace(' ', '')
        if(x_name[index] in black_list):
            continue
        for z in range(len(y_name)):
            y_name[z] = y_name[z].replace(' ', '')
if(x_name[index] == "" or y_name[z] == ""):
continue
if(y_name[z] in black_list):
continue
if(x_name[index] == y_name[z]):
return False
else:
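                # The loop below computes the Levenshtein edit distance between
                # the two name tokens with a rolling one-dimensional table; a
                # distance below 3 on tokens longer than 2 characters is
                # treated as a match.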
if len(x_name[index]) > len(y_name[z]):
x_name[index], y_name[z] = y_name[z], x_name[index]
distances = range(len(x_name[index]) + 1)
for index2,char2 in enumerate(y_name[z]):
newDistances = [index2 + 1]
for index1, char1 in enumerate(x_name[index]):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1],
distances[index1 + 1],
newDistances[-1])))
distances = newDistances
if(distances[-1] < 3 and len(x_name[index]) > 2 and len(y_name[z]) > 2):
return False
return True
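# Illustrative behavior (rows are hypothetical): the BlackBoxBlocker drops a
# pair when the function returns True and keeps it when it returns False.
#   import pandas as pd
#   x = pd.Series({'NAME': 'The Venice Cafe'})
#   y = pd.Series({'NAME': 'Venice Grill'})
#   _restaurants_function(x, y)  # -> False: 'venice' matches, pair is kept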
def _bikes_function(x, y):
# x, y will be of type pandas series
# get kilometer driven attribute
x_km_driven = x['km_driven']
y_km_driven = y['km_driven']
# max_value = max(x_km_driven, y_km_driven)
# percent_weight = (PERCENT_WEIGHT_ABOVE_BELOW/100)* max_value
# check if kilometer driven difference is less than 1000
if abs(x_km_driven - y_km_driven) <= 1000:
return False
else:
return True
def _electronics_function(x, y):
try:
x_price = x['Amazon_Price']
x_price = x_price.replace(',', '')
x_price = x_price.replace('$', '')
y_price = y['Price']
y_price = y_price.replace(',', '')
y_price = y_price.replace('$', '')
x_price = float(x_price)
y_price = float(y_price)
except:
return True
if x_price > 1.4 * y_price or x_price < 0.6 * y_price:
return True
else:
return False
def _music_function(ltuple, rtuple):
    rtuple_date = None
    ltuple_date = None
    try:
        rtuple_date = rtuple['Released']
        ltuple_date = ltuple['Released']
        if len(rtuple_date) == 0 or len(ltuple_date) == 0:
            return True
        date_object1 = datetime.strptime(rtuple_date.strip(), '%B %d, %Y')
        date_object2 = datetime.strptime(ltuple_date.strip(), '%d-%b-%y')
return abs(date_object1-date_object2).days > 3
except:
return False
# The blocker function should drop tuple pairs whose ABV values are similar
# The function has to do the following steps
# 1) Get ABV attributes from each of the tuples
# 2) Check whether the ABV is a missing value
# 3) Translate ABV from string to float value
# 4) Compute and check if two ABV values are similar
def _beer_function(x, y):
# x, y will be of type pandas series
# get ABV attribute
x_ABV = x['ABV']
y_ABV = y['ABV']
# if missing value exists
if x_ABV == '-':
return False
if y_ABV == '-':
return False
# translate ABV string to float value
x_ABV_Value = float(x_ABV[0 : len(x_ABV) - 1])
y_ABV_Value = float(y_ABV[0 : len(y_ABV) - 1])
# check if two ABV values are similar by relative threshold t = 0.01
if abs(x_ABV_Value - y_ABV_Value) / max(x_ABV_Value, y_ABV_Value) > 0.01:
return True
else:
return False
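# Example (hypothetical values): x['ABV'] = '5.0%' and y['ABV'] = '5.02%'
# give |5.0 - 5.02| / 5.02 = about 0.004 <= 0.01, so the function returns
# False and the pair survives blocking.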
class TimeBlockTablesBeer:
timeout=10000.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'beer', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'beer', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'Label')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'Label')
bb.set_black_box_function(_beer_function)
except AssertionError:
print("Dataset \'beer\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables(self):
bb.block_tables(self.A, self.B, ['ABV'], ['ABV'])
def teardown(self):
del self.A
del self.B
class TimeBlockTablesBikes:
timeout = 10000.0
def setup(self):
p = mg.get_install_path()
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
self.l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
self.r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'id')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'id')
bb.set_black_box_function(_bikes_function)
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables(self):
bb.block_tables(self.A, self.B, self.l_output_attrs, self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesElectronics:
timeout = 10000.0
def setup(self):
p = mg.get_install_path()
path_for_A = os.sep.join([datasets_path, 'electronics', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'electronics', 'B.csv'])
self.A = mg.read_csv_metadata(path_for_A)
try:
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
self.l_output_attrs = ['Brand', 'Amazon_Price']
self.r_output_attrs = ['Brand', 'Price']
bb.set_black_box_function(_electronics_function)
except AssertionError:
print("Dataset \'electronics\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables(self):
bb.block_tables(self.A, self.B, self.l_output_attrs, self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesMusic:
timeout=10000.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'music', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'music', 'B.csv'])
self.l_output_attrs = ['Album_Name', 'Artist_Name', 'CopyRight',
'Released', 'Song_Name', 'Time']
self.r_output_attrs = ['Album_Name', 'Artist_Name', 'Copyright',
'Released', 'Song_Name', 'Time']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'Sno')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'Sno')
bb.set_black_box_function(_music_function)
except AssertionError:
print("Dataset \'music\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables(self):
bb.block_tables(self.A, self.B, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesRestaurants:
timeout=10000.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'restaurants', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'restaurants', 'B.csv'])
self.l_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
self.r_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
bb.set_black_box_function(_restaurants_function)
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_tables(self):
bb.block_tables(self.A, self.B, self.l_output_attrs, self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockCandsetBikes:
timeout = 300.0
def setup(self):
p = mg.get_install_path()
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'id')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'id')
C = ab.block_tables(A, B, 'city_posted', 'city_posted',
l_output_attrs, r_output_attrs)
self.D = ab.block_candset(C, 'model_year', 'model_year')
bb.set_black_box_function(_bikes_function)
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset(self):
bb.block_candset(self.D)
def teardown(self):
del self.D
class TimeBlockCandsetElectronics:
timeout = 300.0
def setup(self):
p = mg.get_install_path()
path_for_A = os.sep.join([datasets_path, 'electronics', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'electronics', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = ab.block_tables(A, B, 'Brand', 'Brand',
['Brand', 'Amazon_Price'],
['Brand', 'Price'])
bb.set_black_box_function(_electronics_function)
except AssertionError:
print("Dataset \'electronics\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset(self):
bb.block_candset(self.C)
def teardown(self):
del self.C
class TimeBlockCandsetRestaurants:
timeout=300.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'restaurants', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'restaurants', 'B.csv'])
l_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
r_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = ob.block_tables(A, B, 'NAME', 'NAME',
l_output_attrs=l_output_attrs,
r_output_attrs=r_output_attrs)
bb.set_black_box_function(_restaurants_function)
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project "
"website to download the dataset.")
raise SystemExit
def time_block_candset(self):
bb.block_candset(self.C)
def teardown(self):
del self.C
|
anhaidgroup/py_entitymatching
|
benchmarks/benchmark_blackbox_blocker.py
|
Python
|
bsd-3-clause
| 13,074
|
[
"VisIt"
] |
7741198eded2b7d2032c83f95104006db1ddd906c0dae6d3739a3753ad0f123c
|
"""
alignReads
"""
from __future__ import print_function
import subprocess
import os
import logging
logger = logging.getLogger('root')
logger.propagate = False
def alignReads(BWA_path, HG19_path, read1, read2, outfile):
sample_name = os.path.basename(outfile).split('.')[0]
output_folder = os.path.dirname(outfile)
base_name = os.path.join(output_folder, sample_name)
sam_filename = outfile
bam_filename = base_name + '.bam'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Check if genome is already indexed by bwa
index_files_extensions = ['.pac', '.amb', '.ann', '.bwt', '.sa']
genome_indexed = True
for extension in index_files_extensions:
if not os.path.isfile(HG19_path + extension):
genome_indexed = False
break
# If the genome is not already indexed, index it
if not genome_indexed:
logger.info('Genome index files not detected. Running BWA to generate indices.')
bwa_index_command = '{0} index {1}'.format(BWA_path, HG19_path)
logger.info('Running bwa command: %s', bwa_index_command)
subprocess.call(bwa_index_command.split())
logger.info('BWA genome index generated')
else:
logger.info('BWA genome index found.')
# Run paired end alignment against the genome
logger.info('Running paired end mapping for {0}'.format(sample_name))
bwa_alignment_command = '{0} mem {1} {2} {3} > {4}'.format(BWA_path, HG19_path, read1, read2, sam_filename)
samtools_sam_to_bam_command = 'samtools sort -o {0} {1}'.format(bam_filename, sam_filename)
samtools_index_command = 'samtools index {0}'.format(bam_filename)
samtools_sort_by_name_command = 'samtools sort -o {0} -n {1}'.format("".join([base_name, '_sorted.bam']), bam_filename)
# Open the outfile and redirect the output of the alignment to it.
logger.info(bwa_alignment_command)
subprocess.check_call(bwa_alignment_command, shell=True)
logger.info('Paired end mapping for {0} completed.'.format(sample_name))
# Convert SAM to BAM file
logger.info(samtools_sam_to_bam_command)
subprocess.check_call(samtools_sam_to_bam_command, shell=True)
logger.info('Sorting by coordinate position for {0} complete.'.format(sample_name))
# Index BAM file
logger.info(samtools_index_command)
subprocess.check_call(samtools_index_command, shell=True)
logger.info('Indexing for {0} complete.'.format(sample_name))
# Sort BAM file by name
logger.info(samtools_sort_by_name_command)
subprocess.check_call(samtools_sort_by_name_command, shell=True)
logger.info('Sorting for {0} by name complete.'.format(sample_name))
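# Usage sketch (all paths are placeholders, not part of the original module):
#
#   alignReads(BWA_path='/usr/bin/bwa',
#              HG19_path='/refs/hg19.fa',
#              read1='sample_R1.fastq.gz',
#              read2='sample_R2.fastq.gz',
#              outfile='output/sample.sam')
#
# This indexes the reference if needed, writes output/sample.sam, and produces
# coordinate-sorted, indexed, and name-sorted BAM files next to it.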
|
tsailabSJ/circleseq
|
circleseq/alignReads.py
|
Python
|
agpl-3.0
| 2,711
|
[
"BWA"
] |
924cfdc58a215d8feab9d401251af4f30d68b33777f6e8fe7b4bd1fcea32d687
|
import numpy
import math
import re
import csv
import os
import sys
import logging
from __main__ import vtk, qt, ctk, slicer
from random import randint
from slicer.ScriptedLoadableModule import *
class MeshStatistics(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
parent.title = 'Mesh Statistics'
parent.categories = ['Quantification']
parent.dependencies = []
parent.contributors = ['Lucie Macron']
parent.helpText = """
The goal of this module is to compute statistics on a model,
considering a specific region (defined with Pick'n Paint) or on the entire shape.
    Statistics are: Minimum Value, Maximum Value, Average, Standard Deviation, and several percentiles.
It's possible to export those values as CSV file.
Before working on Mesh Statistics, you have to compute ModelToModelDistance.
"""
parent.acknowledgementText = """
This file was originally developed by Lucie Macron, University of Michigan.
"""
self.parent = parent
class MeshStatisticsWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
print "-------Mesh Statistic Widget Setup-------"
self.moduleName = 'MeshStatistics'
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
libPath = os.path.join(scriptedModulesPath)
sys.path.insert(0, libPath)
        # import the external library that contains the functions common to all DCBIA modules
import ShapeQuantifierCore
reload(ShapeQuantifierCore)
# -------------------------------------------------------------------------------------
self.ShapeQuantifierCore = ShapeQuantifierCore.ShapeQuantifierCore(interface = self)
self.logic = MeshStatisticsLogic(self, ShapeQuantifierCore)
self.modelList = list()
self.fieldList = list()
self.ROIList = list()
self.ROIDict = dict() # Key = Name of ROI
# Value = Dictionary of Fields (key = Name of Field
# Value = dictionary of shapes
# key = name of shapes
# value = Statistics store()
# ---------------------------------------------------------------- #
# ---------------- Definition of the UI interface ---------------- #
# ---------------------------------------------------------------- #
# ------------ Loading of the .ui file ---------- #
loader = qt.QUiLoader()
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' %self.moduleName)
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load(qfile, self.parent)
self.layout = self.parent.layout()
self.widget = widget
self.layout.addWidget(widget)
# ------------------------------------------------------------------------------------
# SHAPES INPUT
# ------------------------------------------------------------------------------------
self.inputComboBox = self.ShapeQuantifierCore.get("inputComboBox")
self.inputComboBox.setMRMLScene(slicer.mrmlScene)
self.inputComboBox.connect('checkedNodesChanged()', self.onInputComboBoxCheckedNodesChanged)
# ------------------------------------------------------------------------------------
# ROI TABLE
# ------------------------------------------------------------------------------------
self.ROIComboBox = self.ShapeQuantifierCore.get("ROIComboBox")
self.ROICheckBox = self.ShapeQuantifierCore.get("ROICheckBox")
self.ROICheckBox.connect('stateChanged(int)', self.onROICheckBoxStateChanged)
# ------------------------------------------------------------------------------------
# FIELD TABLE
# ------------------------------------------------------------------------------------
self.tableField = self.ShapeQuantifierCore.get("tableField")
self.tableField.setColumnCount(2)
self.tableField.setMinimumHeight(250)
self.tableField.setHorizontalHeaderLabels([' ', ' Field Name '])
self.tableField.setColumnWidth(0, 20)
self.tableField.setColumnWidth(1, 260)
self.tableField.setSizePolicy(qt.QSizePolicy().Expanding, qt.QSizePolicy().Expanding)
# ------------------------------------------------------------------------------------
# RUN
# ------------------------------------------------------------------------------------
self.runButton = self.ShapeQuantifierCore.get("runButton")
self.runButton.connect('clicked()', self.onRunButton)
# ------------------------------------------------------------------------------------
# Statistics Table - Export
# ------------------------------------------------------------------------------------
self.mainLayout = self.ShapeQuantifierCore.get("mainLayout")
self.tabROI = qt.QTabWidget()
self.tabROI.setTabPosition(0)
self.tabROI.adjustSize()
# ---------------------------- Directory - Export Button -----------------------------
self.directoryExport = ctk.ctkDirectoryButton()
self.exportCheckBox = qt.QCheckBox('Separate Files')
self.exportCheckBox.setChecked(True)
self.exportButton = qt.QPushButton(' Export ')
self.exportButton.enabled = True
self.exportPointValueCheckBox = qt.QCheckBox('Export Value on Each Point')
self.exportLayout = qt.QVBoxLayout()
self.directoryAndExportLayout = qt.QHBoxLayout()
self.directoryAndExportLayout.addWidget(self.directoryExport)
self.directoryAndExportLayout.addWidget(self.exportCheckBox)
self.directoryAndExportLayout.addWidget(self.exportPointValueCheckBox)
self.exportButtonsLayout = qt.QHBoxLayout()
self.exportButtonsLayout.addWidget(self.exportButton)
self.exportLayout.addLayout(self.directoryAndExportLayout)
self.exportLayout.addLayout(self.exportButtonsLayout)
self.layout.addStretch(1)
self.logic.updateInterface(self.tableField, self.ROIComboBox, self.ROIList, self.modelList, self.mainLayout)
# ------------------------------------------------------------------------------------
# OBSERVERS
# ------------------------------------------------------------------------------------
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
def onCloseScene(self, obj, event):
# initialize Parameters
self.modelList = list()
self.fieldList = list()
self.ROIList = list()
self.ROIDict = dict()
self.ROIComboBox.clear()
self.tableField.clearContents()
self.tableField.setRowCount(0)
self.tableField.setRowCount(1)
self.tableField.setSpan(0,0,1,2)
        label = qt.QLabel(' Please select at least one model! ')
        label.setStyleSheet('QLabel{ qproperty-alignment: AlignCenter; }')
self.tableField.setCellWidget(0, 0, label)
def onInputComboBoxCheckedNodesChanged(self):
self.modelList = self.inputComboBox.checkedNodes()
self.runButton.enabled = not self.inputComboBox.noneChecked()
self.logic.updateInterface(self.tableField, self.ROIComboBox, self.ROIList, self.modelList, self.mainLayout)
def onROICheckBoxStateChanged(self, intCheckState):
# intCheckState == 2 when checked
# intCheckState == 0 when unchecked
if intCheckState == 2:
self.ROIComboBox.setEnabled(False)
else:
if intCheckState == 0:
self.ROIComboBox.setEnabled(True)
def onRunButton(self):
self.ROIDict.clear()
if self.modelList:
self.logic.removeTable(self.mainLayout, self.tabROI)
self.exportButton.disconnect('clicked()', self.onExportButton)
self.mainLayout.removeWidget(self.exportButton)
self.mainLayout.removeItem(self.exportLayout)
self.logic.displayStatistics(self.ROICheckBox.isChecked(), self.ROIList, self.ROIDict, self.ROIComboBox,
self.tableField, self.modelList, self.tabROI, self.mainLayout)
self.mainLayout.addLayout(self.exportLayout)
self.exportButton.connect('clicked()', self.onExportButton)
def onExportButton(self):
self.logic.exportationFunction(self.directoryExport, self.exportCheckBox.isChecked(), self.ROIDict)
if self.exportPointValueCheckBox.isChecked():
self.logic.ExportationValueOnEachPoint(self.directoryExport, self.ROIDict)
class MeshStatisticsLogic(ScriptedLoadableModuleLogic):
class StatisticStore(object):
def __init__(self):
self.min = 0
self.max = 0
self.mean = 0
self.std = 0
self.percentile5 = 0
self.percentile15 = 0
self.percentile25 = 0
self.percentile50 = 0
self.percentile75 = 0
self.percentile85 = 0
self.percentile95 = 0
def __init__(self, interface=None, ShapeQuantifierCore = None):
self.ShapeQuantifierCore = ShapeQuantifierCore
self.interface = interface
self.numberOfDecimals = 3
system = qt.QLocale().system()
self.decimalPoint = chr(system.decimalPoint())
def updateInterface(self, tableField, ROIComboBox, ROIList, modelList, layout):
tableField.clearContents()
tableField.setRowCount(0)
ROIComboBox.clear()
ROIComboBox.addItem('Entire Model')
del ROIList[:]
ROIList.append('Entire Model')
tableFieldNumRows = 0
expression = '_ROI'
if tableField.rowCount == 0:
tableField.setRowCount(1)
tableField.setSpan(0,0,1,2)
            label = qt.QLabel(' Please select at least one model! ')
            label.setStyleSheet('QLabel{ qproperty-alignment: AlignCenter; }')
tableField.setCellWidget(tableFieldNumRows, 0, label)
if modelList:
tableField.setSpan(0,0,1,1)
numberOfArrayList = list()
for model in modelList:
numberOfArrayList.append(model.GetPolyData().GetPointData().GetNumberOfArrays())
# set the model with the higher number of fields as reference
modelOfReference = modelList[numberOfArrayList.index(max(numberOfArrayList))]
PointDataOfReference = modelOfReference.GetPolyData().GetPointData()
numOfArrayOfReference = PointDataOfReference.GetNumberOfArrays()
fieldInCommon = list()
fieldNotInCommon = []
fieldNameOfRefList = list()
fieldModel = list()
del fieldNameOfRefList[:]
for i in range(0, numOfArrayOfReference):
if PointDataOfReference.GetArray(i).GetNumberOfComponents() == 1:
fieldNameOfRefList.append(PointDataOfReference.GetArray(i).GetName())
fieldInCommon.append(PointDataOfReference.GetArray(i).GetName())
if modelList.__len__() > 1:
for model in modelList:
del fieldModel[:]
if model.GetID() != modelOfReference.GetID():
numOfArray = model.GetPolyData().GetPointData().GetNumberOfArrays()
for i in range(0, numOfArray):
if model.GetPolyData().GetPointData().GetArray(i).GetNumberOfComponents() == 1:
fieldModel.append(model.GetPolyData().GetPointData().GetArray(i).GetName())
fieldInCommon, tempFieldNotInCommon = self.compareList(fieldInCommon, fieldModel)
fieldNotInCommon = fieldNotInCommon + tempFieldNotInCommon
for arrayName in set(fieldInCommon):
if not re.search(expression, arrayName):
tableFieldNumRows += 1
tableField.setMinimumHeight(tableFieldNumRows*35)
tableField.setRowCount(tableFieldNumRows)
tableField.setCellWidget(tableFieldNumRows - 1, 0, qt.QCheckBox())
label = qt.QLabel(arrayName)
label.setStyleSheet(' QLabel{qproperty-alignment: AlignVCenter | AlignLeft; }')
tableField.setCellWidget(tableFieldNumRows - 1, 1, label)
else:
ROIComboBox.addItem(arrayName)
ROIList.append(arrayName)
for arrayName in set(fieldNotInCommon):
if not re.search(expression, arrayName):
tableFieldNumRows += 1
tableField.setMinimumHeight(tableFieldNumRows*35)
tableField.setRowCount(tableFieldNumRows)
label = qt.QLabel(arrayName)
label.setStyleSheet(' QLabel{ font-style:oblique; text-decoration:line-through; }')
tableField.setCellWidget(tableFieldNumRows - 1, 1, label )
layout.addStretch(1)
def compareList(self, list1, list2):
ListInCommon = list(set(list1) & set(list2))
ListNotInCommon = (list(set(list1) - set(list2)) + list(set(list2) - set(list1)))
return ListInCommon, ListNotInCommon
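    # Example: compareList(['a', 'b'], ['b', 'c']) returns (['b'], ['a', 'c'])
    # (element order within each list is unspecified because sets are used).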
def defineStatisticsTable(self, fieldDictionaryValue):
statTable = qt.QTableWidget()
numberOfRows = fieldDictionaryValue.__len__()
statTable.setRowCount(numberOfRows)
i = numberOfRows - 1
statTable.setMinimumHeight(numberOfRows*35)
statTable.setMinimumWidth(55)
statTable.setColumnCount(12)
statTable.setHorizontalHeaderLabels(['Model','Min','Max','Mean','SD','Per5','Per15','Per25','Per50','Per75','Per85','Per95'])
# Add Values:
for key, value in fieldDictionaryValue.iteritems():
statTable.setCellWidget(i, 0, qt.QLabel(key))
statTable.setCellWidget(i, 1, qt.QLabel(value.min))
statTable.cellWidget(i,1).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 2, qt.QLabel(value.max))
statTable.cellWidget(i,2).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 3, qt.QLabel(value.mean))
statTable.cellWidget(i,3).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 4, qt.QLabel(value.std))
statTable.cellWidget(i,4).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 5, qt.QLabel(value.percentile5))
statTable.cellWidget(i,5).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 6, qt.QLabel(value.percentile15))
statTable.cellWidget(i,6).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 7, qt.QLabel(value.percentile25))
statTable.cellWidget(i,7).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 8, qt.QLabel(value.percentile50))
statTable.cellWidget(i,8).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 9, qt.QLabel(value.percentile75))
statTable.cellWidget(i,9).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 10, qt.QLabel(value.percentile85))
statTable.cellWidget(i,10).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
statTable.setCellWidget(i, 11, qt.QLabel(value.percentile95))
statTable.cellWidget(i,11).setStyleSheet(' QLabel{ qproperty-alignment: AlignCenter;}')
i -= 1
statTable.resizeColumnToContents(0)
return statTable
def updateTable(self, ROIDict, tabROI, layout):
tabROI.setMinimumWidth(100*ROIDict.__len__())
for ROIName, FieldDict in ROIDict.iteritems():
tab = qt.QTabWidget()
tab.adjustSize()
tab.setTabPosition(1)
for fieldName, fieldDictValue in FieldDict.iteritems():
statisticsTable = self.defineStatisticsTable(fieldDictValue)
tab.addTab(statisticsTable, fieldName)
tabROI.addTab(tab, ROIName)
layout.addWidget(tabROI)
def displayStatistics(self, ROICheckBoxState, ROIList, ROIDict, ROIComboBox, tableField, modelList, tabROI, layout):
if ROICheckBoxState:
for ROIName in ROIList:
if not ROIDict.has_key(ROIName):
ROIDict[ROIName] = dict()
else:
ROIToCompute = ROIComboBox.currentText.encode('utf-8')
if not ROIDict.has_key(ROIToCompute):
ROIDict[ROIToCompute] = dict()
numberOfRowField = tableField.rowCount
for ROIName, ROIFieldDict in ROIDict.iteritems():
for i in range(0, numberOfRowField):
widget = tableField.cellWidget(i, 0)
if widget and widget.isChecked():
ROIFieldDict[tableField.cellWidget(i, 1).text.encode('utf-8')] = dict()
for fieldName, fieldValue in ROIFieldDict.iteritems():
for shape in modelList:
activePointData = shape.GetModelDisplayNode().GetInputPolyData().GetPointData()
fieldArray = activePointData.GetArray(fieldName)
fieldValue[shape.GetName()] = self.StatisticStore()
if ROIName == 'Entire Model':
self.computeAll(fieldArray, fieldValue[shape.GetName()], 'None')
else:
ROIArray = activePointData.GetArray(ROIName)
self.computeAll(fieldArray, fieldValue[shape.GetName()], ROIArray)
self.updateTable(ROIDict, tabROI, layout)
def removeTable(self, layout, tabROI):
# Remove table if it already exists:
indexWidgetTabROI = layout.indexOf(tabROI)
if indexWidgetTabROI != -1:
for i in range(0, tabROI.count):
tabWidget = tabROI.widget(i)
for i in range(0, tabWidget.count):
tableWidget = tabWidget.widget(i)
tableWidget.clearContents()
tableWidget.setRowCount(0)
tabWidget.clear()
tabROI.clear()
def defineArray(self, fieldArray, ROIArray):
        # Build an array of values from fieldArray (all the distances computed
        # by ModelToModelDistance), using ROIArray as a mask.
        # Return a numpy.array so numpy's methods can be used for statistics.
        valueList = list()
        bool = True
        if ROIArray == 'None':
            for i in range(0, fieldArray.GetNumberOfTuples()):
                valueList.append(fieldArray.GetValue(i))
        else:
            if ROIArray.GetNumberOfTuples() != fieldArray.GetNumberOfTuples():
                print 'Size of ROIArray and fieldArray are not the same!!!'
                bool = False
            else:
                for i in range(0, fieldArray.GetNumberOfTuples()):
                    if ROIArray.GetValue(i) == 1.0:
                        valueList.append(fieldArray.GetValue(i))
        # Convert outside the branches so valueArray is always defined, even
        # when the sizes do not match and bool is False.
        valueArray = numpy.array(valueList)
        return bool, valueArray
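    # Example (illustrative): with fieldArray holding [1.0, 2.0, 3.0] and
    # ROIArray holding [1.0, 0.0, 1.0], the result is (True, array([1., 3.])).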
def computeMean(self, valueArray):
# valueArray is an array in which values to compute statistics on are stored
return round(numpy.mean(valueArray), self.numberOfDecimals)
def computeMinMax(self, valueArray):
# valueArray is an array in which values to compute statistics on are stored
return round(numpy.min(valueArray), self.numberOfDecimals), round(numpy.max(valueArray), self.numberOfDecimals)
def computeStandardDeviation(self, valueArray):
# valueArray is an array in which values to compute statistics on are stored
return round(numpy.std(valueArray), self.numberOfDecimals)
def computePercentile(self, valueArray, percent):
# Function to compute different percentile
# valueArray is an array in which values to compute statistics on are stored
# percent is a value between 0 and 1
# The lowest value is taken
        valueArray = numpy.sort(valueArray)
        index = (valueArray.size * percent) - 1
        # cast to int: numpy arrays must be indexed with integers
        ceilIndex = int(math.ceil(index))
        return round(valueArray[ceilIndex], self.numberOfDecimals)
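    # Worked example: for values 1..1000 and percent = 0.05,
    # index = 1000 * 0.05 - 1 = 49.0, ceil -> 49, and valueArray[49] == 50.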
def computeAll(self, fieldArray, fieldState, ROIArray):
bool, array = self.defineArray(fieldArray, ROIArray)
        if len(array) == 0:
slicer.util.errorDisplay("The ROI is empty")
return
if bool:
fieldState.min, fieldState.max = self.computeMinMax(array)
fieldState.mean = self.computeMean(array)
fieldState.std = self.computeStandardDeviation(array)
fieldState.percentile5 = self.computePercentile(array, 0.05)
fieldState.percentile15 = self.computePercentile(array, 0.15)
fieldState.percentile25 = self.computePercentile(array, 0.25)
fieldState.percentile50 = self.computePercentile(array, 0.50)
fieldState.percentile75 = self.computePercentile(array, 0.75)
fieldState.percentile85 = self.computePercentile(array, 0.85)
fieldState.percentile95 = self.computePercentile(array, 0.95)
def writeFieldFile(self, fileWriter, modelDict):
        # Function defined to export all statistics of a field considering a file writer (fileWriter)
        # and a dictionary of models (modelDict) where statistics are stored
for shapeName, shapeStats in modelDict.iteritems():
fileWriter.writerow([shapeName,
shapeStats.min,
shapeStats.max,
shapeStats.mean,
shapeStats.std,
shapeStats.percentile5,
shapeStats.percentile15,
shapeStats.percentile25,
shapeStats.percentile50,
shapeStats.percentile75,
shapeStats.percentile85,
shapeStats.percentile95])
def exportAllAsCSV(self, filename, ROIName, ROIDictValue):
# Export all fields on the same csv file considering a region
file = open(filename, 'w')
cw = csv.writer(file, delimiter=',')
cw.writerow([ROIName])
cw.writerow([' '])
for fieldName, shapeDict in sorted(ROIDictValue.iteritems()):
cw.writerow([fieldName])
cw.writerow(['Model','Min','Max','Mean','SD','Per5','Per15','Per25','Per50','Per75','Per85','Per95'])
self.writeFieldFile(cw, shapeDict)
cw.writerow([' '])
file.close()
if self.decimalPoint != '.':
self.replaceCharac(filename, ',', ';') # change the Delimiter and put a semicolon instead of a comma
self.replaceCharac(filename, '.', self.decimalPoint) # change the decimal separator '.' for a comma
def exportFieldAsCSV(self, filename, fieldName, shapeDict):
# Export fields on different csv files
file = open(filename, 'w')
cw = csv.writer(file, delimiter=',')
cw.writerow([fieldName])
cw.writerow(['Model','Min','Max','Mean','SD','Per5','Per15','Per25','Per50','Per75','Per85','Per95'])
self.writeFieldFile(cw, shapeDict)
file.close()
if self.decimalPoint != '.':
self.replaceCharac(filename, ',', ';') # change the Delimiter and put a semicolon instead of a comma
self.replaceCharac(filename, '.', self.decimalPoint) # change the decimal separator '.' for a comma
def exportPointValueAsCSV(self, filename, fieldArray, ROIArray):
#Exportation of the value stored for each point:
file = open(filename, 'w')
cw = csv.writer(file, delimiter=',')
bool, arrayToReturn = self.defineArray(fieldArray, ROIArray)
        if len(arrayToReturn) == 0:
slicer.util.errorDisplay("The ROI is empty")
return
if bool:
for value in arrayToReturn:
cw.writerow([value])
file.close()
if self.decimalPoint != '.':
self.replaceCharac(filename, ',', ';') # change the Delimiter and put a semicolon instead of a comma
self.replaceCharac(filename, '.', self.decimalPoint) # change the decimal separator '.' for a comma
def replaceCharac(self, filename, oldCharac, newCharac):
        # Function to replace a character (oldCharac) in a file (filename) by a new one (newCharac)
        with open(filename, 'r') as file:
            lines = [line.replace(oldCharac, newCharac) for line in file.readlines()]
        with open(filename, 'w') as file:
            file.writelines(lines)
def exportationFunction(self, directoryExport, exportCheckBoxState, ROIDict):
directory = directoryExport.directory.encode('utf-8')
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
if exportCheckBoxState: # if exportation in different files
for ROIName, ROIDictValue in sorted(ROIDict.iteritems()):
directoryFolder = directory + '/' + ROIName
if not os.path.exists(directoryFolder):
os.mkdir(directoryFolder)
for fieldName, modelDict in sorted(ROIDictValue.iteritems()):
filename = directoryFolder + '/' + fieldName + '.csv'
if os.path.exists(filename):
messageBox.setText('On folder ' + ROIName + ', file ' + fieldName + '.csv already exists.')
messageBox.setInformativeText('Do you want to replace it on ' + ROIName + '?')
messageBox.setStandardButtons(messageBox.NoToAll | messageBox.No | messageBox.YesToAll | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.NoToAll:
return True
if choice == messageBox.Yes:
self.exportFieldAsCSV(filename, fieldName, modelDict)
if choice == messageBox.YesToAll:
for ROIName, ROIDictValue in sorted(ROIDict.iteritems()):
directoryFolder = directory + '/' + ROIName
if not os.path.exists(directoryFolder):
os.mkdir(directoryFolder)
for fieldName, shapeDict in sorted(ROIDictValue.iteritems()):
filename = directoryFolder + '/' + fieldName + '.csv'
self.exportFieldAsCSV(filename, fieldName, shapeDict)
return True
else:
self.exportFieldAsCSV(filename, fieldName, modelDict)
else:
for ROIName, ROIDictValue in sorted(ROIDict.iteritems()):
filename = directory + '/' + ROIName + '.csv'
if os.path.exists(filename):
messageBox.setText('File ' + ROIName + '.csv already exists in this folder.')
messageBox.setInformativeText('Do you want to replace it? ')
messageBox.setStandardButtons(messageBox.NoToAll | messageBox.No | messageBox.YesToAll | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.NoToAll:
return True
if choice == messageBox.Yes:
self.exportAllAsCSV(filename, ROIName, ROIDictValue)
if choice == messageBox.YesToAll:
for ROIName, ROIDictValue in sorted(ROIDict.iteritems()):
filename = directory + '/' + ROIName + '.csv'
self.exportAllAsCSV(filename, ROIName, ROIDictValue)
return True
else:
self.exportAllAsCSV(filename, ROIName, ROIDictValue)
def ExportationValueOnEachPoint(self,directoryExport, ROIDict):
directory = directoryExport.directory.encode('utf-8')
directoryPointValuesFolder = directory + '/ValuesOnEachPoint'
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
if not os.path.exists(directoryPointValuesFolder):
os.mkdir(directoryPointValuesFolder)
for ROIName, ROIDictValue in sorted(ROIDict.iteritems()):
if ROIName != 'Entire Model':
directoryFolder = directoryPointValuesFolder + '/' + ROIName
if not os.path.exists(directoryFolder):
os.mkdir(directoryFolder)
for fieldName, modelDict in sorted(ROIDictValue.iteritems()):
directoryFilename = directoryFolder + '/' + fieldName
if not os.path.exists(directoryFilename):
os.mkdir(directoryFilename)
for modelName in modelDict.iterkeys():
filename = directoryFilename + '/' + modelName + '.csv'
if os.path.exists(filename):
messageBox.setText('File ' + fieldName + '.csv already exist for the model ' + modelName)
                            messageBox.setInformativeText('Do you want to replace it?')
messageBox.setStandardButtons(messageBox.NoToAll | messageBox.No | messageBox.YesToAll | messageBox.Yes)
choice = messageBox.exec_()
if choice == messageBox.NoToAll:
return True
if choice == messageBox.Yes:
pointData = slicer.util.getNode(modelName).GetModelDisplayNode().GetInputPolyData().GetPointData()
fieldArray = pointData.GetArray(fieldName)
ROIArray = pointData.GetArray(ROIName)
self.exportPointValueAsCSV(filename, fieldArray, ROIArray)
if choice == messageBox.YesToAll:
for fieldName, modelDict in sorted(ROIDictValue.iteritems()):
for modelName in modelDict.iterkeys():
filename = directoryFilename + '/' + modelName + '.csv'
pointData = slicer.util.getNode(modelName).GetModelDisplayNode().GetInputPolyData().GetPointData()
fieldArray = pointData.GetArray(fieldName)
ROIArray = pointData.GetArray(ROIName)
self.exportPointValueAsCSV(filename, fieldArray, ROIArray)
return True
else:
pointData = slicer.util.getNode(modelName).GetModelDisplayNode().GetInputPolyData().GetPointData()
fieldArray = pointData.GetArray(fieldName)
ROIArray = pointData.GetArray(ROIName)
self.exportPointValueAsCSV(filename, fieldArray, ROIArray)
class MeshStatisticsTest(ScriptedLoadableModuleTest):
def setUp(self):
# reset the state - clear scene
self.widget = slicer.modules.MeshStatisticsWidget
slicer.mrmlScene.Clear(0)
def runTest(self):
# run all tests needed
self.delayDisplay("Clear the scene")
self.setUp()
self.delayDisplay("Download and load datas")
self.downloaddata()
self.delayDisplay("Starting the tests")
self.delayDisplay("Test1: Test Min Max Mean Functions")
self.assertTrue(self.testMinMaxMeanFunctions())
self.delayDisplay("Test2: Test Percentile Function")
self.assertTrue(self.testPercentileFunction())
self.delayDisplay("Test3: Test storage of Values Function")
self.assertTrue(self.testStorageValue())
self.delayDisplay("Test4: Test on entire models")
self.delayDisplay("Test4-1: Test on T1toT2")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T1toT2").GetItemAsObject(0),
0, ["AbsolutePointToPointDistance",
"PointToPointAlongZ",
"SignedMagNormDirDistance",""],
[[0.039, 5.766, 1.152, 0.821, 0.258, 0.459, 0.627, 0.958, 1.5, 1.727, 2.59],
[-3.631, 1.187, -0.478, 0.787, -1.912, -1.279, -0.854, -0.336, 0.03, 0.218, 0.57],
[-5.62, 0.947, -0.225, 0.786, -1.616, -0.542, -0.296, -0.037, 0.099, 0.238, 0.485],
[]],
"Test4-1"))
self.delayDisplay("Test4-2: Test on T1toT3")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T1toT3").GetItemAsObject(0),
0, ["AbsoluteMagNormDirDistance",
"AbsolutePointToPointDistance",
"PointToPointAlongY",
"SignedPointToPointDistance",""],
[[0.001, 6.14, 0.54, 0.779, 0.018, 0.059, 0.11, 0.264, 0.558, 0.904, 2.328],
[0.016, 6.45, 1.696, 0.805, 0.347, 0.736, 1.16, 1.797, 2.213, 2.336, 2.828],
[-4.919, 0.897, -0.15, 0.704, -1.196, -0.719, -0.532, -0.026, 0.356, 0.472, 0.613],
[-6.45, 3.217, -0.218, 1.865, -2.78, -2.239, -1.943, -0.43, 1.696, 2.046, 2.301],
[]],
"Test4-2"))
self.delayDisplay("Test4-3: Test on T2toT3")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T2toT3").GetItemAsObject(0),
0, ["PointToPointAlongX",
"PointToPointAlongY",
"PointToPointAlongZ",""],
[[-2.542, 2.153, -0.233, 0.933, -1.802, -1.343, -0.929, -0.069, 0.386, 0.647, 1.273],
[-2.63, 2.266, 0.159, 0.923, -1.38, -0.904, -0.513, 0.309, 0.912, 1.074, 1.394],
[-3.431, 1.172, -0.956, 0.924, -2.388, -2.04, -1.665, -0.916, -0.28, 0.048, 0.626],
[]],
"Test4-3"))
self.delayDisplay("Test5: Test on a ROI")
self.delayDisplay("Test5-1: Test on T1toT2")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T1toT2").GetItemAsObject(0),
1, ["AbsolutePointToPointDistance",
"PointToPointAlongZ",
"SignedMagNormDirDistance",""],
[[0.214, 4.152, 1.56, 0.671, 0.389, 0.895, 1.131, 1.584, 1.919, 2.063, 2.498],
[-3.025, 1.159, -0.639, 0.986, -1.955, -1.687, -1.584, -0.294, 0.135, 0.396, 0.716],
[-3.666, 0.947, -0.24, 0.807, -2.302, -0.667, -0.496, -0.076, 0.247, 0.377, 0.754],
[]],
"Test5-1"))
self.delayDisplay("Test5-2: Test on T1toT3")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T1toT3").GetItemAsObject(0),
1, ["AbsoluteMagNormDirDistance",
"AbsolutePointToPointDistance",
"PointToPointAlongY",
"SignedPointToPointDistance",""],
[[0.001, 4.031, 0.887, 0.98, 0.057, 0.144, 0.232, 0.519, 0.94, 2.255, 3.202],
[1.529, 4.344, 2.293, 0.553, 1.608, 1.765, 1.879, 2.175, 2.544, 2.875, 3.412],
[-3.537, 0.806, -0.515, 0.914, -2.439, -1.552, -0.894, -0.256, 0.14, 0.288, 0.572],
[-4.344, 2.74, -1.265, 1.991, -3.412, -2.875, -2.489, -2.051, 1.583, 1.749, 2.363],
[]],
"Test5-2"))
self.delayDisplay("Test5-3: Test on T2toT3")
self.assertTrue(self.testOnMesh(slicer.mrmlScene.GetNodesByName("T2toT3").GetItemAsObject(0),
1, ["PointToPointAlongX",
"PointToPointAlongY",
"PointToPointAlongZ",""],
[[-2.542, 2.153, 0.203, 1.306, -2.003, -1.563, -0.975, 0.473, 1.268, 1.698, 1.999],
[-2.593, 1.354, -0.315, 1.02, -2.201, -1.313, -1.138, -0.319, 0.651, 0.873, 0.995],
[-3.431, 0.582, -1.32, 1.04, -3.036, -2.22, -2.097, -1.62, -0.199, -0.1, 0.03],
[]],
"Test5-3"))
self.delayDisplay("All test passed!")
def downloaddata(self):
import urllib
downloads = (
('http://slicer.kitware.com/midas3/download?items=240003', 'T1toT2.vtk', slicer.util.loadModel),
('http://slicer.kitware.com/midas3/download?items=240002', 'T1toT3.vtk', slicer.util.loadModel),
('http://slicer.kitware.com/midas3/download?items=240001', 'T2toT3.vtk', slicer.util.loadModel),
)
for url, name, loader in downloads:
filePath = slicer.app.temporaryPath + '/' + name
print filePath
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
logging.info('Requesting download %s from %s...\n' % (name, url))
urllib.urlretrieve(url, filePath)
if loader:
logging.info('Loading %s...' % (name,))
loader(filePath)
layoutManager = slicer.app.layoutManager()
threeDWidget = layoutManager.threeDWidget(0)
threeDView = threeDWidget.threeDView()
threeDView.resetFocalPoint()
def defineArrays(self, logic, firstValue, lastValue):
arrayValue = vtk.vtkDoubleArray()
ROIArray = vtk.vtkDoubleArray()
for i in range(firstValue, lastValue):
arrayValue.InsertNextValue(i)
ROIArray.InsertNextValue(1.0)
bool, array = logic.defineArray(arrayValue, ROIArray)
if bool :
return array
return False
def testStorageValue(self):
logic = MeshStatisticsLogic()
print ' Test storage of Values: '
arrayValue = vtk.vtkDoubleArray()
arrayMask = vtk.vtkDoubleArray()
for i in range(0, 1000, 2):
arrayValue.InsertNextValue(i)
arrayValue.InsertNextValue(i)
arrayMask.InsertNextValue(0.0)
arrayMask.InsertNextValue(0.0)
listOfRandomNumber = list()
del listOfRandomNumber[:]
for i in range(0, 250):
listOfRandomNumber.append(randint(0, 998))
listOfRandomNumber = list(set(listOfRandomNumber))
listOfRandomNumber = sorted(listOfRandomNumber)
for index in listOfRandomNumber:
arrayMask.SetValue(index, 1.0)
bool, array = logic.defineArray(arrayValue, arrayMask)
array = sorted(array)
a = 0
for i in listOfRandomNumber:
if arrayValue.GetValue(i) != array[a]:
print ' Failed', a, array[a], i, arrayValue.GetValue(i)
return False
a += 1
print ' Passed'
return True
def testMinMaxMeanFunctions(self):
logic = MeshStatisticsLogic()
print 'Test min, max, mean, and std: '
array = self.defineArrays(logic, 1, 1001)
min, max = logic.computeMinMax(array)
mean = logic.computeMean(array)
std = logic.computeStandardDeviation(array)
print 'min=', min, 'max=', max, 'mean=', mean, 'std=', std
if min != 1.0 or max != 1000.0 or mean != 500.5 or std != 288.675:
print ' Failed! '
return False
else:
print ' Passed! '
return True
def testPercentileFunction(self):
logic = MeshStatisticsLogic()
# pair number of value:
print ' TEST Percentile '
print ' TEST Pair number of values '
array = self.defineArrays(logic, 1, 1001)
percentile5 = logic.computePercentile(array, 0.05)
percentile15 = logic.computePercentile(array, 0.15)
percentile25 = logic.computePercentile(array, 0.25)
percentile50 = logic.computePercentile(array, 0.50)
percentile75 = logic.computePercentile(array, 0.75)
percentile85 = logic.computePercentile(array, 0.85)
percentile95 = logic.computePercentile(array, 0.95)
if percentile5 != 50 or percentile15 != 150 or percentile25 != 250 or percentile50 != 500 or percentile75 != 750 or percentile85 != 850 or percentile95 != 950:
print ' Failed ! '
return False
else:
print ' Passed'
# odd number of value:
print ' TEST Odd number of values '
array = self.defineArrays(logic, 1, 1000)
percentile5 = logic.computePercentile(array, 0.05)
percentile15 = logic.computePercentile(array, 0.15)
percentile25 = logic.computePercentile(array, 0.25)
percentile50 = logic.computePercentile(array, 0.50)
percentile75 = logic.computePercentile(array, 0.75)
percentile85 = logic.computePercentile(array, 0.85)
percentile95 = logic.computePercentile(array, 0.95)
if percentile5 != 50 or percentile15 != 150 or percentile25 != 250 or percentile50 != 500 or percentile75 != 750 or percentile85 != 850 or percentile95 != 950:
print ' Failed ! '
return False
else:
print ' Passed! '
return True
def testOnMesh(self, model, indexOfTheRegionConsidered, fieldToCheck, measurements, NameOftheTest):
self.widget.inputComboBox.setCheckState(model, 2)
self.widget.ROIComboBox.setCurrentIndex(indexOfTheRegionConsidered)
for i in range(0, 7):
widget = self.widget.tableField.cellWidget(i, 0)
widget.setChecked(True)
self.widget.runButton.click()
for ROIName, ROIDictValue in self.widget.ROIDict.iteritems():
i = 0
for fieldName, modelDict in sorted(ROIDictValue.iteritems()):
if fieldName == fieldToCheck[i]:
self.delayDisplay(NameOftheTest + "-" + str(i+1) + ": test on " + fieldName)
for a in modelDict.iteritems():
if measurements[i] != [a[1].min, a[1].max, a[1].mean, a[1].std, a[1].percentile5,
a[1].percentile15, a[1].percentile25, a[1].percentile50, a[1].percentile75,
a[1].percentile85, a[1].percentile95]:
print measurements[i]
print [a[1].min, a[1].max, a[1].mean, a[1].std, a[1].percentile5,
a[1].percentile15, a[1].percentile25, a[1].percentile50, a[1].percentile75,
a[1].percentile85, a[1].percentile95]
return False
i = i + 1
self.widget.inputComboBox.setCheckState(model, 0)
return True
|
jbvimort/LongitudinalQuantificationExtension
|
MeshStatistics/MeshStatistics.py
|
Python
|
apache-2.0
| 45,519
|
[
"VTK"
] |
bd367ddadba88354056e83193a8688bb985e9d1fc5f1c537e7b20ffb2555e235
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""JSON Schemas for request/response pairs used with Viewfinder ops.
All times are expressed in seconds (possibly subsecond floating-point
precision) since the epoch (January 1st, 1970) in UTC.
Every mutating request may be run synchronously by specifying
'synchronous' = True in the request header.
"""
__authors__ = ['spencer@emailscrubbed.com (Spencer Kimball)',
'andy@emailscrubbed.com (Andy Kimball)']
from copy import deepcopy
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.viewpoint import Viewpoint
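# Illustrative mutating request headers (values are placeholders), run
# synchronously per the module docstring above:
#
#   {'headers': {'version': 1, 'synchronous': True}}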
##
# HELPER METHODS
##
def _MakeOptional(property_dict, test_key):
"""Iterates through all key/value pairs in the property dictionary. If the "test_key" function
returns True for a particular property key, then makes that property optional. Returns the
updated property dict.
"""
for key, value in property_dict.items():
if test_key(key):
property_dict[key]['required'] = False
return property_dict
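# Example (illustrative; the dict and key pattern are hypothetical):
#
#   props = {'start_key': {'type': 'string'}, 'id': {'type': 'string'}}
#   _MakeOptional(props, lambda key: key.endswith('_key'))
#   # props['start_key'] now carries 'required': False; 'id' is untouched.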
def _CopyProperties(target_dict, source_dict):
"""Deep copies properties in source_dict['properties'] to target_dict['properties']. Asserts
if a property of the same name already exists in target_dict['properties'], but has a
different value.
"""
for key, value in source_dict['properties'].items():
assert key not in target_dict['properties'] or target_dict['properties'][key] == value, (source_dict, target_dict)
target_dict['properties'][key] = deepcopy(value)
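# Example (illustrative): a property already present in the target with the
# same value is tolerated; a conflicting value would trip the assert.
#
#   target = {'properties': {'a': {'type': 'string'}}}
#   source = {'properties': {'a': {'type': 'string'}, 'b': {'type': 'number'}}}
#   _CopyProperties(target, source)  # target now also has 'b'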
##
# COMMON DATA STRUCTURES
##
# Headers object that must be at the top-level of every request or response message.
HEADERS = {
'description': 'contains headers used for read-only methods',
'type': 'object',
'properties': {
'version': {
'description': 'version of the message format; this is not necessarily equal '
'to the max version supported by the sender; for example, the server will '
'respond in an older dialect to older clients',
'type': 'integer',
},
'min_required_version': {
'description': 'this field requires that the recipient be able to understand '
'messages of this version or greater; if not, the recipient may report an '
'error (server), or it may try again after upgrade to a later version (client)',
'type': 'integer',
'required': False,
},
'synchronous': {
'description': 'used in tests to wait until a requested operation completes',
'type': 'boolean',
'required': False,
},
},
}
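# Illustrative recipient-side check of min_required_version (an assumption
# based on the description above; MAX_SUPPORTED_VERSION is hypothetical):
#
#   if headers.get('min_required_version', 0) > MAX_SUPPORTED_VERSION:
#     raise ValueError('message requires a newer protocol version')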
OP_HEADERS = {
'description': 'contains headers used for mutable methods',
'type': 'object',
'properties': {
'op_id': {
'description': 'id of the requested operation; the id is a composite of '
'the device id and a unique operation id generated by that device; the '
'client may provide the op_id, or if it does not, the server will '
'generate one',
'type': 'string',
},
'op_timestamp': {
'description': 'timestamp of this requested operation; the client may '
'provide the op_timestamp, or if it does not, the server will generate one',
'type': 'number',
},
},
}
_CopyProperties(target_dict=OP_HEADERS, source_dict=HEADERS)
# Location in degrees of latitude and longitude and accuracy in meters.
LOCATION = {
'description': 'location in degrees of latitude & longitude and accuracy in meters',
'type': 'object',
'properties': {
'latitude': {'type': 'number'},
'longitude': {'type': 'number'},
'accuracy': {'type': 'number'},
},
}
OPTIONAL_LOCATION = deepcopy(LOCATION)
OPTIONAL_LOCATION['required'] = False
# Hierarchical place names from country to street level.
PLACEMARK = {
'description': 'placemark identifies a place by name from country to street level',
'type': 'object',
'properties': {
'iso_country_code': {'type': 'string', 'blank': True, 'required': False},
'country': {'type': 'string', 'blank': True, 'required': False},
'state': {'type': 'string', 'blank': True, 'required': False},
'locality': {'type': 'string', 'blank': True, 'required': False},
'sublocality': {'type': 'string', 'blank': True, 'required': False},
'thoroughfare': {'type': 'string', 'blank': True, 'required': False},
'subthoroughfare': {'type': 'string', 'blank': True, 'required': False},
},
}
OPTIONAL_PLACEMARK = deepcopy(PLACEMARK)
OPTIONAL_PLACEMARK['required'] = False
# Select a set of assets, with some control over the scope of projection.
VIEWPOINT_SELECTION = {
'description': 'select a set of viewpoints by id; if "get_attributes" is '
'True or not specified, then return all attributes on the viewpoints; '
'"get_followers", "get_activities", "get_episodes", and "get_comments" '
'specify whether to return the corresponding collections associated with '
'the viewpoint; "start_key" fields enable paging of the collections',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'get_attributes': {'type': 'boolean', 'required': False},
'get_followers': {'type': 'boolean', 'required': False},
'follower_start_key': {'type': 'string', 'required': False},
'get_activities': {'type': 'boolean', 'required': False},
'activity_start_key': {'type': 'string', 'required': False},
'get_episodes': {'type': 'boolean', 'required': False},
'episode_start_key': {'type': 'string', 'required': False},
'get_comments': {'type': 'boolean', 'required': False},
'comment_start_key': {'type': 'string', 'required': False},
},
},
}
EPISODE_SELECTION = {
'description': 'select a set of episodes by id; if "get_attributes" is '
'True or not specified, then return all attributes on the episodes; if '
'"get_photos" is True or not specified, return photos in the episode, '
'starting with "photo_start_key" if it is specified',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'get_attributes': {'type': 'boolean', 'required': False},
'get_photos': {'type': 'boolean', 'required': False},
'photo_start_key': {'type': 'string', 'required': False},
},
},
}
USER_SELECTION = {
'description': 'select set of users by id',
'type': 'array',
'items': {'type': 'number'},
}
CONTACT_SELECTION = {
'description': 'select set of contacts that have a sort_key greater '
'than start_key',
'type': 'object',
'properties': {
'all': {
'description': 'invalidate all contacts, forcing complete client reload',
'type': 'boolean', 'required': False,
},
'start_key': {'type': 'string'},
},
}
# Cover photo metadata.
COVER_PHOTO_METADATA = {
'description': 'describes cover photo for a shared viewpoint',
'type': 'object',
'required': False,
'properties': {
'episode_id': {
'description': 'episode_id of episode that contains cover photo',
'type': 'string'
},
'photo_id': {
'description': 'photo_id of cover photo',
'type': 'string'
},
},
}
# Activity metadata.
CREATE_ACTIVITY_METADATA = {
'description': 'activity metadata for creation',
'type': 'object',
'properties': {
'activity_id': {'type': 'string'},
'timestamp': {
'description': 'time that activity was created on the client',
'type': 'number',
},
},
}
ACTIVITY_POST_ARRAY = {
'description': 'array of (episode, photo_id) tuples used by share '
'and unshare activities',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'photo_ids': {
'type': 'array',
'items': {'type': 'string'},
},
},
},
}
ACTIVITY_METADATA = {
'description': 'full activity metadata (includes create metadata)',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'user_id': {
'description': 'id of user that caused the activity to be created',
'type': 'number',
},
'update_seq': {
'description': 'set to the value of the viewpoint\'s update_seq '
'attribute after it is incremented during the creation of the '
'activity',
'type': 'number',
},
'add_followers': {
'description': 'new followers added to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'follower_ids': {
'description': 'user ids of new viewpoint followers',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'merge_accounts': {
'description': 'user accounts merged; target user added to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'target_user_id': {
'description': 'user that receives the assets to be merged; this user remains after '
'the merge is completed',
'type': 'number',
},
'source_user_id': {
'description': 'user that provides the assets to be merged; the account of this user '
'is terminated after the merge is completed',
'type': 'number',
},
},
},
'post_comment': {
'description': 'comment posted to this viewpoint',
'required': False,
'type': 'object',
'properties': {
'comment_id': {'type': 'string'},
},
},
'remove_followers': {
'description': 'followers removed from this viewpoint',
'required': False,
'type': 'object',
'properties': {
'follower_ids': {
'description': 'user ids of removed viewpoint followers',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'save_photos': {
'description': 'photos saved to default viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'share_existing': {
'description': 'photos shared to an already existing viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'share_new': {
'description': 'photos shared to a newly created viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
'follower_ids': {
'description': 'user ids of new viewpoint followers; excludes '
'the creating user id',
'type': 'array',
'items': {'type': 'number'},
},
},
},
'unshare': {
'description': 'photos unshared from this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episodes': ACTIVITY_POST_ARRAY,
},
},
'update_episode': {
'description': 'episode metadata updated in this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
},
},
'update_viewpoint': {
'description': 'viewpoint metadata updated',
'required': False,
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'prev_title': {'type': 'string', 'required': False},
'prev_cover_photo': COVER_PHOTO_METADATA,
},
},
'upload_episode': {
'description': 'photos uploaded to an episode in this viewpoint',
'required': False,
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'photo_ids': {
'type': 'array',
'items': {'type': 'string'},
},
},
},
},
}
_CopyProperties(target_dict=ACTIVITY_METADATA, source_dict=CREATE_ACTIVITY_METADATA)
# Photo metadata.
PHOTO_URL_METADATA = {
'description': 'metadata for signed S3 URLs that reference photo image data',
'type': 'object',
'properties': {
'tn_get_url': {
'description': 'url for thumbnail resolution image file; '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'med_get_url': {
'description': 'url for medium-screen resolution image file (max 480 pixels); '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'full_get_url': {
'description': 'url for full-screen resolution image file (max 960 pixels); '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
'orig_get_url': {
'description': 'url for original resolution image file; '
'URL expires in 24 hours--only returned with photo query responses',
'type': 'string', 'required': False,
},
},
}
USER_PHOTO_METADATA = {
'description': 'per-user photo metadata',
'type': 'object',
'properties': {
'photo_id': {'type': 'string'},
'asset_keys': {
'description': 'identifiers for copies of this photo in the user\'s devices\' native '
'asset library. This field is per-user, and its format is client-specific',
'type': 'array',
'required': False,
'items': {'type': 'string'},
},
},
}
UPDATE_PHOTO_METADATA = {
'description': 'photo metadata for updates',
'type': 'object',
'properties': {
'location': OPTIONAL_LOCATION,
'placemark': OPTIONAL_PLACEMARK,
'caption': {'type': 'string', 'required': False},
'link': {'type': 'string', 'required': False},
},
}
_CopyProperties(target_dict=UPDATE_PHOTO_METADATA, source_dict=USER_PHOTO_METADATA)
UPLOAD_PHOTO_METADATA = {
'description': 'photo metadata for upload (includes update metadata)',
'type': 'object',
'properties': {
'timestamp': {
'description': 'time that photo was created on the client',
'type': 'number',
},
'aspect_ratio': {
'description': 'floating point value: width / height',
'type': 'number',
},
'tn_md5': {
'description': 'thumbnail resolution md5 csum',
'type': 'string',
},
'med_md5': {
'description': 'medium resolution md5 csum (max 480 pixels)',
'type': 'string',
},
'full_md5': {
'description': 'full-screen resolution md5 csum (max 960 pixels)',
'type': 'string',
},
'orig_md5': {
'description': 'original resolution md5 csum',
'type': 'string',
},
'tn_size': {
'description': 'thumbnail resolution size in bytes',
'type': 'integer', 'required': False,
},
'med_size': {
'description': 'medium resolution size in bytes (max 480 pixels)',
'type': 'integer', 'required': False,
},
'full_size': {
'description': 'full-screen resolution size in bytes (max 960 pixels)',
'type': 'integer', 'required': False,
},
'orig_size': {
'description': 'original resolution size in bytes',
'type': 'integer', 'required': False,
},
'content_type': {
'description': 'image file content type (e.g. image/jpeg)',
'type': 'string', 'required': False,
},
'parent_id': {
'description': 'if specified, this photo was derived from another',
'type': 'string', 'required': False,
},
},
}
_CopyProperties(target_dict=UPLOAD_PHOTO_METADATA, source_dict=UPDATE_PHOTO_METADATA)
PHOTO_METADATA = {
'description': 'full photo metadata (includes upload metadata)',
'type': 'object',
'properties': {
'user_id': {
'description': 'id of user that created the photo',
'type': 'number'
},
'episode_id': {
'description': 'episode in which the photo was originally uploaded',
'type': 'string', 'required': False,
},
'labels': {
'description': 'set of boolean modifiers affecting the photo (e.g. "removed")',
'type': 'array', 'required': False, 'items': {'type': 'string'},
},
'sharing_user_id': {
'description': 'user who shared this photo (if applicable)',
'type': 'number', 'required': False,
},
},
}
_CopyProperties(target_dict=PHOTO_METADATA, source_dict=UPLOAD_PHOTO_METADATA)
_CopyProperties(target_dict=PHOTO_METADATA, source_dict=PHOTO_URL_METADATA)
# Older photos may be missing one or more MD5 attributes.
PHOTO_METADATA['properties']['tn_md5']['required'] = False
PHOTO_METADATA['properties']['med_md5']['required'] = False
PHOTO_METADATA['properties']['full_md5']['required'] = False
PHOTO_METADATA['properties']['orig_md5']['required'] = False
POST_PHOTO_METADATA = deepcopy(PHOTO_METADATA)
# Episode metadata.
UPDATE_EPISODE_METADATA = {
'description': 'episode metadata for updates',
'type': 'object',
'properties': {
'episode_id': {'type': 'string'},
'title': {'type': 'string', 'required': False},
'description': {'type': 'string', 'required': False},
'location': OPTIONAL_LOCATION,
'placemark': OPTIONAL_PLACEMARK,
},
}
UPLOAD_EPISODE_METADATA = {
'description': 'episode metadata for upload (includes update metadata)',
'type': 'object',
'properties': {
'timestamp': {
'description': 'timestamp of the newest photo in the episode',
'type': 'number',
},
},
}
_CopyProperties(target_dict=UPLOAD_EPISODE_METADATA, source_dict=UPDATE_EPISODE_METADATA)
EPISODE_METADATA = {
'description': 'full episode metadata (includes upload metadata)',
'type': 'object',
'properties': {
'user_id': {
'description': 'id of user that created the episode',
'type': 'number'
},
'viewpoint_id': {
'description': 'viewpoint to which the episode belongs',
'type': 'string',
},
'publish_timestamp': {
'description': 'time at which the episode was uploaded',
'type': 'number',
},
'sharing_user_id': {
'description': 'user who shared this episode (if applicable)',
'type': 'number', 'required': False,
},
'parent_ep_id': {
'description': 'id of the parent episode, if one exists',
'type': 'string', 'required': False,
},
},
}
_CopyProperties(target_dict=EPISODE_METADATA, source_dict=UPLOAD_EPISODE_METADATA)
# Follower metadata.
UPDATE_FOLLOWER_METADATA = {
'description': 'follower metadata for updates',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'labels': {
'description': 'set of boolean permissions and modifiers affecting '
'the viewpoint (e.g. "personal")',
'type': 'array', 'required': False, 'uniqueItems': True,
'items': {'type': 'string', 'enum': Follower.ALL_LABELS},
},
'viewed_seq': {
'description': 'sequence number of last viewpoint update that the '
'client has viewed on any device; the client and server will always '
'"ratchet up" this value; they will ignore any value that is smaller '
'than a value already received',
'type': 'number', 'required': False,
},
},
}
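# Illustrative "ratchet up" rule for viewed_seq (assumed from the description
# above, not taken from server code; ratchetSketch is a hypothetical name):
# both client and server keep the maximum value seen and ignore anything
# smaller.
#
#   def ratchetSketch(current_seq, incoming_seq):
#     return max(current_seq, incoming_seq)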
FRIEND_FOLLOWER_METADATA = {
'description': 'follower metadata that is returned to other followers of the same viewpoint when '
'they invoke query_viewpoints',
'type': 'object',
'properties': {
'follower_id': {'type': 'number'},
'labels': {
'description': 'set of boolean permissions and modifiers affecting the follower\'s '
'relationship to the viewpoint',
'type': 'array', 'required': False, 'uniqueItems': True,
'items': {'type': 'string', 'enum': [Follower.REMOVED, Follower.UNREVIVABLE]},
},
'adding_user_id': {
'description': 'user who added this follower to the viewpoint; for older viewpoints, '
'this may not be present; it also is not present for the user that created the viewpoint',
'type': 'number', 'required': False,
},
'follower_timestamp': {
'description': 'timestamp at which follower was added to the viewpoint; if not present, '
'assume follower was added more than 7 days ago',
'type': 'number', 'required': False,
},
},
}
# Viewpoint metadata.
UPDATE_VIEWPOINT_METADATA = {
'description': 'viewpoint metadata for updates',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'title': {'type': 'string', 'required': False},
'description': {'type': 'string', 'required': False},
'name': {'type': 'string', 'required': False},
'cover_photo': COVER_PHOTO_METADATA,
},
}
CREATE_VIEWPOINT_METADATA = {
'description': 'viewpoint metadata for create (includes update metadata)',
'type': 'object',
'properties': {
'type': {
'description': 'kind of viewpoint (only allow event viewpoint to be created by users)',
'type': 'string', 'enum': [Viewpoint.EVENT],
},
},
}
_CopyProperties(target_dict=CREATE_VIEWPOINT_METADATA, source_dict=UPDATE_VIEWPOINT_METADATA)
VIEWPOINT_METADATA = {
'description': 'full viewpoint metadata (includes create metadata)',
'type': 'object',
'properties': {
'follower_id': {
'description': 'id of the calling user who follows this viewpoint',
'type': 'number',
},
'user_id': {
'description': 'id of user that created the viewpoint',
'type': 'number'
},
'timestamp': {
'description': 'timestamp at which viewpoint was created',
'type': 'number',
},
'update_seq': {
'description': 'sequence number of the last add, remove, or update of '
'any assets or metadata within the viewpoint; only updates to shared '
'assets increment this value (i.e. not changes to user-specific '
'tables like Follower or UserPost)',
'type': 'number', 'required': False,
},
'adding_user_id': {
'description': 'user who added this follower to the viewpoint (if applicable)',
'type': 'number', 'required': False,
},
'last_updated': {
'description': 'timestamp of the activity that was last added to '
'the viewpoint',
'type': 'number', 'required': False,
},
},
}
_CopyProperties(target_dict=VIEWPOINT_METADATA, source_dict=CREATE_VIEWPOINT_METADATA)
_CopyProperties(target_dict=VIEWPOINT_METADATA, source_dict=UPDATE_FOLLOWER_METADATA)
_CopyProperties(target_dict=VIEWPOINT_METADATA['properties']['cover_photo'], source_dict=PHOTO_URL_METADATA)
VIEWPOINT_METADATA['properties']['type']['enum'] = Viewpoint.TYPES
# Copying episodes between viewpoints.
COPY_EPISODES_METADATA = {
'description': 'array of episode copy information; each item specifies the existing episode '
'id, the new episode id, and the photo ids to include in the copied episode',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'existing_episode_id': {
'description': 'id of the episode from which the copy originates; '
'this will be the parent_ep_id of the new episode',
'type': 'string',
},
'new_episode_id': {
'description': 'id of the new episode to create',
'type': 'string',
},
'photo_ids': {
'description': 'ids of photos to copy from the existing episode',
'type': 'array',
'items': {'type': 'string'},
},
},
},
}
OPTIONAL_COPY_EPISODES_METADATA = deepcopy(COPY_EPISODES_METADATA)
OPTIONAL_COPY_EPISODES_METADATA['required'] = False
# Device metadata.
DEVICE_METADATA = {
'description': 'full device metadata properties',
'type': 'object',
'properties': {
'device_id': {
'description': 'unique identifier of the device. Generated on the server.',
'type': 'number',
},
'name': {
'description': 'name of device',
'type': 'string', 'blank': True, 'required': False,
},
'version': {
'description': 'version of the Viewfinder mobile application',
'type': 'string', 'blank': True, 'required': False,
},
'platform': {
'description': 'mobile platform (e.g. iPhone 4S, Samsung Galaxy S)',
'type': 'string', 'blank': True, 'required': False,
},
'os': {
'description': 'mobile os (e.g. iOS 5.0.1, Android 4.0)',
'type': 'string', 'blank': True, 'required': False,
},
'push_token': {
'description': 'opaque token for push notifications',
'type': 'string', 'blank': True, 'required': False,
},
'device_uuid': {
'description': 'per-install unique device id. Generated on the device.',
'type': 'string', 'blank': True, 'required': False,
},
'language': {
'description': 'device language code',
'type': 'string', 'blank': True, 'required': False,
},
'country': {
'description': 'device country code',
'type': 'string', 'blank': True, 'required': False,
},
'test_udid': {
'description': 'unique device ID. Only sent by DEV and ADHOC builds. ID matches that found on testflight.',
'type': 'string', 'blank': True, 'required': False,
},
},
}
# Device id is optional when registering a device.
REGISTER_DEVICE_METADATA = deepcopy(DEVICE_METADATA)
REGISTER_DEVICE_METADATA['required'] = False
REGISTER_DEVICE_METADATA['properties']['device_id']['required'] = False
# Information message sent as part of the ping response.
INFO_MESSAGE = {
'description': 'an informative message to the client',
'type': 'object',
'properties': {
'title': {'type': 'string'},
'body': {'type': 'string', 'required': False},
'link': {'type': 'string', 'required': False},
'identifier': {
'description': 'Unique identifier for this message. The client will not re-display a message with the '
'same identifier; however, a new identifier followed by a previously seen one will display again (e.g. A -> B -> A)',
'type': 'string',
},
'severity': {
'description': 'Severity level. One of "SILENT", "INFO", "ATTENTION", "DISABLE_NETWORK"',
'type': 'string',
},
},
}
PING_RESPONSE_MESSAGE = deepcopy(INFO_MESSAGE)
PING_RESPONSE_MESSAGE['required'] = False
# Comment metadata.
POST_COMMENT_METADATA = {
'description': 'comment metadata used when posting a comment',
'type': 'object',
'properties': {
'viewpoint_id': {'type': 'string'},
'comment_id': {'type': 'string'},
'asset_id': {
'description': 'id of the viewpoint asset to which this comment is '
'attached; this may be a photo, if the comment was about a photo; it '
'may be another comment, if this comment was a direct response to '
'that comment',
'type': 'string',
'required': False,
},
'timestamp': {
'description': 'timestamp of the new comment; this timestamp MUST '
'be the same across successive request attempts by the client in '
'order to guarantee idempotency',
'type': 'number',
},
'message': {
'description': 'text of the comment',
'type': 'string',
},
},
}
COMMENT_METADATA = {
'description': 'full comment metadata (includes post metadata)',
'type': 'object',
'properties': {
'user_id': {
'description': 'id of user that caused the comment to be created',
'type': 'number',
},
},
}
_CopyProperties(target_dict=COMMENT_METADATA, source_dict=POST_COMMENT_METADATA)
# Contact metadata.
UPLOAD_CONTACT_METADATA = {
'description': '(name, given_name, family_name, rank, contact_source, identities) tuple',
'type': 'object',
'properties': {
'contact_source': {
'description': 'Source of contacts: ip (iPhone), or m (Manual)',
'type': 'string',
'enum': Contact.UPLOAD_SOURCES
},
'identities': {
'description': 'Order of this list will be preserved by server for query_contacts responses',
'type': 'array',
'maxItems': 50,
'items': {
'type': 'object',
'properties': {
'identity': {'type': 'string', 'maxLength': 1000},
'description': {'type': 'string', 'maxLength': 1000, 'required': False},
},
},
},
'name': {'type': 'string', 'maxLength': 1000, 'required': False},
'given_name': {'type': 'string', 'maxLength': 1000, 'required': False},
'family_name': {'type': 'string', 'maxLength': 1000, 'required': False},
'rank': {'type': 'number', 'required': False},
},
}
QUERY_CONTACT_METADATA = {
'description': 'Metadata returned in query_contacts response',
'type': 'object',
'properties': {
'contact_id': {'type': 'string'},
'labels': {
'description': 'set of boolean modifiers affecting the contact (e.g. "removed")',
'type': 'array', 'required': False, 'items': {'type': 'string'},
},
},
}
_CopyProperties(target_dict=QUERY_CONTACT_METADATA, source_dict=UPLOAD_CONTACT_METADATA)
QUERY_CONTACT_METADATA['properties']['contact_source']['enum'] = Contact.ALL_SOURCES
QUERY_CONTACT_METADATA['properties']['identities']['items']['properties']['user_id'] = \
{'type': 'number', 'required': False}
QUERY_CONTACT_METADATA['properties']['identities']['required'] = False
FOLLOWER_CONTACTS_METADATA = {
'description': 'array of contacts to add as followers of a viewpoint',
'type': 'array',
'items': {
'description': 'contacts: identity key and name if available. '
'user_id is required if known.',
'type': 'object',
'properties': {
'user_id': {'type': 'number', 'required': False},
'identity': {'type': 'string', 'required': False},
'name': {'type': 'string', 'required': False},
},
},
}
# Invalidate structure.
INVALIDATE = {
'description': 'each notification can select parts of the asset tree to '
'invalidate if the operation that triggered the notification modified '
'the tree',
'type': 'object',
'properties': {
'all': {
'description': 'invalidate all assets, forcing complete client reload',
'type': 'boolean',
},
'viewpoints': VIEWPOINT_SELECTION,
'episodes': EPISODE_SELECTION,
'users': USER_SELECTION,
'contacts': CONTACT_SELECTION,
},
}
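# Example invalidation (illustrative ids): re-fetch the activities of a single
# viewpoint, resuming from a saved continuation key.
#
#   {'viewpoints': [{'viewpoint_id': 'v-example', 'get_attributes': False,
#                    'get_activities': True, 'activity_start_key': 'a-example'}]}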
# Usage information for a given category.
USAGE_CATEGORY_METADATA = {
'description': 'usage information for a single category',
'type': 'object',
'required': False,
'properties': {
'num_photos': { 'type': 'number', 'required': False },
'tn_size': { 'type': 'number', 'required': False },
'med_size': { 'type': 'number', 'required': False },
'full_size': { 'type': 'number', 'required': False },
'orig_size': { 'type': 'number', 'required': False },
},
}
# Usage information for a single user.
USAGE_METADATA = {
'description': 'usage information by category',
'type': 'object',
'properties': {
'owned_by': deepcopy(USAGE_CATEGORY_METADATA),
'shared_by': deepcopy(USAGE_CATEGORY_METADATA),
'visible_to': deepcopy(USAGE_CATEGORY_METADATA),
},
}
# The optional variant is used in NOTIFICATION. Currently, the last notification in the response to
# QueryNotifications will have the usage information.
OPTIONAL_USAGE_METADATA = deepcopy(USAGE_METADATA)
OPTIONAL_USAGE_METADATA['required'] = False
# Notification structure.
NOTIFICATION = {
'description': 'a union of notifications delivered to client asynchronously',
'type': 'object',
'properties': {
'notification_id': {'type': 'number'},
'name': {'type': 'string'},
'sender_id': {'type': 'number'},
'op_id': {
'description': 'id of the operation that produced this notification; this attribute '
'will be missing if no operation was involved',
'type': 'string', 'required': False,
},
'timestamp': {'type': 'number'},
'invalidate': deepcopy(INVALIDATE),
'inline': {
'description': 'some common invalidations are in-lined in the notification '
'in order to avoid extra round-trips',
'type': 'object', 'required': False,
'properties': {
'activity': deepcopy(ACTIVITY_METADATA),
'viewpoint': {
'description': 'if this notification updates the value of the update_seq '
'and / or viewed_seq attributes, then in-line the changed value(s) in '
'order to reduce round-trips',
'type': 'object', 'required': False,
'properties': {
'viewpoint_id': {'type': 'string'},
'update_seq': {
'description': 'value of the viewpoint update_seq attribute after '
'it was incremented by the operation; the client will "ratchet up" '
'this value, discarding any that is smaller than a value already '
'received',
'type': 'number', 'required': False,
},
'viewed_seq': {
'description': 'value of the follower viewed_seq attribute after '
'it was incremented for the user who submitted the operation; '
'the client will "ratchet up" this value, discarding any that '
'is smaller than a value already received',
'type': 'number', 'required': False,
},
},
},
'comment': deepcopy(COMMENT_METADATA),
'user': {
'description': 'user information', 'type': 'object', 'required': False,
'usage': deepcopy(OPTIONAL_USAGE_METADATA),
},
},
},
},
}
_MakeOptional(NOTIFICATION['properties']['invalidate']['properties'], lambda key: True)
NOTIFICATION['properties']['invalidate']['required'] = False
NOTIFICATION['properties']['inline']['properties']['activity']['required'] = False
NOTIFICATION['properties']['inline']['properties']['comment']['required'] = False
# Error response.
ERROR_RESPONSE = {
'description': 'on an error, returns code and message for debugging client',
'type': 'object',
'properties': {
'error': {
'type': 'object',
'required': False,
'properties': {
'method': {'type': 'string', 'required': False},
'id': {'type': 'string', 'required': False},
'message': {'type': 'string', 'blank': True},
},
},
},
}
# Prospective user invitation.
PROSPECTIVE_USER_INVITATION = {
'description': 'format of the prospective user invitation query parameter',
'type': 'object',
'properties': {
'timestamp': {
'description': 'timestamp at which the invitation was issued',
'type': 'number'
},
'identity': {
'description': 'identity to which this invitation was made; this may '
'be different than the actual identity of the bearer, as in cases '
'where the link was forwarded',
'type': 'string'
},
'viewpoint_id': {
'description': 'viewpoint to which this invitation grants access; the '
'bearer should not have access to sensitive data in other viewpoints',
'type': 'string'
},
'service_sig': {
'description': 'signature with service-wide secret that is used only to '
'sign invitations; filters out attempted forgeries at a minimal '
'cost in terms of server resources; the service_sig attribute is not '
'included in the signature',
'type': 'string'},
}
}
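# Illustrative signing sketch (an assumption based on the service_sig
# description above; the real scheme and secret management live elsewhere,
# and signInvitationSketch is a hypothetical name):
#
#   import hashlib, hmac, json
#   def signInvitationSketch(invitation, secret):
#     unsigned = dict((k, v) for k, v in invitation.items() if k != 'service_sig')
#     payload = json.dumps(unsigned, sort_keys=True)
#     return hmac.new(secret, payload, hashlib.sha256).hexdigest()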
# Subscription metadata.
SUBSCRIPTION_METADATA = {
'description': 'information about a subscription',
'type': 'object',
'properties': {
'transaction_id': {'type': 'string'},
'subscription_id': {'type': 'string'},
'timestamp': {'type': 'number'},
'expiration_ts': {'type': 'number'},
'product_type': {'type': 'string'},
'quantity': {'type': 'number'},
'payment_type': {'type': 'string'},
'extra_info': {
'description': 'additional data about the transaction; format depends on payment_type',
'type': 'object',
'required': False,
'additionalProperties': {},
},
},
}
# Friend metadata.
FRIEND_METADATA = {
'description': 'information stored per user about friends of that user; only the user '
'can view and update this information',
'type': 'object',
'required': False,
'properties': {
'user_id': {'type': 'number'},
'nickname': {
'type': ['string', 'null'],
'required': False,
},
},
}
# User account settings metadata.
UPDATE_ACCOUNT_SETTINGS_METADATA = {
'description': 'options and choices affecting the user account that can be updated by the '
'user; see header comment in the AccountSettings class for details on allowed settings',
'type': 'object',
'required': False,
'properties': {
'email_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_EMAIL_ALERTS},
'sms_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_SMS_ALERTS},
'push_alerts': {'type': 'string', 'required': False, 'enum': AccountSettings.ALL_PUSH_ALERTS},
'storage_options': {
'type': 'array', 'required': False, 'uniqueItems': True,
'items': {'type': 'string', 'enum': AccountSettings.ALL_STORAGE_OPTIONS},
},
},
}
# At this time, all user account settings can be updated by the user.
ACCOUNT_SETTINGS_METADATA = UPDATE_ACCOUNT_SETTINGS_METADATA
# Identity metadata.
IDENTITY_METADATA = {
'description': 'Identity metadata. Returned in list_identities and query_users on self.',
'type': 'object',
'properties': {
'identity': {
'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:6464174337 | FacebookGraph:62052443',
'type': 'string',
},
'authority': {
'description': 'e.g. Google | Facebook | Twitter | Viewfinder | <empty>',
'type': 'string', 'required': False,
},
},
}
# User profile metadata.
UPDATE_USER_PROFILE_METADATA = {
'description': 'public profile of a user; all properties can be updated by the user',
'type': 'object',
'properties': {
'name': {
'description': 'full name of the user; if any name part (name, given_name, or family_name) '
'is given, then all parts are set -- any missing parts are set to None; this helps to '
'avoid accidental divergence',
'type': 'string',
'required': False,
'dependencies': 'given_name',
},
'given_name': {'type': 'string', 'required': False, 'dependencies': 'name'},
'family_name': {'type': 'string', 'required': False, 'dependencies': 'name'},
'picture': {
'description': 'URL to avatar photo',
'type': 'string',
'required': False,
},
},
}
USER_PROFILE_METADATA = {
'description': 'additional user profile properties that cannot be updated by the user, '
'but are returned by query_users',
'type': 'object',
'properties': {
'email': {'type': 'string', 'required': False},
'labels': {
'description': 'set of boolean modifiers affecting the user (e.g. "terminated")',
'type': 'array', 'required': False, 'items': {'type': 'string'},
},
'merged_with': {'type': 'number', 'required': False},
'private': {
'description': 'additional fields that are only present when querying for the '
'authenticated user',
'type': 'object',
'required': False,
'properties': {
'subscriptions': {
'description': 'all active subscriptions for this user',
'type': 'array',
'items': SUBSCRIPTION_METADATA,
},
'account_settings': ACCOUNT_SETTINGS_METADATA,
'no_password': {
'description': 'if true, then this user has no password set',
'type': 'boolean', 'required': False,
},
'user_identities': {
'description': 'all identities for this user',
'type': 'array',
'items': IDENTITY_METADATA,
},
},
},
},
}
_CopyProperties(target_dict=USER_PROFILE_METADATA, source_dict=UPDATE_USER_PROFILE_METADATA)
# Don't require first name to be set on returned users, even if name is set.
del USER_PROFILE_METADATA['properties']['name']['dependencies']
# Add friend attributes.
_CopyProperties(target_dict=USER_PROFILE_METADATA, source_dict=FRIEND_METADATA)
# Confirmed identity.
CONFIRMED_IDENTITY = {
'description': 'an identity is confirmed when it is paired with an access token, the '
'possession of which proves control of the identity',
'type': 'object',
'properties': {
'identity': {
'description': 'identity to verify; make sure to use the full identity scheme '
'(e.g. Email:foo@example.com, Phone:+16461234567)',
'type': 'string',
},
'access_token': {
'description': 'N-digit access code sent to email address or SMS phone number',
'type': 'string',
},
},
}
##
# AUTH METHODS
##
# User cookies and identity access tokens.
#
# /link/facebook, /link/google, /link/viewfinder
# /login/facebook, /login/google, /login/viewfinder
# /register/facebook, /register/google, /register/viewfinder
# /login_reset/viewfinder
# /merge_token/viewfinder
AUTH_REQUEST = {
'description': 'registers new users, logs in existing users, or links identities to existing '
'users; connects to Facebook or Google to gather information about the user, including his/her '
'contacts',
'type': 'object',
'properties': {
'headers': HEADERS,
'use_session_cookie': {
'description': 'if true, then the user cookie is set to expire when the user ends the '
'session (e.g. by closing the browser)',
'type': 'boolean',
'required': False
},
},
}
AUTH_RESPONSE = {
'description': 'returns user-id & device-id for registered user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'user_id': {
'description': 'id of the user that was registered',
'type': 'number',
},
'device_id': {
'description': 'id of the device that was registered; this will not be present if the '
'device section was not present in the original request',
'type': 'number', 'required': False,
},
},
}
# Used by /<auth>/facebook and /<auth>/google.
AUTH_FB_GOOGLE_REQUEST = deepcopy(AUTH_REQUEST)
AUTH_FB_GOOGLE_REQUEST['properties']['device'] = REGISTER_DEVICE_METADATA
# Used by /<auth>/viewfinder.
AUTH_VIEWFINDER_REQUEST = deepcopy(AUTH_REQUEST)
AUTH_VIEWFINDER_REQUEST['properties']['device'] = REGISTER_DEVICE_METADATA
AUTH_VIEWFINDER_REQUEST['properties']['auth_info'] = {
'description': 'identity and optional user registration information used in Viewfinder auth '
'requests; this is in addition to the inherited AUTH_REQUEST fields',
'type': 'object',
'properties': {
'identity': {
'description': 'identity to authenticate; make sure to use the full identity scheme '
'(e.g. Email:foo@example.com, Phone:6464174337)',
'type': 'string'
},
},
}
AUTH_VIEWFINDER_RESPONSE = {
'description': 'returned by Viewfinder auth methods after an email/SMS message has been sent',
'type': 'object',
'properties': {
'headers': HEADERS,
'token_digits': {
'description': 'number of digits in the access token that was sent',
'type': 'number',
},
},
}
# Used by /login/viewfinder.
LOGIN_VIEWFINDER_PROPERTIES = {
'description': 'additional auth_info properties used only in user login',
'type': 'object',
'properties': {
'password': {
'description': 'user\'s password; if not specified then an authentication email/SMS '
'will be sent; otherwise, the password will be validated and allow the auth process to '
'be short-circuited',
'type': 'string',
'required': False,
},
},
}
LOGIN_VIEWFINDER_REQUEST = deepcopy(AUTH_VIEWFINDER_REQUEST)
_CopyProperties(target_dict=LOGIN_VIEWFINDER_REQUEST['properties']['auth_info'],
source_dict=LOGIN_VIEWFINDER_PROPERTIES)
# Used by /merge_token/viewfinder
MERGE_TOKEN_REQUEST = {
'description': 'sends an access token which proves control of an identity, and which will '
'be passed to /service/merge_accounts',
'type': 'object',
'properties': {
'headers': HEADERS,
'identity': {
'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:+16464174337',
'type': 'string',
},
'error_if_linked': {
'description': 'reports ALREADY_LINKED error if the identity is already linked to a user '
'account',
'type': 'boolean', 'required': False,
},
},
}
# Used by /register/viewfinder.
REGISTER_VIEWFINDER_PROPERTIES = {
'description': 'additional auth_info properties used only in user registration',
'type': 'object',
'properties': {
'password': {
'description': 'user password to set as part of registration',
'type': 'string',
'required': False,
},
'name': {
'description': 'full user name',
'type': 'string',
'dependencies': 'given_name',
},
'given_name': {
'description': 'user\'s given name (i.e. first name)',
'type': 'string',
'required': False,
'dependencies': 'name',
},
'family_name': {
'description': 'user\'s family name (i.e. last name)',
'type': 'string',
'required': False,
'dependencies': 'name',
},
},
}
REGISTER_VIEWFINDER_REQUEST = deepcopy(AUTH_VIEWFINDER_REQUEST)
_CopyProperties(target_dict=REGISTER_VIEWFINDER_REQUEST['properties']['auth_info'],
source_dict=REGISTER_VIEWFINDER_PROPERTIES)
# Used by /verify/viewfinder.
# Verifies the access code and completes the auth operation that was started by a call to /<auth>/viewfinder.
VERIFY_VIEWFINDER_REQUEST = deepcopy(AUTH_REQUEST)
_CopyProperties(target_dict=VERIFY_VIEWFINDER_REQUEST, source_dict=CONFIRMED_IDENTITY)
# Used by auth.html.
CONFIRM_PASSWORD_REQUEST = {
'description': 'confirm user password before completing user registration',
'type': 'object',
'properties': {
'headers': HEADERS,
'password': {
'description': 'password which will be checked against the password that was supplied '
'during user registration',
'type': 'string',
},
},
}
CONFIRM_PASSWORD_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
##
# SERVICE METHODS
##
# Add followers to an existing viewpoint.
#
# /service/add_followers
ADD_FOLLOWERS_REQUEST = {
'description': 'add resolved contacts as followers of an existing '
'viewpoint',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint_id': {
'description': 'id of the viewpoint to which to add followers',
'type': 'string',
},
'contacts': FOLLOWER_CONTACTS_METADATA,
},
}
ADD_FOLLOWERS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Allocate unique ids for use when uploading photos or creating episodes.
#
# /service/allocate_ids
ALLOCATE_IDS_REQUEST = {
'description': 'allocate unique ids for the requesting user',
'type': 'object',
'properties': {
'headers': HEADERS,
'asset_types': {
'description': 'An array of single-character prefixes describing the asset ids to be '
'generated. Assets can be of mixed type - for example, you may request an operation '
'id and an activity id in a single request by passing the array ["o", "a"].',
'type': 'array',
'items': {
'type' : 'string'
}
},
},
}
ALLOCATE_IDS_RESPONSE = {
'description': 'returns the first id in a pre-allocated block',
'type': 'object',
'properties': {
'headers': HEADERS,
'asset_ids': {
'description': 'An array of asset ids generated by the server. Ids are returned in the '
'same order they appeared in the "asset_types" array of the request.',
'type': 'array',
'items': {
'type' : 'string'
}
},
'timestamp': {
'description': 'The timestamp used by the server to generate ids which require a '
'timestamp component.',
'type': 'number',
}
},
}
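# Example round trip (illustrative values): asset ids map positionally to the
# requested asset types.
#
#   request:  {'headers': {...}, 'asset_types': ['o', 'a']}
#   response: {'headers': {...}, 'asset_ids': ['o123', 'a456'],
#              'timestamp': 1349467408.0}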
# Build archive of a users photos/conversations/etc.
#
# /service/build_archive
BUILD_ARCHIVE_REQUEST = {
'description': 'build archive of photos/comments/etc for requesting user',
'type': 'object',
'properties': {
'headers': HEADERS,
'email': {'type': 'string'},
},
}
BUILD_ARCHIVE_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Fetch calendar(s) for user.
#
# /service/get_calendar
GET_CALENDAR_REQUEST = {
'description': 'fetch calendar(s) for user; specify "holidays" for locale-specific '
'holidays. Will default to en_US if the user\'s locale is not known.',
'type': 'object',
'properties': {
'headers': HEADERS,
'calendars': {
'description': 'calendar IDs; specify none for locale-specific holiday calendar',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'calendar_id': {'type': 'string'},
'year': {'type': 'number'},
},
},
},
},
}
GET_CALENDAR_RESPONSE = {
'description': 'returns a list of events corresponding to each calendar and year',
'type': 'object',
'properties': {
'headers': HEADERS,
'calendars': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'calendar_id': {'type': 'string'},
'year': {'type': 'number'},
'events': {
'description': 'calendar events by name and date; dates are unix time in UTC',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'name': {'type': 'string'},
'dtstart': {'type': 'number'},
'dtend': {'type': 'number'},
},
},
},
},
},
},
},
}
# Hide photos from user's personal library and inbox view.
#
# /service/hide_photos
HIDE_PHOTOS_REQUEST = {
'description': 'hide a list of posts by id from the user\'s personal '
'library or inbox view by labeling them as hidden for that user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'episodes': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {
'description': 'id of the episode containing photos to hide '
'from the user\'s personal library or inbox view',
'type': 'string',
},
'photo_ids': {
'description': 'ids of photos to hide from the user\'s '
'personal library or inbox view',
'type': 'array',
'items': {'type': 'string'},
},
},
},
},
},
}
HIDE_PHOTOS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# List all user identities.
#
# /service/list_identities
LIST_IDENTITIES_REQUEST = {
'description': 'list all identities linked to this account',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
LIST_IDENTITIES_RESPONSE = {
'description': 'the list of all linked identities',
'type': 'object',
'properties': {
'headers': HEADERS,
'identities': {
'type': 'array',
'items': IDENTITY_METADATA
},
},
}
# Merge one user account with another.
#
# /service/merge_accounts
MERGE_ACCOUNTS_REQUEST = {
'description': 'merge assets from another user account or identity into the account of the '
'current user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'source_user_cookie': {
'description': 'user cookie for the account from which to merge; this cookie can be '
'obtained by calling the /login handler and getting the contents of the "user" HTTP '
'cookie that it returns; this cookie must be confirmed, meaning that it cannot have '
'been created from a password; either this field or source_identity needs to be '
'specified',
'type': 'string', 'required': False,
},
'source_identity': {
'description': 'confirmed identity linked to the account from which to merge; it is also '
'possible for the identity to not be linked to any account, in which case it is simply '
'linked to the target account',
'type': 'object', 'required': False,
'properties': { },
},
},
}
_CopyProperties(target_dict=MERGE_ACCOUNTS_REQUEST['properties']['source_identity'],
source_dict=CONFIRMED_IDENTITY)
MERGE_ACCOUNTS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Get a new client log upload URL from the server.
#
# /service/new_client_log_url
NEW_CLIENT_LOG_URL_REQUEST = {
'description': 'fetches an S3 PUT url for writing client device log '
'to server for debugging',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'timestamp': {'type': 'number'},
'client_log_id': {
'description': 'an arbitrary client identifier for the log; must be '
'unique across all calls made by the device',
'type': 'string'
},
'content_type': {
'description': 'optionally specify an alternate content-type for the '
'client log. By default, uses application/octet to support old clients '
'which incorrectly specify this. TODO(spencer): change default to octet-stream.',
'type': 'string',
'required': False
},
'content_md5': {'type': 'string', 'required': False},
'num_bytes': {'type': 'number', 'required': False},
},
}
NEW_CLIENT_LOG_URL_RESPONSE = {
'type': 'object',
'properties': {
'headers': HEADERS,
'client_log_put_url': {
'description': 'pre-authorized url for client log; '
'URL expires in 24 hours',
'type': 'string'
},
},
}
# Ping request. Unauthenticated request, periodically issued by the client.
# The response may contain an informative message (eg: new version available).
# Since the request does not require the user to be signed in, it is not handled by service.py
#
# /ping
PING_REQUEST = {
'description': 'device ping',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'device': DEVICE_METADATA,
},
}
PING_RESPONSE = {
'description': 'ping response',
'type': 'object',
'properties': {
'message': PING_RESPONSE_MESSAGE,
},
}
# Add new comment to the viewpoint.
#
# /service/post_comment
POST_COMMENT_REQUEST = {
'description': 'adds a new comment to the viewpoint, optionally attached to '
'another asset in the same viewpoint (such as a photo or another comment)',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
},
}
_CopyProperties(target_dict=POST_COMMENT_REQUEST, source_dict=POST_COMMENT_METADATA)
POST_COMMENT_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
}
}
# Fetch user contact list.
#
# /service/query_contacts
QUERY_CONTACTS_REQUEST = {
'description': 'fetch (name, identity, rank) contact tuples; specify '
'start_key to begin querying where a previous invocation left off',
'type': 'object',
'properties': {
'headers': HEADERS,
'start_key': {'type': 'string', 'required': False},
'limit': {'type': 'number', 'required': False},
},
}
QUERY_CONTACTS_RESPONSE = {
'description': 'returns a list of contacts',
'type': 'object',
'properties': {
'headers': HEADERS,
'num_contacts': {'type': 'number'},
'contacts': {
'type': 'array',
'items': QUERY_CONTACT_METADATA,
},
'last_key': {
'description': 'the last fetched contact sort key; supply this value to '
'the next invocation of query_contacts to continue scan',
'type': 'string', 'required': False,
},
}
}
# Query photos (posts) from episodes.
#
# /service/query_episodes
QUERY_EPISODES_REQUEST = {
'description': 'query photo metadata and associated post information '
'from specified episodes',
'type': 'object',
'properties': {
'headers': HEADERS,
'episodes': EPISODE_SELECTION,
'photo_limit': {
'description': 'maximum number of photos to query from each episode id',
'type': 'number', 'required': False,
},
},
}
QUERY_EPISODES_RESPONSE = {
'description': 'a list of photos from each requested episode. The photo metadata '
'is augmented by associated post information',
'type': 'object',
'properties': {
'headers': HEADERS,
'episodes': {
'description': 'episode query responses',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'photos': {
'description': 'post + photo metadata',
'type': 'array', 'required': False,
'items': POST_PHOTO_METADATA,
},
'last_key': {
'description': 'the last-processed photo in the episode; supply with '
'next invocation of QUERY_EPISODES to continue',
'type': 'string', 'required': False,
},
},
},
},
},
}
_CopyProperties(target_dict=QUERY_EPISODES_RESPONSE['properties']['episodes']['items'],
source_dict=EPISODE_METADATA)
# If get_attributes=False, only episode_id will be returned; all other properties are optional.
_MakeOptional(QUERY_EPISODES_RESPONSE['properties']['episodes']['items']['properties'],
lambda key: key != 'episode_id')
# Query for viewpoints that are followed by the calling user.
#
# /service/query_followed
QUERY_FOLLOWED_REQUEST = {
'description': 'query metadata of viewpoints that are followed by calling user',
'type': 'object',
'properties': {
'headers': HEADERS,
'start_key': {'type': 'string', 'required': False},
'limit': {'type': 'number', 'required': False},
},
}
QUERY_FOLLOWED_RESPONSE = {
'description': 'a list of viewpoints that are followed by calling user',
'type': 'object',
'properties': {
'headers': HEADERS,
'viewpoints': {
'description': 'viewpoint query responses',
'type': 'array',
'items': VIEWPOINT_METADATA,
},
'last_key': {
'description': 'the last-processed followed viewpoint; supply with '
'next invocation of QUERY_FOLLOWED to continue; values can be sorted '
'lexicographically',
'type': 'string', 'required': False,
},
},
}
# Query notifications.
#
# /service/query_notifications
QUERY_NOTIFICATIONS_REQUEST = {
'description': 'poll list of pending notifications for the user',
'type': 'object',
'properties': {
'headers': HEADERS,
'scan_forward': {
'description': 'if true or not specified, then notifications are '
'queried in forward (ascending) order; otherwise, they are queried '
'in reverse order',
'type': 'boolean', 'required': False,
},
'start_key': {
'description': 'clients should supply the last_key returned with the '
'response to a prior invocation to get subsequent notifications',
'type': 'string', 'required': False,
},
'limit': {
'description': 'maximum number of notifications to return',
'type': 'number', 'required': False,
},
'max_long_poll': {
'description': 'maximum duration for long-polling requests (in seconds)',
'type': 'number', 'required': False,
},
},
}
QUERY_NOTIFICATIONS_RESPONSE = {
'description': 'Notifications pending for user',
'type': 'object',
'properties': {
'headers': HEADERS,
'notifications': {
'description': 'an array of notification objects',
'type': 'array',
'items': NOTIFICATION,
},
'last_key': {
'description': 'the last-processed notification key; supply this value with the '
'next invocation of QUERY_NOTIFICATIONS to continue; if not supplied, no '
'notifications were available; values can be sorted lexicographically',
'type': 'string', 'required': False
},
'retry_after': {
'description': 'advisory request from the server to wait before issuing another background query_notifications.',
'type': 'number', 'required': False
},
},
}
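# Illustrative client paging loop (assumed usage; queryNotifications stands in
# for the client's transport call and is a hypothetical name):
#
#   start_key = None
#   while True:
#     response = queryNotifications(start_key=start_key, limit=100)
#     process(response['notifications'])
#     if 'last_key' not in response:
#       break  # no more notifications were available
#     start_key = response['last_key']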
# Query user metadata by user id.
#
# /service/query_users
QUERY_USERS_REQUEST = {
'description': 'query user metadata by user ids; only users which consider the caller a '
'friend will provide profile info; in this case, a "friend" label is returned',
'type': 'object',
'properties': {
'headers': HEADERS,
'user_ids': USER_SELECTION,
},
}
QUERY_USERS_RESPONSE = {
'description': 'user metadata for each valid, supplied user id; if a user id was supplied '
'with the request, but not returned with the response, then the user does not exist',
'type': 'object',
'properties': {
'headers': HEADERS,
'users': {
'type': 'array',
'items': USER_PROFILE_METADATA,
},
},
}
# Query episodes in specified viewpoints.
#
# /service/query_viewpoints
QUERY_VIEWPOINTS_REQUEST = {
'description': 'query viewpoint and episode metadata from specified '
'viewpoints',
'type': 'object',
'properties': {
'headers': HEADERS,
'viewpoints': VIEWPOINT_SELECTION,
'limit': {
'description': 'maximum number of items to return in each episode, '
'follower, or activity collection in the response',
'type': 'number', 'required': False,
},
},
}
QUERY_VIEWPOINTS_RESPONSE = {
'description': 'a list of episodes from each requested viewpoint. The episode metadata '
'is augmented by associated member information',
'type': 'object',
'properties': {
'headers': HEADERS,
'viewpoints': {
'description': 'viewpoint query responses',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'followers': {
'description': 'ids of users following the viewpoint',
'type': 'array', 'required': False,
'items': FRIEND_FOLLOWER_METADATA,
},
'follower_last_key': {
'description': 'the last-processed follower in the viewpoint; supply with '
'next invocation of QUERY_VIEWPOINTS to continue',
'type': 'string', 'required': False,
},
'activities': {
'description': 'viewpoint activity metadata',
'type': 'array', 'required': False,
'items': ACTIVITY_METADATA,
},
'activity_last_key': {
'description': 'the last-processed activity in the viewpoint; supply with '
'next invocation of QUERY_VIEWPOINTS to continue',
'type': 'string', 'required': False,
},
'episodes': {
'description': 'episode + member metadata',
'type': 'array', 'required': False,
'items': EPISODE_METADATA,
},
'episode_last_key': {
'description': 'the last-processed episode in the viewpoint; supply with '
'next invocation of QUERY_VIEWPOINTS to continue',
'type': 'string', 'required': False,
},
'comments': {
'description': 'comment + member metadata',
'type': 'array', 'required': False,
'items': COMMENT_METADATA,
},
'comment_last_key': {
'description': 'the last-processed comment in the viewpoint; supply with '
'next invocation of QUERY_VIEWPOINTS to continue',
'type': 'string', 'required': False,
},
},
},
},
},
}
_CopyProperties(target_dict=QUERY_VIEWPOINTS_RESPONSE['properties']['viewpoints']['items'],
source_dict=VIEWPOINT_METADATA)
# If get_attributes=False, only viewpoint_id will be returned; all other properties are optional.
_MakeOptional(QUERY_VIEWPOINTS_RESPONSE['properties']['viewpoints']['items']['properties'],
lambda key: key != 'viewpoint_id')
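# Sketch (an assumption, not the actual helper implementation): given the call
# above, _MakeOptional plausibly walks the property dict and marks every entry
# selected by the predicate as optional, e.g.:
#
#   def _MakeOptional(properties, predicate):
#     for key, prop in properties.items():
#       if predicate(key):
#         prop['required'] = False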
# Records an external (iTunes in-app purchase) subscription.
#
# /service/record_subscription
RECORD_SUBSCRIPTION_REQUEST = {
'description': 'records an external subscription',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'receipt_data': {
'description': 'base64-encoded itunes receipt data',
'type': 'string',
},
},
}
RECORD_SUBSCRIPTION_RESPONSE = {
'description': 'returns information from the decoded subscription',
'type': 'object',
'properties': {
'headers': HEADERS,
'subscription': SUBSCRIPTION_METADATA,
},
}
# Remove contacts from user contacts list.
#
# /service/remove_contacts
REMOVE_CONTACTS_REQUEST = {
'description': 'remove contacts from the user\'s contacts list',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'contacts': {
'description': 'list of contact_ids. Only AddressBook and Manual contact_ids may be removed through this API',
'type': 'array',
'uniqueItems': True,
'items': {'type': 'string'},
},
},
}
REMOVE_CONTACTS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Remove followers from an existing viewpoint.
#
# /service/remove_followers
REMOVE_FOLLOWERS_REQUEST = {
'description': 'remove followers from an existing viewpoint',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint_id': {
'description': 'id of the viewpoint from which to remove followers',
'type': 'string',
},
'remove_ids': {
'description': 'ids of followers to remove from the viewpoint; if a follower does '
'not exist on the viewpoint, it is ignored',
'type': 'array',
'items': {'type': 'integer'},
},
},
}
REMOVE_FOLLOWERS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Remove photos from user's personal collection.
#
# /service/remove_photos
REMOVE_PHOTOS_REQUEST = {
'description': 'remove a list of photos by id from the user\'s personal '
'collection by labeling them as removed for that user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'episodes': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {
'description': 'id of the episode containing photos to remove '
'from the user\'s personal collection',
'type': 'string',
},
'photo_ids': {
'description': 'ids of photos to remove from the user\'s '
'personal collection',
'type': 'array',
'items': {'type': 'string'},
},
},
},
},
},
}
REMOVE_PHOTOS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Remove viewpoint from a user's inbox.
#
# /service/remove_viewpoint
REMOVE_VIEWPOINT_REQUEST = {
'description': 'remove viewpoint from a user\'s inbox',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'viewpoint_id': {
'description': 'id of viewpoint to be removed from user\'s inbox',
'type': 'string',
},
},
}
REMOVE_VIEWPOINT_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Resolve contacts to fill in missing information.
#
# Currently only supports retrieving metadata for email-based identities.
# Used to enable sharing to email addresses that are not currently in
# the user's contacts.
#
# /service/resolve_contacts
RESOLVE_CONTACTS_REQUEST = {
'description': 'resolve identities to contact metadata; each input identity '
'should begin with "Email:"',
'type': 'object',
'properties': {
'headers': HEADERS,
'identities': {
'type': 'array',
'items': {'type': 'string'},
},
},
}
RESOLVE_CONTACTS_RESPONSE = {
'description': 'returns a list of resolved contacts in the same order as the '
'request; if the identity matched an existing user the user_id will be '
'filled in',
'type': 'object',
'properties': {
'headers': HEADERS,
'contacts': {
'description': ('list of resolved contacts, in the same '
'order as the request. Email identities for existing '
'users will have user_id and other fields filled in; '
'otherwise only the identity field will be present.'),
'type': 'array',
'items': {
'description': 'partial user metadata',
'type': 'object',
'properties': {
'user_id': {'type': 'number', 'required': False},
'identity': {'type': 'string', 'required': False},
'name': {'type': 'string', 'required': False},
'given_name': {'type': 'string', 'required': False},
'family_name': {'type': 'string', 'required': False},
'labels': {
'description': ('set of boolean modifiers affecting the user (e.g. "registered"). '
'The "friend" label and any data requiring friend status will not be '
'returned by this method.'),
'type': 'array', 'required': False, 'items': {'type': 'string'},
},
}
}
},
}
}
# Save photos to default viewpoint.
#
# /service/save_photos
SAVE_PHOTOS_REQUEST = {
'description': 'save photos from existing episodes to new episodes in the current user\'s '
'default viewpoint',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint_ids': {
'description': 'server saves all episodes contained within these viewpoints; this is '
'in addition to episodes that may be given in the "episodes" field; if an episode is '
'specified in the "episodes" field, it is assumed to be complete and is skipped when '
'scanning the viewpoint',
'type': 'array',
'required': False,
'items': {'type': 'string'},
},
'episodes': OPTIONAL_COPY_EPISODES_METADATA,
},
}
SAVE_PHOTOS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Share photos with the followers of an existing viewpoint.
#
# /service/share_existing
SHARE_EXISTING_REQUEST = {
'description': 'share episodes with the followers of an existing '
'viewpoint',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint_id': {'type': 'string'},
'episodes': COPY_EPISODES_METADATA,
},
}
SHARE_EXISTING_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Share photos with contacts in a new viewpoint.
#
# /service/share_new
SHARE_NEW_REQUEST = {
'description': 'share photos from existing episodes into a new viewpoint, '
'with the resolved contacts as followers',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint': CREATE_VIEWPOINT_METADATA,
'episodes': COPY_EPISODES_METADATA,
'contacts': FOLLOWER_CONTACTS_METADATA,
},
}
SHARE_NEW_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Terminate user account.
#
# /service/terminate_account
TERMINATE_ACCOUNT_REQUEST = {
'description': 'terminate a user account -- unlink all identities, mute '
'all alerts, disable all sharing',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
},
}
TERMINATE_ACCOUNT_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Unlink existing identity.
#
# /service/unlink_identity
UNLINK_IDENTITY_REQUEST = {
'description': 'unlink an identity from an account; succeeds if the specified identity '
'is in fact linked and if it is not the last identity authenticated via trusted authority',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'identity': {
'description': 'e.g. Email:spencer.kimball.gmail.com | Phone:6464174337 | FacebookGraph:62052443',
'type': 'string',
},
},
}
UNLINK_IDENTITY_RESPONSE = {
'description': 'empty response; on error, the standard error response',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Unshare photos from a viewpoint.
#
# /service/unshare
UNSHARE_REQUEST = {
'description': 'unshares photos from episodes in a viewpoint; also '
'recursively unshares from all derived episodes',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'viewpoint_id': {'type': 'string'},
'episodes': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'episode_id': {
'description': 'id of the episode containing photos to unshare '
'from the viewpoint',
'type': 'string',
},
'photo_ids': {
'description': 'ids of photos to unshare from the viewpoint',
'type': 'array',
'items': {'type': 'string'},
},
},
},
},
},
}
UNSHARE_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Updates device information including last_access time. Returns a
# newly encrypted user cookie. This is necessary in the case of a
# device that isn't used often and has failed multiple push
# notifications and had its push_token reset.
#
# /service/update_device
UPDATE_DEVICE_REQUEST = {
'description': 'update device information',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'device_dict': DEVICE_METADATA,
},
}
UPDATE_DEVICE_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
}
}
# Per-user photo metadata update.
#
# /service/update_user_photo
UPDATE_USER_PHOTO_REQUEST = {
'description': 'updates the per-user metadata of an existing photo',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
},
}
_CopyProperties(target_dict=UPDATE_USER_PHOTO_REQUEST, source_dict=USER_PHOTO_METADATA)
UPDATE_USER_PHOTO_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Episode metadata update.
#
# /service/update_episode
UPDATE_EPISODE_REQUEST = {
'description': 'updates the metadata of an existing episode',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
},
}
_CopyProperties(target_dict=UPDATE_EPISODE_REQUEST, source_dict=UPDATE_EPISODE_METADATA)
UPDATE_EPISODE_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Follower metadata update.
#
# /service/update_follower
UPDATE_FOLLOWER_REQUEST = {
'description': 'updates the metadata of an existing follower',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'follower': UPDATE_FOLLOWER_METADATA,
},
}
UPDATE_FOLLOWER_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Friend metadata update.
#
# /service/update_friend
UPDATE_FRIEND_REQUEST = {
'description': 'updates the metadata of a friend; updates only affect the view of the calling '
'user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'friend': FRIEND_METADATA,
},
}
UPDATE_FRIEND_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Photo metadata update.
#
# /service/update_photo
UPDATE_PHOTO_REQUEST = {
'description': 'updates the metadata of an existing photo',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
},
}
_CopyProperties(target_dict=UPDATE_PHOTO_REQUEST, source_dict=UPDATE_PHOTO_METADATA)
UPDATE_PHOTO_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# User metadata update.
#
# /service/update_user
UPDATE_USER_REQUEST = {
'description': 'updates the metadata of an existing user',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'account_settings': UPDATE_ACCOUNT_SETTINGS_METADATA,
'password': {
'description': 'new user password; this field can only be set if using a recently '
'confirmed user cookie, or if the old_password matches, or if no user password has '
'yet been set',
'type': 'string', 'required': False,
},
'old_password': {
'description': 'if this matches the old password, then a new password can be set '
'without needing a confirmed user cookie',
'type': 'string', 'required': False,
},
},
}
_CopyProperties(target_dict=UPDATE_USER_REQUEST, source_dict=UPDATE_USER_PROFILE_METADATA)
UPDATE_USER_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Viewpoint metadata update.
#
# /service/update_viewpoint
UPDATE_VIEWPOINT_REQUEST = {
'description': 'updates the metadata of an existing viewpoint',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
},
}
_CopyProperties(target_dict=UPDATE_VIEWPOINT_REQUEST, source_dict=UPDATE_VIEWPOINT_METADATA)
_CopyProperties(target_dict=UPDATE_VIEWPOINT_REQUEST, source_dict=UPDATE_FOLLOWER_METADATA)
UPDATE_VIEWPOINT_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
},
}
# Upload user contact list.
#
# /service/upload_contacts
UPLOAD_CONTACTS_REQUEST = {
'description': 'upload contact tuples',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'contacts': {
'type': 'array',
'maxItems': 50,
'items': UPLOAD_CONTACT_METADATA,
},
},
}
UPLOAD_CONTACTS_RESPONSE = {
'description': 'returns nothing',
'type': 'object',
'properties': {
'headers': HEADERS,
'contact_ids': {
'description': 'list of server computed contact_ids generated from '
'the list of contacts in the upload_contacts request',
'type': 'array',
'items': {'type': 'string'},
},
}
}
# Upload photo and episode metadata to service.
#
# /service/upload_episode
UPLOAD_EPISODE_REQUEST = {
'description': 'episode id, optional metadata, and list of photos',
'type': 'object',
'properties': {
'headers': OP_HEADERS,
'activity': CREATE_ACTIVITY_METADATA,
'episode': UPLOAD_EPISODE_METADATA,
'photos': {
'description': 'list of photos in episode',
'type': 'array',
'items': UPLOAD_PHOTO_METADATA,
},
},
}
UPLOAD_EPISODE_RESPONSE = {
'description': 'returns episode id and list of photo ids, one per metadata upload',
'type': 'object',
'properties': {
'headers': HEADERS,
'photos': {
'description': 'photo info for each metadata upload',
'type': 'array',
'items': {
'type': 'object',
'properties': {
'photo_id': {
'description': 'server-assigned base64hex-encoded photo id',
'type': 'string'
},
'orig_put_url': {
'description': 'pre-authorized url for original resolution image file upload; '
'URL expires in 24 hours',
'type': 'string'
},
'full_put_url': {
'description': 'pre-authorized url for full-screen resolution image file upload; '
'URL expires in 24 hours',
'type': 'string'
},
'med_put_url': {
'description': 'pre-authorized url for medium resolution image file upload; '
'URL expires in 24 hours',
'type': 'string'
},
'tn_put_url': {
'description': 'pre-authorized url for thumbnail resolution image file upload; '
'URL expires in 24 hours',
'type': 'string'
},
},
},
},
},
}
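# Client-side sketch (illustrative only): after upload_episode returns, each
# image rendition is PUT directly to its pre-authorized URL before the 24-hour
# expiry. The 'requests' library and the shape of 'images' are assumptions
# about the client environment, not part of this schema.
#
#   import requests
#
#   def put_photo_images(response, images):
#     for photo in response['photos']:
#       for key, data in images[photo['photo_id']].items():
#         # key is one of 'orig', 'full', 'med', 'tn'
#         requests.put(photo['%s_put_url' % key], data=data).raise_for_status()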
|
0359xiaodong/viewfinder
|
backend/www/json_schema.py
|
Python
|
apache-2.0
| 81,009
|
[
"Galaxy"
] |
3818a65ebef4471b1a643052ea73e0dc830f59ee9b724ff4fc19e96b42532dca
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from math import ceil
from os.path import dirname, join, realpath
from sys import exit as sys_exit
from sys import path
path.append("..")
from platformio import util
from platformio.platforms.base import PlatformFactory, get_packages
def is_compat_platform_and_framework(platform, framework):
p = PlatformFactory.newPlatform(platform)
for pkg in p.get_packages().keys():
if pkg.startswith("framework-%s" % framework):
return True
return False
def generate_boards(boards):
def _round_memory_size(size):
size = ceil(size)
for b in (64, 32, 16, 8, 4, 2, 1):
if b < size:
return int(ceil(size / b) * b)
raise NotImplementedError("memory sizes of 1 KB or less are not supported")
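# Examples of the rounding above (comments only): 20 KB -> ceil(20/16)*16 = 32 KB,
# 120 KB -> ceil(120/64)*64 = 128 KB; i.e. a size is rounded up to a multiple
# of the largest power of two below it.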
lines = []
lines.append("""
.. list-table::
:header-rows: 1
* - Type ``board``
- Name
- Microcontroller
- Frequency
- Flash
- RAM""")
for board in sorted(boards):
for type_, data in board.iteritems():
assert type_ in util.get_boards()
board_ram = float(data['upload']['maximum_ram_size']) / 1024
lines.append("""
* - ``{type}``
- `{name} <{url}>`_
- {mcu}
- {f_cpu:d} MHz
- {rom} Kb
- {ram} Kb""".format(
type=type_,
name=data['name'],
url=data['url'],
mcu=data['build']['mcu'].upper(),
f_cpu=int((data['build']['f_cpu'][:-1])) / 1000000,
ram=int(board_ram) if board_ram % 1 == 0 else board_ram,
rom=_round_memory_size(
data['upload']['maximum_size'] / 1024)
))
return "\n".join(lines + [""])
def generate_packages(packages):
allpackages = get_packages()
lines = []
lines.append(""".. list-table::
:header-rows: 1
* - Name
- Contents""")
for type_, data in packages.iteritems():
assert type_ in allpackages
contitems = [
"`%s <%s>`_" % (name, url)
for name, url in allpackages[type_]
]
lines.append("""
* - ``{type_}``
- {contents}""".format(
type_=type_,
contents=", ".join(contitems)))
lines.append("""
.. warning::
**Linux Users:** Don't forget to install the "udev" rules file
`99-platformio-udev.rules <https://github.com/platformio/platformio/blob/develop/scripts/99-platformio-udev.rules>`_ (installation instructions are located in the file).
**Windows Users:** Please check that you have correctly installed USB driver
from board manufacturer
""")
return "\n".join(lines)
def generate_platform(name):
print "Processing platform: %s" % name
lines = []
lines.append(".. _platform_%s:" % name)
lines.append("")
_title = "Platform ``%s``" % name
lines.append(_title)
lines.append("=" * len(_title))
p = PlatformFactory.newPlatform(name)
lines.append(p.get_description())
lines.append("""
For more detailed information please visit `vendor site <%s>`_.""" %
p.get_vendor_url())
lines.append("""
.. contents::""")
lines.append("""
Packages
--------
""")
lines.append(generate_packages(p.get_packages()))
lines.append("""
Frameworks
----------
.. list-table::
:header-rows: 1
* - Name
- Description""")
_frameworks = util.get_frameworks()
for framework in sorted(_frameworks.keys()):
if not is_compat_platform_and_framework(name, framework):
continue
lines.append("""
* - :ref:`framework_{type_}`
- {description}""".format(
type_=framework,
description=_frameworks[framework]['description']))
lines.append("""
Boards
------
.. note::
* You can list pre-configured boards by :ref:`cmd_boards` command or
`PlatformIO Boards Explorer <http://platformio.org/#!/boards>`_
* For more detailed ``board`` information please scroll the tables below
horizontally.
""")
vendors = {}
for board, data in util.get_boards().items():
platform = data['platform']
vendor = data['vendor']
if name in platform:
if vendor in vendors:
vendors[vendor].append({board: data})
else:
vendors[vendor] = [{board: data}]
for vendor, boards in sorted(vendors.iteritems()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.append(generate_boards(boards))
return "\n".join(lines)
def update_platform_docs():
for name in PlatformFactory.get_platforms().keys():
rst_path = join(
dirname(realpath(__file__)), "..", "docs", "platforms",
"%s.rst" % name)
with open(rst_path, "w") as f:
f.write(generate_platform(name))
def generate_framework(type_, data):
print "Processing framework: %s" % type_
lines = []
lines.append(".. _framework_%s:" % type_)
lines.append("")
_title = "Framework ``%s``" % type_
lines.append(_title)
lines.append("=" * len(_title))
lines.append(data['description'])
lines.append("""
For more detailed information please visit `vendor site <%s>`_.
""" % data['url'])
lines.append(".. contents::")
lines.append("""
Platforms
---------
.. list-table::
:header-rows: 1
* - Name
- Description""")
for platform in sorted(PlatformFactory.get_platforms().keys()):
if not is_compat_platform_and_framework(platform, type_):
continue
p = PlatformFactory.newPlatform(platform)
lines.append("""
* - :ref:`platform_{type_}`
- {description}""".format(
type_=platform,
description=p.get_description()))
lines.append("""
Boards
------
.. note::
* You can list pre-configured boards by :ref:`cmd_boards` command or
`PlatformIO Boards Explorer <http://platformio.org/#!/boards>`_
* For more detailed ``board`` information please scroll the tables below horizontally.
""")
vendors = {}
for board, data in util.get_boards().items():
frameworks = data['frameworks']
vendor = data['vendor']
if type_ in frameworks:
if vendor in vendors:
vendors[vendor].append({board: data})
else:
vendors[vendor] = [{board: data}]
for vendor, boards in sorted(vendors.iteritems()):
lines.append(str(vendor))
lines.append("~" * len(vendor))
lines.append(generate_boards(boards))
return "\n".join(lines)
def update_framework_docs():
for name, data in util.get_frameworks().items():
rst_path = join(util.get_source_dir(), "..", "docs", "frameworks",
"%s.rst" % name)
with open(rst_path, "w") as f:
f.write(generate_framework(name, data))
def update_create_platform_doc():
allpackages = get_packages()
lines = []
lines.append(""".. _platform_creating_packages:
Packages
--------
*PlatformIO* has pre-built packages for the most popular operating systems:
*Mac OS*, *Linux (+ARM)* and *Windows*.
.. list-table::
:header-rows: 1
* - Name
- Contents""")
for type_, data in sorted(allpackages.iteritems()):
contitems = [
"`%s <%s>`_" % (name, url)
for name, url in allpackages[type_]
]
lines.append("""
* - ``{type_}``
- {contents}""".format(
type_=type_,
contents=", ".join(contitems)))
with open(join(util.get_source_dir(), "..", "docs", "platforms",
"creating_platform.rst"), "r+") as fp:
content = fp.read()
fp.seek(0, 0)
fp.write(
content[:content.index(".. _platform_creating_packages:")] +
"\n".join(lines) + "\n\n" +
content[content.index(".. _platform_creating_manifest_file:"):]
)
def main():
update_create_platform_doc()
update_platform_docs()
update_framework_docs()
if __name__ == "__main__":
sys_exit(main())
|
bkudria/platformio
|
scripts/docspregen.py
|
Python
|
mit
| 8,124
|
[
"VisIt"
] |
537b2b930077b1b7b5b72772afef1a7d7dfe1b50aa523e8bda240ba1f11190e1
|
'''
Created on 2014-07-09
This module contains metadata and access functions for the updated PRISM climatology as distributed by
PCIC (the Pacific Climate Impacts Consortium).
@author: Andre R. Erler, GPL v3
'''
# external imports
import numpy as np
import os
# internal imports
from geodata.netcdf import DatasetNetCDF
from geodata.gdal import addGDALtoDataset
from utils.nctools import writeNetCDF
from datasets.common import getRootFolder, grid_folder, transformPrecip
from datasets.common import loadObservations, addLandMask, addLengthAndNamesOfMonth, getFileName
# from geodata.utils import DatasetError
from warnings import warn
from geodata.gdal import GridDefinition
## PCIC PRISM Meta-data
dataset_name = 'PCIC'
root_folder = getRootFolder(dataset_name=dataset_name) # get dataset root folder based on environment variables
# PRISM grid definition
dlat = dlon = 1./120. # 0.0083333333
dlat2 = dlon2 = 1./240. # half step
nlat = 1680 # slat = 14 deg
nlon = 3241 # slon = 27 deg
# N.B.: coordinates refer to grid points (CF convention), commented values refer to box edges (GDAL convention)
llclat = 48. # 48.0000000000553
# llclat = 48.0000000000553 # 48.
llclon = -140. # -140.0
geotransform = (llclon-dlon2, dlon, 0.0, llclat-dlat2, 0.0, dlat)
size = (nlon,nlat) # (x,y) map size of PRISM grid
# make GridDefinition instance
PCIC_grid = GridDefinition(name=dataset_name, projection=None, geotransform=geotransform, size=size)
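# Quick consistency check (comments only): with dlon = 1/120 deg, the
# nlon = 3241 grid points span (3241-1)/120 = 27 deg of longitude, and the
# nlat = 1680 points span (1680-1)/120 ~ 14 deg of latitude, matching the
# slon/slat notes above.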
## Functions that handle access to the original PCIC NetCDF files
# variable attributes and names in original PCIC files
ltmvaratts = dict(tmin = dict(name='Tmin', units='K', atts=dict(long_name='Minimum 2m Temperature'), offset=273.15), # 2m minimum temperature
tmax = dict(name='Tmax', units='K', atts=dict(long_name='Maximum 2m Temperature'), offset=273.15), # 2m maximum temperature
pr = dict(name='precip', units='mm/month', atts=dict(long_name='Total Precipitation'), transform=transformPrecip), # total precipitation
# axes (don't have their own file; listed in axes)
time = dict(name='time', units='days', atts=dict(long_name='days since beginning of year'), offset=-5493), # time coordinate
lon = dict(name='lon', units='deg E', atts=dict(long_name='Longitude')), # geographic longitude field
lat = dict(name='lat', units='deg N', atts=dict(long_name='Latitude'))) # geographic latitude field
# N.B.: the time-series time offset is chosen such that 1979 begins with the origin (time=0)
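# For example (a sketch of the intended conversion, assuming the source files
# store temperatures in deg C): a raw PCIC 'tmin' value of -5.0 would load as
# Tmin = -5.0 + 273.15 = 268.15 K via the 'offset' entry above.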
# list of variables to load
ltmvarlist = list(ltmvaratts.keys()) # also includes coordinate fields
# loads data from original PCIC NetCDF files
# climatology
ltmfolder = root_folder + 'climatology/' # climatology subfolder
ltmfile = '{0:s}_monClim_PRISM_historical_run1_197101-200012.nc' # expand with variable name
def loadPCIC_LTM(name=dataset_name, varlist=None, varatts=ltmvaratts, filelist=None, folder=ltmfolder):
''' Get a properly formatted dataset the monthly PCIC PRISM climatology. '''
# translate varlist
if varlist is None: varlist = list(varatts.keys())
#if varlist and varatts: varlist = translateVarNames(varlist, varatts)
# generate file list
filelist = [ltmfile.format(var) for var in varlist if var not in ('time','lat','lon')]
# load variables separately
dataset = DatasetNetCDF(name=name, folder=folder, filelist=filelist, varlist=varlist, varatts=varatts, ncformat='NETCDF4')
dataset = addGDALtoDataset(dataset, projection=None, geotransform=None, gridfolder=grid_folder)
# N.B.: projection should be auto-detected as geographic
# return formatted dataset
return dataset
## Functions that provide access to well-formatted PCIC PRISM NetCDF files
# pre-processed climatology files (varatts etc. should not be necessary)
avgfile = 'pcic{0:s}_clim{1:s}.nc' # formatted NetCDF file
avgfolder = root_folder + 'pcicavg/' # prefix
# function to load these files...
def loadPCIC(name=dataset_name, period=None, grid=None, resolution=None, varlist=None, varatts=None,
folder=None, filelist=None, lautoregrid=True):
''' Get the pre-processed monthly PCIC PRISM climatology as a DatasetNetCDF. '''
if folder is None: folder = avgfolder
# only the climatology is available
if period is not None:
warn('Only the full climatology is currently available: setting \'period\' to None.')
period = None
# load standardized climatology dataset with PRISM-specific parameters
dataset = loadObservations(name=name, folder=folder, projection=None, period=period, grid=grid,
varlist=varlist, varatts=varatts, filepattern=avgfile, filelist=filelist,
lautoregrid=lautoregrid, mode='climatology')
# # make sure all fields are masked
# dataset.load()
# dataset.mask(dataset.datamask, maskSelf=False)
# return formatted dataset
return dataset
# function to load station data
def loadPCIC_Stn(name=dataset_name, period=None, station=None, resolution=None, varlist=None,
varatts=None, folder=avgfolder, filelist=None):
''' Get the pre-processed monthly PCIC PRISM climatology at station locations as a DatasetNetCDF. '''
# only the climatology is available
if period is not None:
warn('Only the full climatology is currently available: setting \'period\' to None.')
period = None
# load standardized climatology dataset with PCIC-specific parameters
dataset = loadObservations(name=name, folder=folder, grid=None, station=station, shape=None,
varlist=varlist, varatts=varatts, filepattern=avgfile, projection=None,
filelist=filelist, lautoregrid=False, period=period, mode='climatology')
# return formatted dataset
return dataset
# function to load averaged data
def loadPCIC_Shp(name=dataset_name, period=None, shape=None, resolution=None, varlist=None,
varatts=None, folder=avgfolder, filelist=None, lencl=False):
''' Get the pre-processed monthly PCIC PRISM climatology averaged over regions as a DatasetNetCDF. '''
# only the climatology is available
if period is not None:
warn('Only the full climatology is currently available: setting \'period\' to None.')
period = None
# load standardized climatology dataset with PCIC-specific parameters
dataset = loadObservations(name=name, folder=folder, grid=None, station=None, shape=shape, lencl=lencl,
varlist=varlist, varatts=varatts, filepattern=avgfile, projection=None,
filelist=filelist, lautoregrid=False, period=period, mode='climatology')
# return formatted dataset
return dataset
## Dataset API
dataset_name # dataset name
root_folder # root folder of the dataset
ts_file_pattern = None
clim_file_pattern = avgfile # filename pattern
data_folder = avgfolder # folder for user data
grid_def = {'':PCIC_grid} # no special name, since there is only one...
LTM_grids = [''] # grids that have long-term mean data
TS_grids = [] # grids that have time-series data
grid_res = {'':0.008} # approximate resolution in degrees at 45 degrees latitude
default_grid = PCIC_grid
# functions to access specific datasets
loadLongTermMean = loadPCIC_LTM # climatology provided by publisher
loadTimeSeries = None # time-series data
loadClimatology = loadPCIC # pre-processed, standardized climatology
loadStationClimatology = loadPCIC_Stn # climatologies without associated grid (e.g. stations or basins)
loadShapeClimatology = loadPCIC_Shp
if __name__ == '__main__':
mode = 'test_climatology'
# mode = 'test_point_climatology'
# mode = 'convert_climatology'
pntset = 'shpavg' # or 'ecprecip'
# do some tests
if mode == 'test_climatology':
# load NetCDF dataset
dataset = loadPCIC(grid='wc2_d01')
# dataset = loadPCIC()
print(dataset)
print('')
stnds = loadPCIC_Stn(station='ecprecip')
print(stnds)
print('')
print((dataset.geotransform))
print((dataset.precip.masked))
print((dataset.precip.getArray().mean()))
print('')
# display
import pylab as pyl
pyl.imshow(np.flipud(dataset.datamask.getArray()[:,:]))
pyl.colorbar(); pyl.show(block=True)
elif mode == 'test_point_climatology':
# load point climatology
print('')
if pntset in ('shpavg',): dataset = loadPCIC_Shp(shape=pntset)
else: dataset = loadPCIC_Stn(station=pntset)
print(dataset)
print('')
print((dataset.time))
print((dataset.time.coord))
## convert PCIC NetCDF files to proper climatology
elif mode == 'convert_climatology':
# load dataset
source = loadPCIC_LTM().load() # load, otherwise masking does not work!
# change meta-data
source.name = 'PCIC'
source.title = 'PCIC PRISM Climatology'
# load data into memory (and ignore last time step, which is just the annual average)
# source.load(time=(0,12)) # exclusive the last index
# N.B.: now we need to trim the files beforehand...
# make normal dataset
dataset = source.copy()
source.close()
## add new variables
# add landmask (it's not really a landmask, though)
dataset.precip.mask(maskValue=-9999.) # mask all fields using the missing value flag
maskatts = dict(name='datamask', units='', long_name='Mask for Climatology Fields',
description='where this mask is non-zero, no data is available')
addLandMask(dataset, maskname='datamask',atts=maskatts) # create mask from precip mask
# add length and names of month
addLengthAndNamesOfMonth(dataset, noleap=False)
# add mean temperature
T2 = dataset.Tmin + dataset.Tmax # average temperature is just the average between min and max
T2 /= 2.
T2.name = 'T2'; T2.atts.long_name='Average 2m Temperature'
print(T2)
dataset += T2 # add to dataset
# rewrite time axis
time = dataset.time
time.load(data=np.arange(1,13, dtype=time.dtype)) # 1 to 12 (incl.) for climatology
time.units = 'month'; time.atts.long_name='Month of the Year'
print(time)
# print diagnostic
print(dataset)
print('')
for var in dataset:
#print(var)
if not var.strvar:
print(('Mean {0:s}: {1:s} {2:s}'.format(var.atts.long_name, str(var.mean()), var.units)))
#print('')
print('')
# clean some offending attributes
for var in dataset.axes.values():
for name in ('NAME','CLASS'):
if name in var.atts: del var.atts[name]
## create new NetCDF file
# figure out a different filename
filename = getFileName(name='PCIC', filepattern=avgfile)
if os.path.exists(avgfolder+filename): os.remove(avgfolder+filename)
# write data and some annotation
sink = writeNetCDF(dataset, avgfolder+filename, close=False)
# add_strvar(sink,'name_of_month', name_of_month, 'time', # add names of month
# atts=dict(name='name_of_month', units='', long_name='Name of the Month'))
sink.close() # close...
print(('Saving Climatology to: '+filename))
print(avgfolder)
|
aerler/GeoPy
|
src/datasets/PCIC.py
|
Python
|
gpl-3.0
| 11,430
|
[
"NetCDF"
] |
d79df48380408651291384c0a30552e911e90503287bcf0af32b65f0bb06f58e
|
# qmpy/analysis/thermodynamics/phase.py
import numpy as np
from collections import defaultdict
import os.path
import qmpy
from io import StringIO
import fractions as frac
import logging
from qmpy.utils import *
from django.db.models import F, Q
import operator
from functools import reduce
from functools import total_ordering
logger = logging.getLogger(__name__)
THERMOPY_LIB_PATH = qmpy.INSTALL_PATH + "/data/thermodynamic/"
class PhaseError(Exception):
pass
class PhaseDataError(Exception):
pass
class PhaseData(object):
"""
A PhaseData object is a container for storing and organizing phase data.
Most useful when performing a large number of thermodynamic analyses,
where it would be too slow to access the database for every space you
want to consider.
"""
def __init__(self):
self.clear()
def __str__(self):
return "%d Phases" % len(self.phases)
@property
def phases(self):
"""
List of all phases.
"""
return self._phases
@phases.setter
def phases(self, phases):
self.clear()
for phase in phases:
self.add_phase(phase)
def clear(self):
self._phases = []
self.phases_by_elt = defaultdict(set)
self.phases_by_dim = defaultdict(set)
self.phase_dict = {}
self.space = set()
def add_phase(self, phase):
"""
Add a phase to the PhaseData collection. Updates the
PhaseData.phase_dict and PhaseData.phases_by_elt dictionaries
appropriately to enable quick access.
Examples::
>>> pd = PhaseData()
>>> pd.add_phase(Phase(composition='Fe2O3', energy=-3))
>>> pd.add_phase(Phase(composition='Fe2O3', energy=-4))
>>> pd.add_phase(Phase(composition='Fe2O3', energy=-5))
>>> pd.phase_dict
{'Fe2O3': <Phase Fe2O3 : -5>}
>>> pd.phases_by_elt['Fe']
[<Phase Fe2O3 : -3>, <Phase Fe2O3 : -4>, <Phase Fe2O3 : -5>]
"""
if not phase.name in self.phase_dict:
self.phase_dict[phase.name] = phase
else:
if phase.energy < self.phase_dict[phase.name].energy:
self.phase_dict[phase.name] = phase
self._phases.append(phase)
phase.index = len(self._phases)
for elt in phase.comp:
self.phases_by_elt[elt].add(phase)
self.phases_by_dim[len(phase.comp)].add(phase)
self.space |= set(phase.comp.keys())
def add_phases(self, phases):
"""
Loops over a sequence of phases, and applies `add_phase` to each.
Equivalent to::
>>> pd = PhaseData()
>>> for p in phases:
>>> pd.add_phase(p)
"""
for phase in phases:
self.add_phase(phase)
def load_library(self, library):
"""
Load a library file, containing self-consistent thermochemical data.
Equivalent to::
>>> pd = PhaseData()
>>> pd.read_file(INSTALL_PATH+'/data/thermodata/%s' % library)
"""
logger.debug("Loading Phases from %s" % library)
self.read_file(qmpy.INSTALL_PATH + "/data/thermodata/" + library)
def dump(self, filename=None, minimal=True):
"""
Writes the contents of the phase data to a file or to stdout.
Keyword Arguments:
filename:
If None, prints the file to stdout, otherwise writes the file
to the specified filename. Default=None.
minimal:
Dump _every_ phase in the PhaseData object, or only those that
can contribute to a phase diagram. If True, only the lowest
energy phase at a given composition will be written.
Default=True.
"""
pr = False
if filename is None:
pr = True
print("Composition Energy")
else:
f = open(os.path.abspath(filename), "w")
f.write("Composition Energy\n")
if minimal:
phases = list(self.phase_dict.values())
else:
phases = self.phases
for p in phases:
l = "%s %s" % (format_comp(p.comp), p.energy)
if pr:
print(l)
else:
f.write(l + "\n")
def load_oqmd(
self,
space=None,
search={},
exclude={},
stable=False,
fit="standard",
total=False,
):
"""
Load data from the OQMD.
Keyword Arguments:
space:
sequence of elements. If supplied, will return only phases
within that region of phase space. i.e. ['Fe', 'O'] will
return Fe, O and all iron oxides.
search:
dictionary of database search keyword:value pairs.
stable:
Restrict search to only stable phases (faster, but relies on
having current phase stability analyses).
Examples::
>>> pd = PhaseData()
>>> search = {'calculation__path__contains':'icsd'}
>>> pd.load_oqmd(space=['Fe','O'], search=search, stable=True)
"""
from qmpy.materials.formation_energy import FormationEnergy
from qmpy.materials.element import Element
logger.debug("Loading Phases from the OQMD")
data = FormationEnergy.objects.all()
##data = data.filter(entry__id=F('entry__duplicate_of__id'))
if fit:
data = data.filter(fit=fit)
else:
total = True
if stable:
data = data.filter(stability__lte=0)
if search:
data = data.filter(**search)
if exclude:
data = data.exclude(**exclude)
if space:
## Query phase space using element_list
dim = len(space) + 1
element_q_lst = [
Q(composition__element_list__contains=s + "_") for s in space
]
combined_q = reduce(operator.or_, element_q_lst)
combined_q = reduce(
operator.and_, [combined_q, Q(composition__ntypes__lt=dim)]
)
exclude_element_q_lst = [
Q(composition__element_list__contains=e.symbol + "_")
for e in Element.objects.exclude(symbol__in=space)
]
combined_q_not = reduce(operator.or_, exclude_element_q_lst)
data = data.filter(combined_q).exclude(combined_q_not)
## The following is old method (will be removed in future)
# space_qs = Element.objects.exclude(symbol__in=space)
# data = data.filter(composition__element_set__in=space)
# data = data.exclude(composition__element_set__in=space_qs)
data = data.distinct()
columns = [
"id",
"composition_id",
"stability",
"calculation__input__spacegroup",
]
if total:
columns.append("calculation__energy_pa")
else:
columns.append("delta_e")
values = data.values(*columns)
for row in values:
if total:
energy = row["calculation__energy_pa"]
else:
energy = row["delta_e"]
try:
phase = Phase(
energy=energy,
composition=parse_comp(row["composition_id"]),
description=row["calculation__input__spacegroup"],
stability=row["stability"],
per_atom=True,
total=total,
)
phase.id = row["id"]
self.add_phase(phase)
except TypeError:
raise PhaseError(
"Something went wrong with Formation object\
{}. No composition?".format(
row["id"]
)
)
def read_file(self, filename, per_atom=True):
"""
Read in a thermodata file (named filename).
File format::
composition energy
Fe 0.0
O 0.0
Li 0.0
Fe3O4 -0.21331204979
FeO -0.589343204057
Fe3O4 -0.21331204979
FeLiO2 -0.446739168889
FeLi5O4 -0.198830531099
Keyword Arguments:
per_atom: If True, the supplied energies are per atom, not per
formula unit. Defaults to True.
"""
if isinstance(filename, str):
    fileobj = open(filename)
elif isinstance(filename, type(StringIO())):
    fileobj = filename
    fileobj.name = None
else:
    # assume an already-open file-like object; Python 3 has no builtin
    # ``file`` type to test against
    fileobj = filename
thermodata = fileobj.readlines()
headers = [h.lower() for h in thermodata.pop(0).strip().split()]
if "composition" not in headers:
raise PhaseDataError(
"Found columns: %s. Must provide composition in\
a column labelled composition."
% (", ".join(headers))
)
if "energy" not in headers and "delta_e" not in headers:
raise PhaseDataError(
"Found columns: %s. Must provide energies in\
a column labelled delta_e or energy."
% (", ".join(headers))
)
keywords = {
"energy": "energy",
"composition": "composition",
"delta_e": "energy",
"delta_h": "energy",
"delta_g": "energy",
"comp": "composition",
"name": "composition",
"desc": "description",
"description": "description",
}
headers = [keywords[h] for h in headers if h in keywords]
if isinstance(filename, str):
    name = filename.split("/")[-1]
else:
    name = getattr(fileobj, "name", None) or "stream"
for i, line in enumerate(thermodata):
line = line.strip().split()
if not line:
continue
ddict = dict(list(zip(headers, line)))
phase = Phase(
composition=ddict["composition"],
energy=float(ddict["energy"]),
description=ddict.get(
"description", "{file}:{line}".format(file=name, line=i)
),
per_atom=per_atom,
)
self.add_phase(phase)
def get_phase_data(self, space):
"""
Return a new PhaseData object populated with the subset of phases that
lie inside a given region of phase space.
Arguments:
space: formatted as in :func:`qmpy.PhaseSpace.__init__()`
Examples::
>>> pd = PhaseData()
>>> pd.read_file('legacy.dat')
>>> new_pd = pd.get_phase_data(['Fe', 'O'])
>>> new_pd.phase_dict
"""
if not space:
return self
##dim = len(space)
phases = set(self.phases)
others = set(self.phases_by_elt.keys()) - set(space)
for elt in others:
phases -= self.phases_by_elt[elt]
pd = PhaseData()
pd.phases = phases
return pd
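# Usage sketch (based on the docstrings above; 'legacy.dat' is a placeholder
# thermodata file in the format accepted by read_file):
#
#   pd = PhaseData()
#   pd.read_file('legacy.dat')
#   fe_o = pd.get_phase_data(['Fe', 'O'])  # restrict to the Fe-O space
#   candidates = fe_o.phase_dict           # lowest-energy phase per name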
@total_ordering
class Phase(object):
"""
A Phase object is a point in composition-energy space.
Examples::
>>> p1 = Phase('Fe2O3', -1.64, per_atom=True)
>>> p2 = Phase('Fe2O3', -8.2, per_atom=False)
>>> p3 = Phase({'Fe':0.4, 'O':0.6}, -1.64)
>>> p4 = Phase({'Fe':6, 'O':9}, -24.6, per_atom=False)
>>> p1 == p2
True
>>> p2 == p3
True
>>> p3 == p4
True
"""
id = None
use = True
show_label = True
_calculation = None
custom_name = None
phase_dict = {}
def __init__(
self,
composition=None,
energy=None,
description="",
per_atom=True,
stability=None,
total=False,
name="",
):
if composition is None or energy is None:
raise PhaseError("Composition and/or energy missing.")
if isinstance(composition, str):
composition = parse_comp(composition)
self.description = description
self.comp = defaultdict(float, composition)
self.stability = stability
if name:
self.custom_name = name
if not per_atom:
self.total_energy = energy
else:
self.energy = energy
@staticmethod
def from_phases(phase_dict):
"""
Generate a Phase object from a dictionary of Phase objects. Returns a
composite phase of unit composition.
"""
if len(phase_dict) == 1:
return list(phase_dict.keys())[0]
pkeys = sorted(list(phase_dict.keys()), key=lambda x: x.name)
energy = sum([amt * p.energy for p, amt in list(phase_dict.items())])
comp = defaultdict(float)
for p, factor in list(phase_dict.items()):
for e, amt in list(p.unit_comp.items()):
comp[e] += amt * factor
phase = Phase(composition=comp, energy=energy, per_atom=False)
phase.phase_dict = phase_dict
return phase
@property
def natoms(self):
return sum(self.nom_comp.values())
def __str__(self):
if self.description:
return "{name} ({description}): {energy:0.3g}".format(
name=self.name, energy=self.energy, description=self.description
)
else:
return "{name} : {energy:0.3g}".format(name=self.name, energy=self.energy)
def __repr__(self):
return "<Phase %s>" % self
def __hash__(self):
return hash(
tuple(
[str(self.comp), float(self.energy)]
+ [str(self.unit_comp[key]) for key in self.comp]
)
)
def __lt__(self, other):
"Phase-comparison is done based on energy value"
return self.energy < other.energy
def __eq__(self, other):
"""
Phases are defined to be equal if they have the same composition and an
energy within 1e-6 eV/atom.
"""
if set(self.comp) != set(other.comp):
return False
if abs(self.energy - other.energy) > 1e-6:
return False
for key in self.comp:
if abs(self.unit_comp[key] - other.unit_comp[key]) > 1e-6:
return False
return True
@property
def label(self):
return "%s: %0.3f eV/atom" % (self.name, self.energy)
@property
def link(self):
if self.id:
link = '<a href="/materials/entry/{id}">{name}</a>'
return link.format(
id=self.calculation.entry_id, name=format_html(self.comp)
)
else:
return ""
@property
def name(self):
if self.custom_name:
return self.custom_name
if self.phase_dict:
name_dict = dict(
(p, v / p.natoms) for p, v in list(self.phase_dict.items())
)
return " + ".join(
"%.3g %s" % (v, p.name) for p, v in list(name_dict.items())
)
return format_comp(self.nom_comp)
@property
def latex(self):
if self.phase_dict:
return " + ".join(
"%.3g %s" % (v, p.latex) for p, v in list(self.phase_dict.items())
)
return format_latex(self.nom_comp)
@property
def volume(self):
if self.phase_dict:
return sum(
phase.calculation.volume_pa * amt
for phase, amt in list(self.phase_dict.items())
)
else:
return self.calculation.volume_pa
@property
def mass(self):
if self.phase_dict:
return sum(
phase.calculation.composition.get_mass() * amt
for phase, amt in list(self.phase_dict.items())
)
else:
return self.calculation.composition.get_mass()
@property
def space(self):
"""
Set of elements in the phase.
"""
return set([k for k, v in list(self.unit_comp.items()) if abs(v) > 1e-6])
@property
def n(self):
"""
Number of atoms in the total composition.
"""
return sum(self._comp.values())
@property
def comp(self):
"""
Total composition.
"""
return self._comp
@comp.setter
def comp(self, composition):
self._comp = composition
self._unit_comp = unit_comp(composition)
self._nom_comp = reduce_comp(composition)
@property
def unit_comp(self):
"""
Unit composition.
"""
return self._unit_comp
@property
def nom_comp(self):
"""
Composition divided by the GCD. e.g. Fe4O6 becomes Fe2O3.
"""
return self._nom_comp
@property
def energy(self):
"""
Energy per atom in eV.
"""
return self._energy
@energy.setter
def energy(self, energy):
self._energy = energy
self._total_energy = energy * sum(self.comp.values())
self._energy_pfu = energy * sum(self.nom_comp.values())  # eV per nominal formula unit
@property
def total_energy(self):
"""
Total energy for the composition as supplied (in eV).
"""
return self._total_energy
@total_energy.setter
def total_energy(self, energy):
self._total_energy = energy
self._energy = energy / sum(self.comp.values())
self._energy_pfu = self._energy * sum(self.nom_comp.values())  # eV per nominal formula unit
@property
def energy_pfu(self):
"""
Energy per nominal composition. i.e. energy per Fe2O3, not Fe4O6.
"""
return self._energy_pfu
@energy_pfu.setter
def energy_pfu(self, energy):
self._energy_pfu = energy
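# Worked example tying the three energy views together (cf. the class
# docstring): Phase({'Fe': 6, 'O': 9}, -24.6, per_atom=False) has 15 atoms,
# so energy = -24.6/15 = -1.64 eV/atom, and with nom_comp Fe2O3 (5 atoms)
# energy_pfu = -1.64 * 5 = -8.2 eV per Fe2O3 formula unit.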
_gap = None
@property
def band_gap(self):
if not self._gap:
self.get_gap()
return self._gap
def get_gap(self):
if not self.phase_dict:
self._gap = self.calculation.band_gap
else:
self._gap = min([p.calculation.band_gap for p in self.phase_dict])
_formation = None
@property
def formation(self):
if self.id is None:
return
if self._formation is None:
self._formation = qmpy.FormationEnergy.objects.get(id=self.id)
return self._formation
@property
def calculation(self):
"""
Get the oqmd Formation object for this Phase, if it exists.
"""
if self.id is None:
return
from qmpy.analysis.vasp.calculation import Calculation
return self.formation.calculation
def set_stability(self):
from qmpy.analysis.vasp import Calculation
if self.id is None:
return
Calculation.objects.filter(id=self.id).update(stability=self.stability)
def free_energy(self, T=0, P=0, mus={}):
"""
Free energy function for the phase, can be defined to be anything, by
default it just returns the phase's ground state energy.
"""
# global environment
return self.energy
def amt(self, comp):
"""
Returns a composition dictionary with the specified composition pulled
out as 'var'.
Examples::
>>> phase = Phase(composition={'Fe':1, 'Li':5, 'O':8}, energy=-1)
>>> phase.amt('Li2O')
defaultdict(<type 'float'>, {'var': 2.5, 'Fe': 1, 'O': 5.5, 'Li': 0.0})
"""
if isinstance(comp, Phase):
comp = comp.comp
elif isinstance(comp, str):
comp = parse_comp(comp)
residual = defaultdict(float, self.comp)
tot = sum(residual.values())
for c, amt in list(dict(comp).items()):
pres = residual[c] / amt
for c2, amt2 in list(comp.items()):
residual[c2] -= pres * amt2
residual["var"] = tot - sum(residual.values())
residual["var"] /= float(sum(comp.values()))
return residual
def fraction(self, comp):
"""
Returns a composition dictionary with the specified composition pulled
out as 'var'.
Examples::
>>> phase = Phase(composition={'Fe':1, 'Li':5, 'O':8}, energy=-1)
>>> phase.fraction('Li2O')
defaultdict(<type 'float'>, {'var': 0.5357142857142858, 'Fe':
0.07142857142857142, 'O': 0.3928571428571428, 'Li': 0.0})
"""
if isinstance(comp, Phase):
comp = comp.unit_comp
elif isinstance(comp, str):
comp = unit_comp(parse_comp(comp))
residual = defaultdict(float, self.unit_comp)
tot = sum(residual.values())
for c, amt in list(dict(comp).items()):
pres = residual[c] / amt
for c2, amt2 in list(comp.items()):
residual[c2] -= pres * amt2
residual["var"] = tot - sum(residual.values())
residual["var"] /= float(sum(comp.values()))
return residual
|
wolverton-research-group/qmpy
|
qmpy/analysis/thermodynamics/phase.py
|
Python
|
mit
| 21,375
|
[
"VASP"
] |
2aed8ed6e9479eb1093ef80a89b36d810702dceb445f3efb909805bd6dcf7943
|
import serial
import inspect
import time
import itertools
from util import two_byte_iter_to_str, to_two_bytes
# Message command bytes - straight from Firmata.h
DIGITAL_MESSAGE = 0x90 # send data for a digital pin
ANALOG_MESSAGE = 0xE0 # send data for an analog pin (or PWM)
DIGITAL_PULSE = 0x91 # SysEx command to send a digital pulse
# PULSE_MESSAGE = 0xA0 # proposed pulseIn/Out msg (SysEx)
# SHIFTOUT_MESSAGE = 0xB0 # proposed shiftOut msg (SysEx)
REPORT_ANALOG = 0xC0 # enable analog input by pin #
REPORT_DIGITAL = 0xD0 # enable digital input by port pair
START_SYSEX = 0xF0 # start a MIDI SysEx msg
SET_PIN_MODE = 0xF4 # set a pin to INPUT/OUTPUT/PWM/etc
END_SYSEX = 0xF7 # end a MIDI SysEx msg
REPORT_VERSION = 0xF9 # report firmware version
SYSTEM_RESET = 0xFF # reset from MIDI
QUERY_FIRMWARE = 0x79 # query the firmware name
# extended command set using sysex (0-127/0x00-0x7F)
# 0x00-0x0F reserved for user-defined commands */
SERVO_CONFIG = 0x70 # set max angle, minPulse, maxPulse, freq
STRING_DATA = 0x71 # a string message with 14-bits per char
SHIFT_DATA = 0x75 # a bitstream to/from a shift register
I2C_REQUEST = 0x76 # send an I2C read/write request
I2C_REPLY = 0x77 # a reply to an I2C read request
I2C_CONFIG = 0x78 # config I2C settings such as delay times and power pins
REPORT_FIRMWARE = 0x79 # report name and version of the firmware
SAMPLING_INTERVAL = 0x7A # set the poll rate of the main loop
SYSEX_NON_REALTIME = 0x7E # MIDI Reserved for non-realtime messages
SYSEX_REALTIME = 0x7F # MIDI Reserved for realtime messages
# Pin modes.
# except from UNAVAILABLE taken from Firmata.h
UNAVAILABLE = -1
INPUT = 0 # as defined in wiring.h
OUTPUT = 1 # as defined in wiring.h
ANALOG = 2 # analog pin in analogInput mode
PWM = 3 # digital pin in PWM output mode
SERVO = 4 # digital pin in SERVO mode
# Pin types
DIGITAL = OUTPUT # same as OUTPUT below
# ANALOG is already defined above
class PinAlreadyTakenError(Exception):
pass
class InvalidPinDefError(Exception):
pass
class NoInputWarning(RuntimeWarning):
pass
class Board(object):
"""
Base class for any board
"""
firmata_version = None
firmware = None
firmware_version = None
_command_handlers = {}
_command = None
_stored_data = []
_parsing_sysex = False
def __init__(self, port, layout, baudrate=57600, name=None):
self.sp = serial.Serial(port, baudrate)
# Allow 5 secs for Arduino's auto-reset to happen
# Alas, Firmata blinks its version before printing it to serial
# For 2.3, even 5 seconds might not be enough.
# TODO Find a more reliable way to wait until the board is ready
self.pass_time(5)
self.name = name
if not self.name:
self.name = port
self.setup_layout(layout)
# Iterate over the first messages to get firmware data
while self.bytes_available():
self.iterate()
# TODO Test whether we got a firmware name and version, otherwise there
# probably isn't any Firmata installed
def __str__(self):
return "Board %s on %s" % (self.name, self.sp.port)
def __del__(self):
'''
The connection with a board can get messed up when a script is
closed without calling board.exit() (which closes the serial
connection). Therefore also do it here and hope it helps.
'''
self.exit()
def send_as_two_bytes(self, val):
self.sp.write(chr(val % 128) + chr(val >> 7))
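# Worked example of the 7-bit split above: val = 1000 -> low byte
# 1000 % 128 = 104, high byte 1000 >> 7 = 7; the receiver reconstructs
# (7 << 7) + 104 = 1000. Firmata payload bytes must stay below 0x80.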
def setup_layout(self, board_layout):
"""
Setup the Pin instances based on the given board-layout. Maybe it will
be possible to do this automatically in the future, by polling the
board for its type.
"""
# Create pin instances based on board layout
self.analog = []
for i in board_layout['analog']:
self.analog.append(Pin(self, i))
self.digital = []
self.digital_ports = []
for i in xrange(0, len(board_layout['digital']), 8):
num_pins = len(board_layout['digital'][i:i+8])
port_number = i / 8
self.digital_ports.append(Port(self, port_number, num_pins))
# Allow to access the Pin instances directly
for port in self.digital_ports:
self.digital += port.pins
# Setup PWM pins
for i in board_layout['pwm']:
self.digital[i].PWM_CAPABLE = True
# Disable certain ports like Rx/Tx and crystal ports
for i in board_layout['disabled']:
self.digital[i].mode = UNAVAILABLE
# Create a dictionary of 'taken' pins. Used by the get_pin method
self.taken = { 'analog' : dict(map(lambda p: (p.pin_number, False), self.analog)),
'digital' : dict(map(lambda p: (p.pin_number, False), self.digital)) }
# Setup default handlers for standard incoming commands
self.add_cmd_handler(ANALOG_MESSAGE, self._handle_analog_message)
self.add_cmd_handler(DIGITAL_MESSAGE, self._handle_digital_message)
self.add_cmd_handler(REPORT_VERSION, self._handle_report_version)
self.add_cmd_handler(REPORT_FIRMWARE, self._handle_report_firmware)
def add_cmd_handler(self, cmd, func):
"""
Adds a command handler for a command.
"""
len_args = len(inspect.getargspec(func)[0])
def add_meta(f):
def decorator(*args, **kwargs):
f(*args, **kwargs)
decorator.bytes_needed = len_args - 1 # exclude self
decorator.__name__ = f.__name__
return decorator
func = add_meta(func)
self._command_handlers[cmd] = func
def get_pin(self, pin_def):
"""
Returns the activated pin given by the pin definition.
May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``.
:arg pin_def: Pin definition as described in TODO,
but without the arduino name. So for example ``a:1:i``.
"""
if type(pin_def) == list:
bits = pin_def
else:
bits = pin_def.split(':')
a_d = 'analog' if bits[0] == 'a' else 'digital'
part = getattr(self, a_d)
pin_nr = int(bits[1])
if pin_nr >= len(part):
raise InvalidPinDefError('Invalid pin definition: %s at position 3 on %s' % (pin_def, self.name))
if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE:
raise InvalidPinDefError('Invalid pin definition: UNAVAILABLE pin %s at position on %s' % (pin_def, self.name))
if self.taken[a_d][pin_nr]:
raise PinAlreadyTakenError('%s pin %s is already taken on %s' % (a_d, bits[1], self.name))
# ok, should be available
pin = part[pin_nr]
self.taken[a_d][pin_nr] = True
if pin.type is DIGITAL:
if bits[2] == 'p':
pin.mode = PWM
elif bits[2] == 's':
pin.mode = SERVO
elif bits[2] != 'o':
pin.mode = INPUT
else:
pin.enable_reporting()
return pin
def pass_time(self, t):
"""
Non-blocking time-out for ``t`` seconds.
"""
cont = time.time() + t
while time.time() < cont:
time.sleep(0)
def send_sysex(self, sysex_cmd, data=[]):
"""
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: A list of 7-bit bytes of arbitrary data (bytes may be
already converted to chr's)
"""
self.sp.write(chr(START_SYSEX))
self.sp.write(chr(sysex_cmd))
for byte in data:
try:
byte = chr(byte)
except TypeError:
pass # byte is already a chr
except ValueError:
raise ValueError('Sysex data can be 7-bit bytes only. '
'Consider using utils.to_two_bytes for bigger bytes.')
self.sp.write(byte)
self.sp.write(chr(END_SYSEX))
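# Usage sketch (assumes a constructed Board instance named 'board'): set the
# firmware's main-loop sampling interval to 100 (commonly interpreted as
# milliseconds by standard Firmata firmwares), with the 14-bit value split
# into two 7-bit bytes by util.to_two_bytes:
#
#   board.send_sysex(SAMPLING_INTERVAL, to_two_bytes(100))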
def bytes_available(self):
return self.sp.inWaiting()
def iterate(self):
"""
Reads and handles data from the microcontroller over the serial port.
This method should be called in a main loop, or in an
:class:`Iterator` instance to keep this board's pin values up to date.
"""
byte = self.sp.read()
if not byte:
return
data = ord(byte)
received_data = []
handler = None
if data < START_SYSEX:
# These commands can have 'channel data' like a pin number appended.
try:
handler = self._command_handlers[data & 0xF0]
except KeyError:
return
received_data.append(data & 0x0F)
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
elif data == START_SYSEX:
data = ord(self.sp.read())
handler = self._command_handlers.get(data)
if not handler:
return
data = ord(self.sp.read())
while data != END_SYSEX:
received_data.append(data)
data = ord(self.sp.read())
else:
try:
handler = self._command_handlers[data]
except KeyError:
return
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
# Handle the data
try:
handler(*received_data)
except ValueError:
pass
def get_firmata_version(self):
"""
Returns a version tuple (major, minor) for the firmata firmware on the
board.
"""
return self.firmata_version
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
"""
Configure a pin as servo with min_pulse, max_pulse and first angle.
``min_pulse`` and ``max_pulse`` default to the arduino defaults.
"""
if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
raise IOError("Pin %s is not a valid servo pin")
data = itertools.chain([pin], to_two_bytes(min_pulse),
to_two_bytes(max_pulse))
self.send_sysex(SERVO_CONFIG, data)
# set pin._mode to SERVO so that it sends analog messages
# don't set pin.mode as that calls this method
self.digital[pin]._mode = SERVO
self.digital[pin].write(angle)
def exit(self):
""" Call this to exit cleanly. """
# First detach all servos, otherwise it somehow doesn't want to close...
# FIXME
for pin in self.digital:
if pin.mode == SERVO:
pin.mode = OUTPUT
if hasattr(self, 'sp'):
self.sp.close()
# Command handlers
def _handle_analog_message(self, pin_nr, lsb, msb):
value = round(float((msb << 7) + lsb) / 1023, 4)
# Only set the value if we are actually reporting
try:
if self.analog[pin_nr].reporting:
self.analog[pin_nr].value = value
except IndexError:
raise ValueError
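# Worked example of the two-byte decoding above: Firmata transmits 14-bit
# values as two 7-bit bytes, LSB first. For lsb=127 and msb=7:
# (7 << 7) + 127 = 1023, so value = round(1023/1023.0, 4) = 1.0 (full scale).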
def _handle_digital_message(self, port_nr, lsb, msb):
"""
Digital messages always cover the whole port. This means we get a
bitmask with which we update the port.
"""
mask = (msb << 7) + lsb
try:
print "pyfirmata: port %d message %d" % (port_nr, mask)
self.digital_ports[port_nr]._update(mask)
except IndexError:
raise ValueError
def _handle_report_version(self, major, minor):
self.firmata_version = (major, minor)
def _handle_report_firmware(self, *data):
major = data[0]
minor = data[1]
self.firmware_version = (major, minor)
self.firmware = two_byte_iter_to_str(data[2:])
class Port(object):
""" An 8-bit port on the board """
def __init__(self, board, port_number, num_pins=8):
self.board = board
self.port_number = port_number
self.reporting = False
self.pins = []
for i in range(num_pins):
pin_nr = i + self.port_number * 8
self.pins.append(Pin(self.board, pin_nr, type=DIGITAL, port=self))
def __str__(self):
return "Digital Port %i on %s" % (self.port_number, self.board)
def enable_reporting(self):
""" Enable reporting of values for the whole port """
self.reporting = True
msg = chr(REPORT_DIGITAL + self.port_number)
msg += chr(1)
self.board.sp.write(msg)
for pin in self.pins:
if pin.mode == INPUT:
pin.reporting = True # TODO Shouldn't this happen at the pin?
def disable_reporting(self):
""" Disable the reporting of the port """
self.reporting = False
msg = chr(REPORT_DIGITAL + self.port_number)
msg += chr(0)
self.board.sp.write(msg)
def write(self):
"""Set the output pins of the port to the correct state"""
mask = 0
for pin in self.pins:
if pin.mode == OUTPUT:
if pin.value == 1:
pin_nr = pin.pin_number - self.port_number * 8
mask |= 1 << pin_nr
msg = chr(DIGITAL_MESSAGE + self.port_number)
msg += chr(mask % 128)
msg += chr(mask >> 7)
self.board.sp.write(msg)
def _update(self, mask):
"""
Update the values for the pins marked as input with the mask.
"""
if self.reporting:
for pin in self.pins:
if pin.mode is INPUT:
pin_nr = pin.pin_number - self.port_number * 8
pin.value = (mask & (1 << pin_nr)) > 0
print "pyfirmata: updated pin %d to %s" % (pin_nr, pin.value)
class Pin(object):
""" A Pin representation """
def __init__(self, board, pin_number, type=ANALOG, port=None):
self.board = board
self.pin_number = pin_number
self.type = type
self.port = port
self.PWM_CAPABLE = False
self._mode = (type == DIGITAL and OUTPUT or INPUT)
self.reporting = False
self.value = None
def __str__(self):
type = {ANALOG : 'Analog', DIGITAL : 'Digital'}[self.type]
return "%s pin %d" % (type, self.pin_number)
def _set_mode(self, mode):
if mode is UNAVAILABLE:
self._mode = UNAVAILABLE
return
if self._mode is UNAVAILABLE:
raise IOError("%s can not be used through Firmata" % self)
if mode is PWM and not self.PWM_CAPABLE:
raise IOError("%s does not have PWM capabilities" % self)
if mode == SERVO:
if self.type != DIGITAL:
raise IOError("Only digital pins can drive servos! %s is not"
"digital" % self)
self._mode = SERVO
self.board.servo_config(self.pin_number)
return
# Set mode with SET_PIN_MODE message
self._mode = mode
command = chr(SET_PIN_MODE)
command += chr(self.pin_number)
command += chr(mode)
self.board.sp.write(command)
if mode == INPUT:
self.enable_reporting()
def _get_mode(self):
return self._mode
mode = property(_get_mode, _set_mode)
"""
Mode of operation for the pin. Can be one of the pin modes: INPUT, OUTPUT,
ANALOG, PWM or SERVO (or UNAVAILABLE)
"""
def enable_reporting(self):
""" Set an input pin to report values """
if self.mode is not INPUT:
raise IOError, "%s is not an input and can therefore not report" % self
if self.type == ANALOG:
self.reporting = True
msg = chr(REPORT_ANALOG + self.pin_number)
msg += chr(1)
self.board.sp.write(msg)
else:
self.port.enable_reporting() # TODO This is not going to work for non-optimized boards like Mega
def disable_reporting(self):
""" Disable the reporting of an input pin """
if self.type == ANALOG:
self.reporting = False
msg = chr(REPORT_ANALOG + self.pin_number)
msg += chr(0)
self.board.sp.write(msg)
else:
self.port.disable_reporting() # TODO This is not going to work for non-optimized boards like Mega
def read(self):
"""
Returns the output value of the pin. This value is updated by the
board's :meth:`Board.iterate` method. The value is always in the range 0.0 - 1.0.
"""
if self.mode == UNAVAILABLE:
raise IOError, "Cannot read pin %s"% self.__str__()
return self.value
def write(self, value):
"""
Output a voltage from the pin
:arg value: Uses value as a boolean if the pin is in output mode, or
expects a float from 0 to 1 if the pin is in PWM mode. If the pin
is in SERVO the value should be in degrees.
"""
if self.mode is UNAVAILABLE:
raise IOError, "%s can not be used through Firmata" % self
if self.mode is INPUT:
raise IOError, "%s is set up as an INPUT and can therefore not be written to" % self
if value != self.value:
self.value = value
if self.mode is OUTPUT:
if self.port:
self.port.write()
else:
msg = chr(DIGITAL_MESSAGE)
msg += chr(self.pin_number)
msg += chr(value)
self.board.sp.write(msg)
elif self.mode is PWM:
value = int(round(value * 255))
msg = chr(ANALOG_MESSAGE + self.pin_number)
msg += chr(value % 128)
msg += chr(value >> 7)
self.board.sp.write(msg)
elif self.mode is SERVO:
value = int(value)
msg = chr(ANALOG_MESSAGE + self.pin_number)
msg += chr(value % 128)
msg += chr(value >> 7)
self.board.sp.write(msg)
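# --- Illustrative usage sketch (not part of the original source) ---
# A minimal end-to-end example, assuming the standard pyFirmata entry points
# (an Arduino board class and util.Iterator) and a board on /dev/ttyUSB0:
#
#     import pyfirmata
#     board = pyfirmata.Arduino('/dev/ttyUSB0')
#     it = pyfirmata.util.Iterator(board)   # calls board.iterate() in a loop
#     it.start()
#     pin = board.get_pin('d:13:o')
#     pin.write(1)
#     board.pass_time(1)
#     pin.write(0)
#     board.exit()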
|
dariobottazzi/pyfirmata
|
pyfirmata/pyfirmata.py
|
Python
|
bsd-3-clause
| 18,814
|
[
"CRYSTAL"
] |
aa5bf48b4ddbe19a05d40143d8058027ce1cd2f12fa40cba5499dbe4ab834568
|
#!/usr/bin/python2
# ----------------------------------------------------------------------
# Copyright (2010) Aram Davtyan and Garegin Papoian
# Papoian's Group, University of Maryland at College Park
# http://papoian.chem.umd.edu/
# Last Update: 03/04/2011
# ----------------------------------------------------------------------
import os
import sys
class Atom:
def __init__(self, no, ch, res, ty, q, x, y, z):
self.no = no
self.ch = ch
self.res = res
self.ty = ty
self.q = q
self.x = x
self.y = y
self.z = z
def write_(self, f):
space11 = " "
f.write( (space11+str(self.no))[-12:] + "\t" )
f.write( "\t".join([ str(self.ch), str(self.res), str(self.ty), str(self.q), str(self.x), str(self.y), str(self.z) ]) )
f.write( "\n" )
class Bond:
def __init__(self, no, ty, I, J):
self.no = no
self.ty = ty
self.I = I
self.J = J
def write_(self, f):
f.write( (space11+str(self.no))[-12:] + "\t" )
f.write( "\t".join([ str(self.ty), str(self.I), str(self.J) ]) )
f.write( "\n" )
inp_file = ""
out_file = ""
if len(sys.argv)>1: inp_file = sys.argv[1]
if len(sys.argv)>2: out_file = sys.argv[2]
if inp_file=="":
print "\nCoordinatesToLammpsDataFile.py input_file [output_file] [-b] [-go]\n\n"
print "\t-b\tadd bonds between CA & CA, CA & O and CA & CB in the case of coarse graining\n"
print "\t-go\tcoarse-grained setup\n\n"
exit()
cg_bonds = False
go = False
for cl in sys.argv[3:]:
if cl == '-b': cg_bonds = True
if cl == '-go': go = True
seq_file = "sequance.seq"
lammps_out_file = "file.in"
if out_file[:5]=="data.":
lammps_out_file = out_file[5:] + ".in"
seq_file = out_file[5:] + ".seq"
elif out_file[-5:]==".data":
lammps_out_file = out_file[:-5] + ".in"
seq_file = out_file[:-5] + ".seq"
else:
lammps_out_file = out_file + ".in"
out_file = "data." + out_file
seq_file = out_file + ".seq"
cg = True
xlo = -1000.0
xhi = 1000.0
ylo = -1000.0
yhi = 1000.0
zlo = -1000.0
zhi = 1000.0
masses = [12.0, 14.0, 16.0, 12.0, 1.0]
if cg and not go:
masses = [27.0, 14.0, 28.0, 60.0, 2.0]
n_atom_types = 5
if cg:
if cg_bonds: n_bond_types = 5
else: n_bond_types = 0
else: n_bond_types = 7
last_nos = { 'N' : 0, 'C-Alpha' : 0, 'C-Prime' : 0, 'O' : 0 }
last_chno = { 'N' : 0, 'C-Alpha' : 0, 'C-Prime' : 0, 'O' : 0 }
n_atoms = 0
n_bonds = 0
n_res = 0
group_id = 0
atoms = []
bonds = []
groups = []
fix_string = "4 alpha_carbons backbone beta_atoms oxygens fix_backbone_coeff.data " + seq_file
if go:
fix_string = "2 alpha_carbons gomodel fix_gomodel_coeff.data"
groups.append(["alpha_carbons", "id"])
if not go:
groups.append(["beta_atoms", "id"])
groups.append(["oxygens", "id"])
inp = open(inp_file)
atom_type = 0
for l in inp:
l = l.strip().split()
if len(l)==6:
print "Input file lacks description field!"
exit()
desc = l[6]
chain_no = l[1]
if not go:
if desc == 'C-Beta' or desc == 'H-Beta' or desc == 'C-Alpha' or desc == 'O':
n_atoms += 1
else:
if desc == 'C-Alpha':
n_atoms += 1
if not go:
if desc == 'N0' or desc == 'N':
atom_type = 2
if last_nos['C-Prime']!=0 and last_chno['C-Prime']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 3, last_nos['C-Prime'], n_atoms) )
desc = 'N'
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
n_res += 1
elif desc == 'C-Alpha':
if last_nos['N']!=0 and last_chno['N']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 1, last_nos['N'], n_atoms) )
if cg and cg_bonds:
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 1, last_nos['C-Alpha'], n_atoms) )
if last_nos['O']!=0 and last_chno['O']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 3, last_nos['O'], n_atoms) )
atom_type = 1
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
group_id = 1
elif desc == 'C-Prime':
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 2, last_nos['C-Alpha'], n_atoms) )
atom_type = 1
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
elif desc == 'O':
if last_nos['C-Prime']!=0 and last_chno['C-Prime']==chain_no and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 6, last_nos['C-Prime'], n_atoms) )
if cg and cg_bonds:
if last_nos['C-Alpha']!=0 and last_chno['C-Alpha']==chain_no:
n_bonds += 1
bonds.append( Bond(n_bonds, 2, last_nos['C-Alpha'], n_atoms) )
atom_type = 3
last_nos[desc] = n_atoms
last_chno[desc] = chain_no
group_id = 3
elif desc == 'C-Beta':
if last_nos['C-Alpha']!=0 and (not cg or cg_bonds):
n_bonds += 1
bonds.append( Bond(n_bonds, 4, last_nos['C-Alpha'], n_atoms) )
atom_type = 4
group_id = 2
elif desc == 'H-Beta':
if last_nos['C-Alpha']!=0 and (not cg or cg_bonds):
n_bonds += 1
bonds.append( Bond(n_bonds, 5, last_nos['C-Alpha'], n_atoms) )
atom_type = 5
group_id = 2
elif desc == 'O-In-The-End':
if last_nos['C-Prime']!=0 and not cg:
n_bonds += 1
bonds.append( Bond(n_bonds, 7, last_nos['C-Prime'], n_atoms) )
atom_type = 3
if not go:
if desc == 'C-Beta' or desc == 'H-Beta' or desc == 'C-Alpha' or desc == 'O':
# n_atoms += 1
atoms.append( Atom(n_atoms, chain_no, n_res, atom_type, 0.0, float(l[3]), float(l[4]), float(l[5])) )
groups[group_id - 1].append(str(n_atoms))
else:
if desc == 'C-Alpha':
atom_type = 1
n_res += 1
atoms.append( Atom(n_atoms, chain_no, n_res, atom_type, 0.0, float(l[3]), float(l[4]), float(l[5])) )
groups[group_id - 1].append(str(n_atoms))
inp.close()
if go:
n_atoms = len(atoms)
n_bonds = 0
n_bond_types = 0
n_atom_types = 1
masses = [118.0]
space11 = " "
out = open(out_file,'w')
out.write("LAMMPS protain data file\n\n")
out.write( (space11+str(n_atoms))[-12:] + " atoms\n" )
out.write( (space11+str(n_bonds))[-12:] + " bonds\n" )
out.write( space11 + "0 angles\n" )
out.write( space11 + "0 dihedrals\n" )
out.write( space11 + "0 impropers\n\n" )
out.write( (space11+str(n_atom_types))[-12:] + " atom types\n" )
out.write( (space11+str(n_bond_types))[-12:] + " bond types\n" )
out.write( space11 + "0 angle types\n" )
out.write( space11 + "0 dihedral types\n" )
out.write( space11 + "0 improper types\n\n" )
out.write ( "\t".join([ str(xlo), str(xhi), "xlo xhi\n" ]) )
out.write ( "\t".join([ str(ylo), str(yhi), "ylo yhi\n" ]) )
out.write ( "\t".join([ str(zlo), str(zhi), "zlo zhi\n\n" ]) )
out.write( "Masses\n\n" )
for i in range(0, len(masses)):
out.write( (space11+str(i+1))[-12:] + "\t" + str(masses[i]) + "\n" )
out.write( "\n" )
out.write( "Atoms\n\n" )
for iAtom in atoms:
iAtom.write_(out)
out.write( "\n" )
if cg and cg_bonds and not go:
out.write( "Bond Coeffs\n\n" )
#out.write( space11 + "1\t20\t3.77\n" )
#out.write( space11 + "2\t20\t2.41\n" )
#out.write( space11 + "3\t20\t2.50\n" )
#out.write( space11 + "4\t20\t1.54\n" )
#out.write( space11 + "5\t20\t1.54\n" )
out.write( space11 + "1\t60\t3.816\n" )
out.write( space11 + "2\t60\t2.40\n" )
out.write( space11 + "3\t60\t2.76\n" )
out.write( space11 + "4\t60\t1.53\n" )
out.write( space11 + "5\t60\t1.09\n" )
if (cg_bonds or not cg) and not go:
out.write( "Bonds\n\n" )
for iBond in bonds:
iBond.write_(out)
out.write( "\n" )
out.close()
groups_string = ""
for igroup in groups:
groups_string += "group\t\t" + " ".join(igroup) + "\n\n"
bonds_string = ""
if cg and cg_bonds and not go:
bonds_string = "bond_style harmonic"
pair_string = ""
if cg and not go:
pair_string = "pair_style vexcluded 2 3.5 3.5"
pair_coeff_string = ""
if cg and not go:
pair_coeff_string = "pair_coeff * * 0.0\n"
pair_coeff_string += "pair_coeff 1 1 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 1 4 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 4 4 20.0 3.5 4.5\n"
pair_coeff_string += "pair_coeff 3 3 20.0 3.5 3.5\n"
replace_rules = [ ["``read_data_file", "read_data " + out_file],
["``groups", groups_string],
["``bonds", bonds_string],
["``main_fix", fix_string],
["``pair_interactions", pair_string],
["``pair_coeff", pair_coeff_string] ]
myhome = os.environ.get("HOME")
inp = open(myhome + "/opt/tertiary_inFilePattern.data")
inFile = inp.read()
inp.close()
for ir in replace_rules:
inFile = inFile.replace(ir[0], ir[1])
out = open(lammps_out_file,'w')
out.write(inFile)
out.close()
#out = open(groups_out_file,'w')
#for igroup in groups:
# out.write( "group\t\t" )
# out.write( " ".join(igroup) )
# out.write( "\n\n" )
#out.close()
|
luwei0917/awsemmd_script
|
archive/tertiary_create_project_helper.py
|
Python
|
mit
| 9,765
|
[
"LAMMPS"
] |
e43469f2c79507fd6189740aa8599a519dd3dd66168c7d2d158f1a56d7f38c6b
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.Execute.ExecuteTabPlugin import ExecuteTabPlugin
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtTest import QTest
from PyQt5.QtCore import Qt, QSettings
from peacock.utils import Testing
from peacock.Input.InputTree import InputTree
from peacock.Input import TimeStepEstimate
import argparse
import re
class Tests(Testing.PeacockTester):
qapp = QApplication([])
def setUp(self):
super(Tests, self).setUp()
self.test_exe = Testing.find_moose_test_exe()
self.test_input_file = "../../common/transient.i"
self.start_input_file = None
self.start_csv = None
self.exe_info = None
def newWidget(self, args=None):
self.exe_info = None
main_win = QMainWindow()
w = ExecuteTabPlugin()
main_win.setCentralWidget(w)
w.needInputFile.connect(self.needInputFile)
w.startJob.connect(self.startJob)
w.executableInfoChanged.connect(self.exeInfoChanged)
menubar = main_win.menuBar()
if args:
parser = argparse.ArgumentParser()
w.commandLineArgs(parser)
parsed_args = parser.parse_args(args)
parsed_args.arguments = []
w.initialize(parsed_args)
w.addToMainMenu(menubar)
main_win.show()
return main_win, w
def startJob(self, use_csv, input_file, t):
self.start_input_file = input_file
self.start_csv = use_csv
def exeInfoChanged(self, exe_info):
self.exe_info = exe_info
def needInputFile(self, input_file):
self.input_file = input_file
data = None
with open(self.test_input_file, "r") as fin:
data = fin.read()
with open(input_file, "w") as fout:
fout.write(data)
def testBasic(self):
main_win, w = self.newWidget()
w.ExecuteOptionsPlugin.setExecutablePath(self.test_exe)
self.assertEqual(w.ExecuteRunnerPlugin.run_button.isEnabled(), True)
self.assertEqual(self.exe_info.valid(), True)
self.assertEqual(w.ConsoleOutputViewerPlugin.toPlainText(), "")
w.ExecuteRunnerPlugin.runClicked()
self.assertEqual(self.start_input_file, self.input_file)
self.assertEqual(self.start_csv, True)
w.ExecuteRunnerPlugin.runner.process.waitForFinished(-1)
self.assertNotEqual(w.ConsoleOutputViewerPlugin.toPlainText(), "")
tree = InputTree(self.exe_info)
tree.setInputFile(self.test_input_file)
num_steps = TimeStepEstimate.findTimeSteps(tree)
w.onNumTimeStepsChanged(num_steps)
def testCommandLine(self):
main_win, w = self.newWidget(["-e", self.test_exe])
self.assertEqual(w.ExecuteRunnerPlugin.run_button.isEnabled(), True)
self.assertEqual(self.exe_info.valid(), True)
main_win, w = self.newWidget(["--no-exe-search"])
self.assertEqual(w.ExecuteRunnerPlugin.run_button.isEnabled(), False)
self.assertEqual(self.exe_info, None)
main_win, w = self.newWidget(["--method", "opt"])
self.assertEqual(w.ExecuteRunnerPlugin.run_button.isEnabled(), False)
self.assertEqual(self.exe_info, None)
def testOptions(self):
main_win, w = self.newWidget()
w.ExecuteOptionsPlugin.setExecutablePath(self.test_exe)
self.assertEqual(w.ExecuteRunnerPlugin.run_button.isEnabled(), True)
self.assertEqual(self.exe_info.valid(), True)
self.assertEqual(w.ConsoleOutputViewerPlugin.toPlainText(), "")
QTest.mouseClick(w.ExecuteOptionsPlugin.mpi_checkbox, Qt.LeftButton)
QTest.mouseClick(w.ExecuteOptionsPlugin.threads_checkbox, Qt.LeftButton)
w.ExecuteRunnerPlugin.runClicked()
self.assertEqual(self.start_input_file, self.input_file)
self.assertEqual(self.start_csv, True)
w.ExecuteRunnerPlugin.runner.process.waitForFinished(-1)
output = w.ConsoleOutputViewerPlugin.toPlainText()
self.assertNotEqual(output, "")
m = re.search("Num Processors:\s*2", output)
self.assertIsNotNone(m)
m = re.search("Num Threads:\s*2", output)
self.assertIsNotNone(m)
tree = InputTree(self.exe_info)
tree.setInputFile(self.test_input_file)
num_steps = TimeStepEstimate.findTimeSteps(tree)
w.onNumTimeStepsChanged(num_steps)
def testPrefs(self):
settings = QSettings()
settings.setValue("execute/maxRecentWorkingDirs", 2)
settings.setValue("execute/maxRecentExes", 3)
settings.setValue("execute/maxRecentArgs", 4)
settings.setValue("execute/mpiEnabled", True)
settings.setValue("execute/mpiArgs", "foo bar")
settings.setValue("execute/threadsEnabled", True)
settings.setValue("execute/threadsArgs", "threads args")
settings.sync()
main_win, w = self.newWidget()
ops = w.ExecuteOptionsPlugin
self.assertEqual(ops.mpi_checkbox.isChecked(), True)
self.assertEqual(ops.threads_checkbox.isChecked(), True)
self.assertEqual(ops.mpi_line.text(), "foo bar")
self.assertEqual(ops.threads_line.text(), "threads args")
settings.setValue("execute/mpiEnabled", False)
settings.setValue("execute/mpiArgs", "some args")
settings.setValue("execute/threadsEnabled", False)
settings.setValue("execute/threadsArgs", "other args")
settings.sync()
main_win, w = self.newWidget()
ops = w.ExecuteOptionsPlugin
self.assertEqual(ops.mpi_checkbox.isChecked(), False)
self.assertEqual(ops.threads_checkbox.isChecked(), False)
self.assertEqual(ops.mpi_line.text(), "some args")
self.assertEqual(ops.threads_line.text(), "other args")
if __name__ == '__main__':
Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/execute_tab/ExecuteTabPlugin/test_ExecuteTabPlugin.py
|
Python
|
lgpl-2.1
| 6,150
|
[
"MOOSE"
] |
0a5e9415a691690ad5154f4ff6ac5e94696c18678ddd6d314546e9fd6ac72974
|
# Copyright (C) 2015-2018 Hydriz Scholz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA, or visit
# <http://www.gnu.org/copyleft/gpl.html>
from cirrussearch import BALMCirrussearch
from dumps import BALMDumps
from mediacounts import BALMMediacounts
from translation import BALMTranslation
from wikidata import BALMWikidata
|
Hydriz/Balchivist
|
modules/__init__.py
|
Python
|
gpl-3.0
| 981
|
[
"VisIt"
] |
c63fe70de3efcb832bb8a73fafc7cf66fee35165e7db1b6aec6d6befed983f9a
|
#!/usr/bin/python
'''This package will take an input query and generate a list of blast hits.
The input query must be a refseq protein gi number.
The default cutoff is a Blast score of 100
This script can be invoked as::
python blast_search.py 6319393
where 6319393 is the gi_number being queried.
The output of this script is a csv file named results.csv and a folder containing the full blast results located at blast_results/ with a separate XML file for each species.
The species tested by this script are mouse, yeast, drosophila and worms.
'''
import csv
import re
import os
import argparse
import sys
from Bio import Entrez, SeqIO
from Bio.Blast import NCBIWWW, NCBIXML
#set up argparse
#parser = argparse.ArgumentParser(description='Perform blast searches on a given gi number')
#parser.add_argument('gi', metavar='N', type=int, nargs='+',help='a gi number')
#args = parser.parse_args()
Entrez.email = "dave.bridges@gmail.com"
SCORE_CUTOFF = 100
SPECIES = (('mouse','10090'),
('yeast','4932'),
('drosophila','7227'),
('worms','6239'))
#with open('results.csv','w') as csvfile:
# dw = csv.writer(csvfile)
# dw.writeheader('Species','Score','Expected','GI')
#creates the directory for blast_results if it doesn't already exist
if not os.path.exists('blast_results'):
os.makedirs('blast_results')
#this regex extracts the gi number (between the two | symbols)
r = re.compile('gi\|(.*?)\|')
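# Worked example (illustrative): for an alignment title such as
# "gi|6319393|ref|NP_XXXXXX.1| hypothetical protein", the non-greedy group
# between "gi|" and the next "|" yields "6319393".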
for x in range(0, 4):
result_handle = NCBIWWW.qblast("blastp", "refseq_protein", sys.argv[1], entrez_query='txid%s[orgn]' %SPECIES[x][1])
print 'Performing blast search in %s for %s' %(SPECIES[x][0], sys.argv[1])
save_file = open('blast_results/%s_blast_result.xml' %SPECIES[x][0], "w+")
save_file.write(result_handle.read())
save_file.close()
result_handle.close()
result_handle = open('blast_results/%s_blast_result.xml' %SPECIES[x][0])
blast_record = NCBIXML.read(result_handle)
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
if hsp.score > SCORE_CUTOFF:
with open('results.csv', 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([SPECIES[x][0],
hsp.score,
hsp.expect,
r.search(alignment.title).group(1)])
|
davebridges/biomolecule-scripts
|
biopython/blast_search.py
|
Python
|
cc0-1.0
| 2,368
|
[
"BLAST"
] |
0e00b70e1bed895d45741e6c56cd601215c84397b519979fc5ba40b032a4da3e
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
points = vtk.vtkPoints()
points.InsertNextPoint(0,-16,0)
points.InsertNextPoint(0,0,-14)
points.InsertNextPoint(0,0,14)
points.InsertNextPoint(14,0,0)
points.InsertNextPoint(10,20,-10)
points.InsertNextPoint(10,20,10)
points.InsertNextPoint(10,-20,-10)
points.InsertNextPoint(10,-20,10)
points.InsertNextPoint(-10,-20,-10)
points.InsertNextPoint(-10,-20,10)
points.InsertNextPoint(-10,20,-10)
points.InsertNextPoint(-10,20,10)
points.InsertNextPoint(-2,27,0)
points.InsertNextPoint(0,27,2)
points.InsertNextPoint(0,27,-2)
points.InsertNextPoint(2,27,0)
points.InsertNextPoint(-14,4,-1)
points.InsertNextPoint(-14,3,0)
points.InsertNextPoint(-14,5,0)
points.InsertNextPoint(-14,4,1)
points.InsertNextPoint(-1,38,-2)
points.InsertNextPoint(-1,38,2)
points.InsertNextPoint(2,35,-2)
points.InsertNextPoint(2,35,2)
points.InsertNextPoint(17,42,0)
points.InsertNextPoint(15,40,2)
points.InsertNextPoint(15,39,-2)
points.InsertNextPoint(13,37,0)
points.InsertNextPoint(19,-2,-2)
points.InsertNextPoint(19,-2,2)
points.InsertNextPoint(15,2,-2)
points.InsertNextPoint(15,2,2)
faces = vtk.vtkCellArray()
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(7)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(3)
faces.InsertCellPoint(6)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(6)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(7)
faces.InsertCellPoint(9)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(9)
faces.InsertCellPoint(8)
faces.InsertNextCell(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(8)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(4)
faces.InsertCellPoint(6)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(6)
faces.InsertCellPoint(8)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(8)
faces.InsertCellPoint(10)
faces.InsertNextCell(3)
faces.InsertCellPoint(1)
faces.InsertCellPoint(10)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(11)
faces.InsertCellPoint(9)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(9)
faces.InsertCellPoint(7)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(7)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(5)
faces.InsertCellPoint(11)
faces.InsertNextCell(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(15)
faces.InsertCellPoint(5)
faces.InsertNextCell(3)
faces.InsertCellPoint(4)
faces.InsertCellPoint(14)
faces.InsertCellPoint(15)
faces.InsertNextCell(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(13)
faces.InsertCellPoint(11)
faces.InsertNextCell(3)
faces.InsertCellPoint(5)
faces.InsertCellPoint(15)
faces.InsertCellPoint(13)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(12)
faces.InsertCellPoint(10)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(13)
faces.InsertCellPoint(12)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(14)
faces.InsertCellPoint(4)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(12)
faces.InsertCellPoint(14)
faces.InsertNextCell(3)
faces.InsertCellPoint(8)
faces.InsertCellPoint(17)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(8)
faces.InsertCellPoint(9)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(9)
faces.InsertCellPoint(19)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(9)
faces.InsertCellPoint(11)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(18)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(11)
faces.InsertCellPoint(10)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(16)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(10)
faces.InsertCellPoint(8)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(13)
faces.InsertCellPoint(21)
faces.InsertCellPoint(12)
faces.InsertNextCell(3)
faces.InsertCellPoint(12)
faces.InsertCellPoint(21)
faces.InsertCellPoint(20)
faces.InsertNextCell(3)
faces.InsertCellPoint(12)
faces.InsertCellPoint(20)
faces.InsertCellPoint(14)
faces.InsertNextCell(3)
faces.InsertCellPoint(14)
faces.InsertCellPoint(20)
faces.InsertCellPoint(22)
faces.InsertNextCell(3)
faces.InsertCellPoint(14)
faces.InsertCellPoint(22)
faces.InsertCellPoint(15)
faces.InsertNextCell(3)
faces.InsertCellPoint(15)
faces.InsertCellPoint(22)
faces.InsertCellPoint(23)
faces.InsertNextCell(3)
faces.InsertCellPoint(15)
faces.InsertCellPoint(23)
faces.InsertCellPoint(13)
faces.InsertNextCell(3)
faces.InsertCellPoint(13)
faces.InsertCellPoint(23)
faces.InsertCellPoint(21)
faces.InsertNextCell(3)
faces.InsertCellPoint(21)
faces.InsertCellPoint(25)
faces.InsertCellPoint(24)
faces.InsertNextCell(3)
faces.InsertCellPoint(21)
faces.InsertCellPoint(24)
faces.InsertCellPoint(20)
faces.InsertNextCell(3)
faces.InsertCellPoint(20)
faces.InsertCellPoint(24)
faces.InsertCellPoint(26)
faces.InsertNextCell(3)
faces.InsertCellPoint(20)
faces.InsertCellPoint(26)
faces.InsertCellPoint(22)
faces.InsertNextCell(3)
faces.InsertCellPoint(22)
faces.InsertCellPoint(26)
faces.InsertCellPoint(27)
faces.InsertNextCell(3)
faces.InsertCellPoint(22)
faces.InsertCellPoint(27)
faces.InsertCellPoint(23)
faces.InsertNextCell(3)
faces.InsertCellPoint(23)
faces.InsertCellPoint(27)
faces.InsertCellPoint(25)
faces.InsertNextCell(3)
faces.InsertCellPoint(23)
faces.InsertCellPoint(25)
faces.InsertCellPoint(21)
faces.InsertNextCell(3)
faces.InsertCellPoint(25)
faces.InsertCellPoint(29)
faces.InsertCellPoint(24)
faces.InsertNextCell(3)
faces.InsertCellPoint(24)
faces.InsertCellPoint(29)
faces.InsertCellPoint(28)
faces.InsertNextCell(3)
faces.InsertCellPoint(24)
faces.InsertCellPoint(28)
faces.InsertCellPoint(26)
faces.InsertNextCell(3)
faces.InsertCellPoint(26)
faces.InsertCellPoint(28)
faces.InsertCellPoint(30)
faces.InsertNextCell(3)
faces.InsertCellPoint(26)
faces.InsertCellPoint(30)
faces.InsertCellPoint(27)
faces.InsertNextCell(3)
faces.InsertCellPoint(27)
faces.InsertCellPoint(30)
faces.InsertCellPoint(31)
faces.InsertNextCell(3)
faces.InsertCellPoint(27)
faces.InsertCellPoint(31)
faces.InsertCellPoint(25)
faces.InsertNextCell(3)
faces.InsertCellPoint(25)
faces.InsertCellPoint(31)
faces.InsertCellPoint(29)
faces.InsertNextCell(3)
faces.InsertCellPoint(29)
faces.InsertCellPoint(19)
faces.InsertCellPoint(17)
faces.InsertNextCell(3)
faces.InsertCellPoint(29)
faces.InsertCellPoint(17)
faces.InsertCellPoint(28)
faces.InsertNextCell(3)
faces.InsertCellPoint(28)
faces.InsertCellPoint(17)
faces.InsertCellPoint(16)
faces.InsertNextCell(3)
faces.InsertCellPoint(28)
faces.InsertCellPoint(16)
faces.InsertCellPoint(30)
faces.InsertNextCell(3)
faces.InsertCellPoint(30)
faces.InsertCellPoint(16)
faces.InsertCellPoint(18)
faces.InsertNextCell(3)
faces.InsertCellPoint(30)
faces.InsertCellPoint(18)
faces.InsertCellPoint(31)
faces.InsertNextCell(3)
faces.InsertCellPoint(31)
faces.InsertCellPoint(18)
faces.InsertCellPoint(19)
faces.InsertNextCell(3)
faces.InsertCellPoint(31)
faces.InsertCellPoint(19)
faces.InsertCellPoint(29)
model = vtk.vtkPolyData()
model.SetPolys(faces)
model.SetPoints(points)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#vtkLoopSubdivisionFilter subdivide
subdivide = vtk.vtkLoopSubdivisionFilter()
subdivide.SetInputData(model)
subdivide.SetNumberOfSubdivisions(4)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(subdivide.GetOutputPort())
rose = vtk.vtkLODActor()
rose.SetMapper(mapper)
fe = vtk.vtkFeatureEdges()
fe.SetInputConnection(subdivide.GetOutputPort())
fe.SetFeatureAngle(100)
feMapper = vtk.vtkPolyDataMapper()
feMapper.SetInputConnection(fe.GetOutputPort())
edges = vtk.vtkActor()
edges.SetMapper(feMapper)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(rose)
#ren1 AddActor edges
backP = vtk.vtkProperty()
backP.SetDiffuseColor(1,1,.3)
rose.SetBackfaceProperty(backP)
rose.GetProperty().SetDiffuseColor(1,.4,.3)
rose.GetProperty().SetSpecular(.4)
rose.GetProperty().SetDiffuse(.8)
rose.GetProperty().SetSpecularPower(40)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(300,300)
# render the image
#
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.Zoom(4.5)
cam1.Azimuth(-90)
ren1.ResetCameraClippingRange()
iren.Initialize()
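# Note (not in the original test): when running this script standalone rather
# than under the VTK test harness, one would typically call iren.Start() here
# to enter the interactive event loop and keep the render window open.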
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Modeling/Testing/Python/KlineBottle.py
|
Python
|
gpl-3.0
| 9,138
|
[
"VTK"
] |
c87dfb8b5fc439b5fd67d5f5f120964915d70d00e010fd7e00661017cc3be943
|
#!/home/bin/python2
import multiprocessing;
import string;
import argparse;
import gzip;
import tempfile;
import subprocess;
import itertools;
import sys;
import time;
from scipy.stats import binom;
import numpy;
import os;
import pysam;
import math;
import copy;
import shutil
import resource
import glob
import collections
import datetime
import io
def main():
#Arguments passed
parser = argparse.ArgumentParser()
# required
parser.add_argument("--bam", help="Indexed BAMs (comma separated) containing aligned reads", required = False, default='')
parser.add_argument("--vcf", help="VCF for the sample, must be gzipped and tabix indexed.", required = True, default='')
parser.add_argument("--sample", help="Sample name in VCF", required = False, default='')
parser.add_argument("--mapq", help="Minimum MAPQ for reads to be used for phasing. Can be a comma separated list, each value corresponding to the min MAPQ for a file in the input BAM list. Useful in cases when using both for example DNA and RNA libraries which might have differing mapping qualities.", required = True)
parser.add_argument("--baseq", type=int, help="Minimum baseq for bases to be used for phasing", required = True)
parser.add_argument("--paired_end", help="Sequencing data comes from a paired end assay (0,1). Can be a comma separated list, each value specifying whether sequencing data comes from a paired end assay for the files in the input BAM list. If set to true phASER will require all reads to have the 'read mapped in proper pair' flag.", required = True)
parser.add_argument("--o", help="Out prefix",required = True)
# optional
parser.add_argument("--python_string", default="python2.7", help="Command that specifies which python2.x interpreter has to be used, required for running read variant mapping script.")
parser.add_argument("--haplo_count_bam_exclude", default="", help="Comma separated list of BAMs to exclude when generating haplotypic counts (outputted in o.haplotypic_counts.txt). When left blank haplotypic counts will be generated for all input BAMs, otherwise will they will not be generated for the BAMs specified here. Specify libraries by index where 1 = first library in --bam list, 2 = second, etc...")
parser.add_argument("--haplo_count_blacklist", default="", help="BED file containing genomic intervals to be excluded from haplotypic counts. Reads from any variants which lie within these regions will not be counted for haplotypic counts.")
parser.add_argument("--cc_threshold", type=float, default=0.01, help="Threshold for significant conflicting variant configuration. The connection between any two variants with a conflicting configuration having p-value lower than this threshold will be removed.")
parser.add_argument("--isize", default="0", help="Maximum allowed insert size for read pairs. Can be a comma separated list, each value corresponding to a max isize for a file in the input BAM list. Set to 0 for no maximum size.")
parser.add_argument("--as_q_cutoff", type=float, default=0.05, help="Bottom quantile to cutoff for read alignment score.")
parser.add_argument("--blacklist", default="", help="BED file containing genomic intervals to be excluded from phasing (for example HLA).")
parser.add_argument("--write_vcf", type=int, default=1, help="Create a VCF containing phasing information (0,1).")
parser.add_argument("--include_indels", type=int, default=0, help="Include indels in the analysis (0,1). NOTE: since mapping is a problem for indels including them will likely result in poor quality phasing unless specific precautions have been taken.")
parser.add_argument("--output_read_ids", type=int, default=0, help="Output read IDs in the coverage files (0,1).")
parser.add_argument("--remove_dups", type=int, default=1, help="Remove duplicate reads from all analyses (0,1).")
parser.add_argument("--pass_only", type=int, default=1, help="Only use variants labled with PASS in the VCF filter field (0,1).")
parser.add_argument("--unphased_vars", type=int, default=1, help="Output unphased variants (singletons) in the haplotypic_counts and haplotypes files (0,1).")
parser.add_argument("--chr_prefix", type=str, default="", help="Add the string to the begining of the VCF contig name. For example set to 'chr' if VCF contig is listed as '1' and bam reference is 'chr1'.")
# genome wide phasing
parser.add_argument("--gw_phase_method", type=int, default=0, help="Method to use for determing genome wide phasing. NOTE requires input VCF to be phased and have allele frequencies for MAF weighted mode (see --gw_af_field). 0 = Use most common haplotype phase. 1 = MAF weighted phase anchoring.")
parser.add_argument("--gw_af_field", default="AF", help="Field from --vcf to use for allele frequency.")
parser.add_argument("--gw_phase_vcf", type=int, default=0, help="Replace GT field of output VCF using phASER genome wide phase. 0: do not replace; 1: replace when gw_confidence >= --gw_phase_vcf_min_confidence; 2: as in (1), but in addition replace with haplotype block phase when gw_confidence < --gw_phase_vcf_min_confidence and include PS field. See --gw_phase_method for options.")
parser.add_argument("--gw_phase_vcf_min_confidence", type=float, default=0.90, help="If replacing GT field in VCF, only replace when phASER haplotype gw_confidence >= this value.")
# performance
parser.add_argument("--threads", type=int, default=1, help="Maximum number of threads to use. Note the maximum thread count for some tasks is bounded by the data (for example 1 thread per contig for haplotype construction).")
parser.add_argument("--max_block_size", type=int, default=15, help="Maximum number of variants to phase at once. Number of haplotypes tested = 2 ^ # variants in block. Blocks larger than this will be split into sub blocks, phased, and then the best scoring sub blocks will be phased with each other.")
parser.add_argument("--temp_dir", default="", help="Location of temporary directory to use for storing files. If left blank will default to system temp dir. NOTE: potentially large files will be stored in this directory, so please ensure there is sufficient free space.")
parser.add_argument("--max_items_per_thread", type=int, default=100000, help="Maximum number of items that can be assigned to a single thread to process. NOTE: if this number is too high Python will stall when trying to join the pools.")
# debug / development / reporting
parser.add_argument("--show_warning", type=int, default=0, help="Show warnings in stdout (0,1).")
parser.add_argument("--debug", type=int, default=0, help="Show debug mode messages (0,1).")
parser.add_argument("--chr", default="", help="Restrict haplotype phasing to a specific chromosome.")
parser.add_argument("--unique_ids", type=int, default=0, help="Generate and output unique IDs instead of those provided in the VCF (0,1). NOTE: this should be used if your VCF does not contain a unique ID for each variant.")
parser.add_argument("--id_separator", default="_", help="Separator to use when generating unique IDs. Must not be found in contig name, and cannot include ':'.")
parser.add_argument("--output_network", default="", help="Output the haplotype connection network for the given variant.")
## ** adding new arguments - BKG
'''TODO: add a note indicating that these flags are only used in multisample mode'''
parser.add_argument("--process_slow", type=int, default=0, required=False,
help="Argument to process data slow in chunks (by chromosome) to handle memory limits.")
global args;
args = parser.parse_args()
#setup
version = "1.1.1";
fun_flush_print("");
fun_flush_print("##################################################")
fun_flush_print(" Welcome to phASER v%s"%(version));
fun_flush_print(" Author: Stephane Castel (scastel@nygenome.org)")
fun_flush_print(" Updated by: Bishwa K. Giri (bkgiri@uncg.edu)")
fun_flush_print("##################################################");
fun_flush_print("");
global devnull;
devnull = open(os.devnull, 'w')
# check for external dependencies
if check_dependency("samtools") == False: fatal_error("External dependency 'samtools' not installed.");
if check_dependency("bgzip") == False: fatal_error("External dependency 'bgzip' not installed.");
if check_dependency("tabix") == False: fatal_error("External dependency 'tabix' not installed.");
if check_dependency("bedtools") == False: fatal_error("External dependency 'bedtools' not installed.");
if check_dependency("bcftools") == False: fatal_error("External dependency 'bcftools' not installed.");
if args.id_separator == ":" or args.id_separator == "":
fatal_error("ID separator must not be ':' or blank. Please choose another separator that is not found in the contig names.");
contig_ban = [args.id_separator, ":"];
if args.temp_dir != "":
tempfile.tempdir = args.temp_dir;
# check for needed files
needed_files = ['call_read_variant_map.py','read_variant_map.py'];
for xfile in needed_files:
if os.path.isfile(return_script_path()+"/"+xfile) == False:
fatal_error("File %s is needed for phASER to run."%xfile);
# check that setup has been run
if os.path.isfile(return_script_path()+"/"+'read_variant_map.so') == False:
fatal_error("Read Variant Mapper module must be compiled by running 'python setup.py build_ext --inplace'.");
# check that the VCF of interest exists in bgzipped form and is indexed
if os.path.isfile(args.vcf) == False:
fatal_error("VCF file does not exist.");
elif os.path.isfile(args.vcf+".tbi") == False and os.path.isfile(args.vcf+".csi") == False:
fatal_error("VCF file is not tabix indexed.");
if args.vcf.endswith(".gz") == False and args.vcf.endswith(".bgz") == False:
fatal_error("VCF must be gzipped.");
# record whether a CSI or TBI file was used for VCF
global csi_index;
csi_index = int(os.path.isfile(args.vcf+".csi"));
## Check files availability.
check_files = [args.vcf,args.blacklist,args.haplo_count_blacklist]
for xfile in check_files:
if xfile != "":
if os.path.isfile(xfile) == False:
fatal_error("File: %s not found."%(xfile));
## find all the sample names in input VCF file.
# this code returns a key-value of all the "sample:sample position" in the vcf file
map_sample_column = sample_column_map(args.vcf);
# the BAM regions to exclude.
global haplo_count_bam_exclude;
if args.haplo_count_bam_exclude != "":
# split and subtract 1 to make 0 based index
haplo_count_bam_exclude = [x-1 for x in map(int, args.haplo_count_bam_exclude.split(","))];
else:
haplo_count_bam_exclude = [];
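# Worked example of the 1-based to 0-based conversion above (illustrative):
# --haplo_count_bam_exclude "1,3" with --bam a.bam,b.bam,c.bam yields
# haplo_count_bam_exclude = [0, 2], so a.bam and c.bam are skipped when
# generating haplotypic counts.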
print("Completed the check of dependencies and input files availability... ")
fun_flush_print('')
''' Starting Read backed phasing '''
sample_start_time = time.time()
fun_flush_print('STARTED "Read backed phasing and ASE/haplotype analyses" ... ')
print(" DATE, TIME : %s" % (datetime.datetime.now().strftime('%Y-%m-%d, %H:%M:%S')))
fun_flush_print("#1. Loading heterozygous variants into intervals...")
sample_name = args.sample
print('Processing sample named {}'.format(sample_name))
## Pass the data to another procedure/function to start read backed phasing.
parse_sample(sample_name, map_sample_column, args.bam, args.o, contig_ban)
fun_flush_print('')
print('COMPLETED "Read backed phasing" of sample {} in {} hh:mm:ss'.
format(sample_name, time.strftime("%H:%M:%S", time.gmtime(time.time()-sample_start_time))))
print("DATE, TIME : %s" %(datetime.datetime.now().strftime('%Y-%m-%d, %H:%M:%S')))
fun_flush_print('')
print('The End.')
'''Function to run Readback phasing for the given input sample'''
def parse_sample(sample_name, map_sample_column, bam_file, sample_out_path, contig_ban):
args.bam = bam_file
check_bams = args.bam.split(",")
for xfile in check_bams:
if xfile != "":
if os.path.isfile(xfile) == False:
fatal_error("File: %s not found." % (xfile));
if os.path.isfile(xfile + ".bai") == False and os.path.isfile(xfile.replace(".bam", ".bai")) == False:
fatal_error(
"Index for BAM %s not found. BAM files must be indexed, with naming 'sample.bam.bai'." % (xfile));
global sample_column
#start_time = time.time()
if sample_name in map_sample_column:
sample_column = map_sample_column[sample_name];
else:
fatal_error("Sample '%s' not found in the input VCF file." % (sample_name));
# filter blacklisted variants if necessary, cut only sample column, filter for heterozygous sites
# decompress for intersection
if args.chr != "":
fun_flush_print(" restricting to chromosome '%s'..." % (args.chr));
decomp_str = "tabix -h "+args.vcf+" "+args.chr+":"
else:
fun_flush_print(" using all the chromosomes ...");
decomp_str = "gunzip -c "+args.vcf;
## create a temporary file to store the VCF data
vcf_out = tempfile.NamedTemporaryFile(delete=False);
vcf_out.close();
vcf_path = vcf_out.name;
if args.blacklist != "":
fun_flush_print(" removing blacklisted variants and processing VCF...");
call_str = decomp_str + " | cut -f 1-9,"+str(sample_column+1)+" | grep -v '0|0\|1|1' | bedtools intersect -header -v -a stdin -b "+args.blacklist+" > "+vcf_out.name;
error_code = subprocess.check_call("set -euo pipefail && "+call_str,shell=True, executable='/bin/bash',stderr=devnull)
else:
fun_flush_print(" processing VCF...");
call_str = decomp_str + " | cut -f 1-9,"+str(sample_column+1)+" | grep -v '0|0\|1|1' > "+vcf_out.name;
error_code = subprocess.check_call("set -euo pipefail && "+call_str,shell=True, executable='/bin/bash')
if error_code != 0:
fatal_error("VCF filtering using subprocess.call \""+call_str+"\" exited with an error")
# generate blacklisted variant list
set_haplo_blacklist = [];
if args.haplo_count_blacklist != "":
fun_flush_print("#1b. Loading haplotypic count blacklist intervals...");
raw_interval = subprocess.check_output("set -euo pipefail && "+"bedtools intersect -a "+vcf_path+" -b "+args.haplo_count_blacklist+" | cut -f 1-2", shell=True, executable='/bin/bash')
for line in raw_interval.split("\n"):
columns = line.replace("\n","").split("\t");
if len(columns) > 1:
xchr = columns[0];
if args.chr == "" or args.chr == xchr:
pos = int(columns[1]);
set_haplo_blacklist.append(xchr+"_"+str(pos));
set_haplo_blacklist = set(set_haplo_blacklist);
# storing the string value of original output prefix (i.e args.o)
#org_outprefix = copy.copy(args.o)
org_outprefix = copy.copy(sample_out_path)
fun_flush_print('')
#stream_vcf = open(vcf_path, "r")
if args.process_slow == 0:
'''loads "all reads" from bam file (from all chromosome/contigs) in to the memory.
This is good when the computer RAM is big.'''
print(' Memory efficient mode is deactivated...\n'
' If RAM is limited, activate memory efficient mode using the flag "--process_slow = 1"...\n')
#stream_vcf = gzip.open(args.vcf) ;
stream_vcf = open(vcf_path, "r")
chr_of_interest = args.chr
start_time = time.time()
process_vcf(stream_vcf, chr_of_interest, contig_ban, set_haplo_blacklist,
start_time, vcf_out, sample_out_path, last_chr=True, pi_block_value = 0)
elif args.process_slow == 1:
'''processes reads from each contig/chromosome separately.
This is helpful is the computer RAM is limited.
Ironically, this mode might be faster if memory congestion occurs in "all reads" mode.'''
print(' Memory efficient mode is activated... ')
print(' WARNING: this may produce slightly different results since the sequencing noise estimate is generated per chromosome, instead of across all chromosomes... ')
## prepare the list of the contig/chromosome names in the input VCF
if args.chr == '':
# if original args.chr was empty, use all the chromosomes
argu0 = ["tabix -l " + args.vcf]
process_col0 = subprocess.Popen(argu0, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True, executable='/bin/bash')
uniq_chr = process_col0.communicate()[0]
chr_of_interest = uniq_chr.rstrip('\n').split("\n")
print(' %s unique contigs/chromosomes found... ' %(len(chr_of_interest)))
elif args.chr != '':
# else use only the chromosome of interest
chr_of_interest = args.chr.split(',')
print(' %s unique contigs/chromosomes assigned... ' % (len(chr_of_interest)))
# to assign unique block value to read backed phased haplotypes
# used in the function "process_vcf()"
global pi_block_value
pi_block_value = 0
## Now, process each contig/chromosome separately on a for loop
print(' Running processes for each chromosome separately...\n')
for nth, unq_chr in enumerate(chr_of_interest):
if nth == len(chr_of_interest)-1:
last_chr = True
else: last_chr = False
# open the input vcf in each loop.
# ** for future: this can be avoided by splitting the VCF file,
# and may also reduce the run time.
# see this example: https://www.biostars.org/p/173073/
stream_vcf = open(vcf_path, "r")
# name the output as : arg.o + contig name.
# ** for future: this may also be stored as a temporary file
sample_out_path_by_chr = org_outprefix + unq_chr
start_time = time.time()
# now, pass the data to the required procedure/function
process_vcf(stream_vcf, unq_chr, contig_ban,
set_haplo_blacklist, start_time, vcf_out,
sample_out_path_by_chr, last_chr, pi_block_value)
# pause the loop briefly for a few seconds (to allow some time/room for optimization purposes)
time.sleep(1.5)
fun_flush_print('')
## After the above for-loop process is complete, merge the data for several contigs/chromosomes
# This is only active in "process_slow = 1" mode.
merge_files(chr_of_interest, org_outprefix, sample_name)
# this is only active in "process_slow = 1" mode.
def merge_files(chr_of_interest, org_outprefix, sample_name):
print("#8. Merging the results from several contigs/chromosome ...")
file_group = collections.OrderedDict() # to store the names by group
files_to_delete = [] # store the names that will be deleted at the end
## find the several group of files separated by chromosome/contig
for chr_ in chr_of_interest:
for name in glob.glob(org_outprefix + chr_ + '.' + '*'):
files_to_delete.append(name) # store the file that needs to be deleted later
# setting the keys-values to group the data from same type
ks = name.replace(org_outprefix + chr_ + '.', '')
if ks in file_group:
file_group[ks] += [name]
else:
file_group[ks] = [name]
## Now, merge the data that belong to same type
for file_suffix, file_names in file_group.items():
if file_suffix.endswith('.txt'):
print(' - Merging split text files *.%s into one file for the given sample "%s"'
%(file_suffix, sample_name))
with open(org_outprefix + "." + file_suffix, 'w') as new_file:
for names in file_names:
# only read the first line from the first file
header = open(names, 'r').readline()
break
# write the header
new_file.write(header)
# now, merge the files to one file
for names in file_names:
new_file.write(''.join(open(names, 'r').readlines()[1:]))
elif file_suffix == 'vcf.gz':
## Merge the VCF files splitted by chromosome into one file.
print(' - Concatenating split VCFs for sample "%s"' %sample_name)
#argu1 = "bcftools concat " + ' '.join(file_names) + " -O z -o " + org_outprefix + ".vcf.gz"
argu1 = "bcftools concat " + ' '.join(file_names) + " -a -O v" + " | " + "bcftools sort -O z -o "+ org_outprefix + ".vcf.gz"
subprocess.check_call(argu1, shell=True, executable='/bin/bash')
tabix_cmd = "tabix -f -p vcf " + org_outprefix + ".vcf.gz"
subprocess.check_call(tabix_cmd, shell=True, executable='/bin/bash')
## delete the non required files
# ** for future: if these files were stored as temp file this deletion won't be necessary
for names in files_to_delete:
os.remove(names)
'''This function processes the VCF for the input sample. If memory-efficient mode is
activated, the VCF for each chromosome/scaffold is passed into this function one
at a time; otherwise all of the VCF data is passed at once.'''
def process_vcf(stream_vcf, chromosome, contig_ban, set_haplo_blacklist,
start_time, vcf_out, out_prefix, last_chr, pi_block_value):
chrom_of_interest = chromosome
mapper_out = tempfile.NamedTemporaryFile(delete=False);
bed_out = tempfile.NamedTemporaryFile(delete=False);
het_count = 0;
total_indels_excluded = 0;
unphased_count = 0;
if args.process_slow == 1:
fun_flush_print(" \nprocessing chromosome '%s' ..." %(chromosome))
fun_flush_print(" creating variant mapping table...");
gt_index = -1;
chromosome_pool = collections.OrderedDict()
filter_count = 0;
for line in stream_vcf:
vcf_columns = line.rstrip('\n').split("\t");
if line.startswith("#") == False:
#1 10177 . A AC 100 PASS AC=2130;AF=0.425319;AN=5008;NS=2504;DP=103152;EAS_AF=0.3363;AMR_AF=0.3602;AFR_AF=0.4909;EUR_AF=0.4056;SAS_AF=0.4949;AA=|||unknown(NO_COVERAGE) GT 1|0
unphased = False;
chr = vcf_columns[0];
for item in contig_ban:
if item in chr:
fatal_error("Character '%s' must not be present in contig name. "
"Please change id separtor using --id_separator to a character not "
"found in the contig names and try again."%(item));
filter = vcf_columns[6];
if chrom_of_interest == "" or chrom_of_interest == chr:
if chr not in chromosome_pool:
chromosome_pool[chr] = [];
fields = vcf_columns[8].split(":");
if "GT" in fields:
gt_index = fields.index("GT");
geno_string = vcf_columns[9].split(":")[gt_index];
xgeno = list(geno_string);
if "." not in xgeno:
if "|" in xgeno: xgeno.remove("|");
if "/" in xgeno:
xgeno.remove("/");
unphased = True;
if len(set(xgeno)) > 1:
filters = filter.split(";");
if args.pass_only == 0 or "PASS" in filters:
chromosome_pool[chr].append(vcf_columns[0:9]+[geno_string,xgeno]);
if unphased == True:
unphased_count += 1;
else:
filter_count += 1;
else:
print_warning("Genotype, defined by GT not found in input VCF for variant %s."%(vcf_columns[2]));
pool_input = [];
for chrom in chromosome_pool.keys():
pool_input.append([chrom,chromosome_pool[chrom]]);
global temp_files;
temp_files = [];
pool_output = parallelize(generate_mapping_table, pool_input);
# clear memory
del pool_input;
del chromosome_pool;
mapping_files = [];
het_count = 0;
total_indels_excluded = 0;
for output in pool_output:
mapping_files.append([output[0],output[3],output[4]]);
het_count += output[1];
total_indels_excluded += output[2];
fun_flush_print(" %d heterozygous sites being used for phasing (%d filtered, %d indels excluded, %d unphased)"%(het_count,filter_count,total_indels_excluded,unphased_count));
print
if het_count == 0:
fatal_error("No heterozygous sites that passed all filters were included in the analysis, phASER cannot continue. Check blacklist and pass_only arguments.");
fun_flush_print("#2. Retrieving reads that overlap heterozygous sites...");
#works with multiple input bams
bam_list = args.bam.split(",");
# generate a list of bam names but don't allow any two to have the same ids
file_names = [os.path.basename(xbam).replace(".bam","") for xbam in bam_list];
bam_names = [];
bam_counter = collections.OrderedDict()
for xbam in file_names:
if file_names.count(xbam) > 1:
if xbam not in bam_counter: bam_counter[xbam] = 0;
bam_counter[xbam] += 1;
bam_names.append(xbam+"."+str(bam_counter[xbam]))
else:
bam_names.append(xbam);
#mapq
mapq_list = args.mapq.split(",");
if len(mapq_list) == 1 and len(bam_list) > 1:
mapq_list = mapq_list * len(bam_list);
elif len(mapq_list) != len(bam_list):
fatal_error("Number of mapq values and input BAMs does not match. Supply either one mapq to be used for all BAMs or one mapq per input BAM.");
#isize
isize_list = args.isize.split(",");
if len(isize_list) == 1 and len(bam_list) > 1:
isize_list = isize_list * len(bam_list);
elif len(isize_list) != len(bam_list):
fatal_error("Number of isize values and input BAMs does not match. Supply either one isize to be used for all BAMs or one isize per input BAM.");
isize_list = map(float, isize_list);
#paired_end
paired_end_list = args.paired_end.split(",");
if len(paired_end_list) == 1 and len(bam_list) > 1:
paired_end_list = paired_end_list * len(bam_list);
elif len(paired_end_list) != len(bam_list):
fatal_error ("Number of paired_end values and input BAMs does not match. Supply either one paired_end to be used for all BAMs or one paired_end per input BAM.");
#now get bam reads that overlap het sites using SAMTOOLS
samtools_arg_list=[];
# remove dups if necessary, and only include properly paired read (ie in correct orientation)
for i in paired_end_list:
samtools_arg=""
if args.remove_dups == 1:
samtools_arg += "-F 0x400 "
if int(i) == 1:
samtools_arg += "-f 2"
samtools_arg_list.append(samtools_arg)
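# Worked example of the samtools filter flags built above: with
# --remove_dups 1 and --paired_end 1 the argument string is "-F 0x400 -f 2"
# (exclude duplicates, require properly paired reads); with --paired_end 0
# it is just "-F 0x400 ".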
global dict_variant_reads;
dict_variant_reads = collections.OrderedDict()
global read_vars;
read_vars = collections.OrderedDict()
global bam_index;
bam_index = 0;
total_reads = 0;
for samtools_arg, bam, mapq, isize in zip(samtools_arg_list, bam_list, mapq_list, isize_list):
fun_flush_print(" file: %s"%(bam));
fun_flush_print(" minimum mapq: %s"%(mapq));
# use the read variant mapping script to map reads to alleles
fun_flush_print(" mapping reads to variants...");
pool_input = [x + [samtools_arg,bam,mapq,isize] for x in mapping_files];
result_files = parallelize(call_mapping_script, pool_input);
# process the result
# A determine if we need to calculate alignment score cutoff
fun_flush_print(" processing mapped reads...");
global use_as_cutoff;
global as_cutoff;
use_as_cutoff = False;
if args.as_q_cutoff > 0:
alignment_scores = map(int,[x for x in subprocess.check_output("set -euo pipefail && "+"cut -f 5 "+" ".join(result_files), shell=True, executable='/bin/bash').split("\n") if x != ""]);
if len(alignment_scores) == 0:
fun_flush_print(" no alignment score value found in reads, cannot use cutoff");
else:
as_cutoff = numpy.percentile(alignment_scores,args.as_q_cutoff*100);
use_as_cutoff = True;
fun_flush_print(" using alignment score cutoff of %d"%(as_cutoff));
# B now process variant read overlaps
pool_output = parallelize(process_mapping_result, result_files);
for output in pool_output:
for variant in output[0]:
if variant not in dict_variant_reads:
dict_variant_reads[variant] = output[0][variant];
else:
dict_variant_reads[variant]['reads'][0] += output[0][variant]['reads'][0];
dict_variant_reads[variant]['reads'][1] += output[0][variant]['reads'][1];
for xallele in [0,1]:
for xbam in output[0][variant]['haplo_reads'][xallele].keys():
if xbam in dict_variant_reads[variant]['haplo_reads'][xallele]:
dict_variant_reads[variant]['haplo_reads'][xallele][xbam] += output[0][variant]['haplo_reads'][xallele][xbam];
else:
dict_variant_reads[variant]['haplo_reads'][xallele][xbam] = output[0][variant]['haplo_reads'][xallele][xbam]
dict_variant_reads[variant]['other_reads'] += output[0][variant]['other_reads'];
for output in pool_output:
if output[3] not in read_vars: read_vars[output[3]] = collections.OrderedDict()
for output in pool_output:
for read in output[1]:
if read not in read_vars[output[3]]:
read_vars[output[3]][read] = output[1][read];
else:
read_vars[output[3]][read] += output[1][read];
bam_reads = 0;
for output in pool_output:
total_reads += output[2];
bam_reads += output[2];
del pool_output;
fun_flush_print(" retrieved %d reads"%(bam_reads));
bam_index += 1;
# delete temp mapping files
for xfile in result_files:
os.remove(xfile);
#cleanup temp files
if args.process_slow == 0 or \
(args.process_slow == 1 and last_chr==True):
os.remove(vcf_out.name);
os.remove(mapper_out.name);
os.remove(bed_out.name);
for xfile in temp_files:
os.remove(xfile);
fun_flush_print("#3. Identifying connected variants...");
fun_flush_print(" calculating sequencing noise level...");
# calculate noise level
base_match_count = 0;
base_mismatch_count = 0;
for variant in dict_variant_reads:
mis_matches = len(dict_variant_reads[variant]['other_reads']);
matches = sum([len(x) for x in dict_variant_reads[variant]['reads']]);
# require other bases to be < 5% of total coverage for this variant
# protects against genotyping errors
if matches > 0 and (float(mis_matches) / float(mis_matches+matches)) < 0.05:
base_match_count += matches;
base_mismatch_count += mis_matches;
if base_match_count == 0:
fatal_error("No reads could be matched to variants. Please double check your settings and input files. Common reasons for this occurring include: 1) MAPQ or BASEQ set too conservatively 2) BAM and VCF have different chromosome names (IE 'chr1' vs '1').");
# probability of generating a random base
global noise_e;
noise_e = (float(base_mismatch_count) / (float(base_match_count+base_mismatch_count)*2));
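# a sketch of the interpretation (inferred from the formula above): noise_e is the observed
# mismatch rate halved, i.e. the per-base probability of one specific erroneous base,
# assuming a sequencing error is equally likely to produce either of the two non-allele bases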
fun_flush_print(" sequencing noise level estimated at %f"%(noise_e));
# premake read sets for faster comparison
fun_flush_print(" creating read sets...");
for var_id in dict_variant_reads:
dict_variant_reads[var_id]['read_set'] = [];
for allele_reads in dict_variant_reads[var_id]['reads']:
dict_variant_reads[var_id]['read_set'].append(set(allele_reads));
dict_variant_reads[var_id]['other_read_set'] = set(dict_variant_reads[var_id]['other_reads']);
# now create the quick lookup dictionary
# this is used for haplotype construction
# dictionary tells you what variants are connected
fun_flush_print(" generating read connectivity map...");
global dict_variant_overlap;
dict_variant_overlap = collections.OrderedDict()
pool_input = read_vars.keys();
pool_output = parallelize(generate_connectivity_map, pool_input);
for output in pool_output:
dict_variant_overlap.update(output);
# clear memory
del pool_output;
del read_vars;
# make sets of overlaps
for chr in dict_variant_overlap:
for variant in dict_variant_overlap[chr]:
dict_variant_overlap[chr][variant] = set(dict_variant_overlap[chr][variant]);
## now run the test to determine whether the number of reads with conflicting connections
## is higher than expected from noise for a given variant pair.
## if so, these two variants will be disconnected so that they won't be used for haplotype construction
tested_connections = set([]);
pool_input = [];
fun_flush_print(" testing variant connections versus noise...");
for chr in dict_variant_overlap:
for variant_a in dict_variant_overlap[chr]:
overlapping_variants = dict_variant_overlap[chr][variant_a];
for variant_b in overlapping_variants:
key1 = variant_a+"|"+variant_b;
key2 = variant_b+"|"+variant_a;
if key1 not in tested_connections and key2 not in tested_connections:
pool_input.append([chr,variant_a,variant_b]);
tested_connections.add(key1);
pool_output = parallelize(test_variant_connection, pool_input);
#out_stream = open(args.o+".variant_connections.txt","w");
out_stream = open(out_prefix + ".variant_connections.txt", "w");
out_stream.write("variant_a\tvariant_b\tsupporting_connections\ttotal_connections\tconflicting_configuration_p\tphase_concordant\n");
dict_allele_connections = collections.OrderedDict()
# remove all those connections which failed
c_dropped = 0;
for connection in pool_output:
chr,variant_a,variant_b,conflicting_config_p,c_supporting,c_total,phase_concordant,chosen_config = connection;
# if the number of conflicting reads is more than would be expected from noise, then disconnect these two variants
# they will not be used for haplotype construction
out_stream.write("\t".join(map(str,[variant_a,variant_b,c_supporting,c_total,conflicting_config_p,phase_concordant]))+"\n");
if conflicting_config_p < args.cc_threshold:
#print("%s %s"%(variant_a,variant_b));
dict_variant_overlap[chr][variant_a].remove(variant_b);
dict_variant_overlap[chr][variant_b].remove(variant_a);
# if these variants have no other connections remove them from overlap dictionary
if len(dict_variant_overlap[chr][variant_a]) == 0:
del dict_variant_overlap[chr][variant_a];
if len(dict_variant_overlap[chr][variant_b]) == 0:
del dict_variant_overlap[chr][variant_b];
c_dropped += 1;
else:
# connection was not dropped; record the specific allele connections
if variant_a+":0" not in dict_allele_connections: dict_allele_connections[variant_a+":0"] = set([]);
if variant_a+":1" not in dict_allele_connections: dict_allele_connections[variant_a+":1"] = set([]);
if variant_b+":0" not in dict_allele_connections: dict_allele_connections[variant_b+":0"] = set([]);
if variant_b+":1" not in dict_allele_connections: dict_allele_connections[variant_b+":1"] = set([]);
if chosen_config == 0:
# 0 - 0 / 1 - 1
dict_allele_connections[variant_a+":0"].add(variant_b+":0");
dict_allele_connections[variant_b+":0"].add(variant_a+":0");
dict_allele_connections[variant_a+":1"].add(variant_b+":1");
dict_allele_connections[variant_b+":1"].add(variant_a+":1");
elif chosen_config == 1:
# 0 - 1 / 1 - 0
dict_allele_connections[variant_a+":0"].add(variant_b+":1");
dict_allele_connections[variant_b+":0"].add(variant_a+":1");
dict_allele_connections[variant_a+":1"].add(variant_b+":0");
dict_allele_connections[variant_b+":1"].add(variant_a+":0");
out_stream.close();
fun_flush_print(" %d variant connections dropped because of conflicting configurations (threshold = %f)"%(c_dropped,args.cc_threshold));
# output the coverage level per snp
# same format as GATK tool:
#stream_out = open(args.o + ".allelic_counts.txt", "w");
stream_out = open(out_prefix + ".allelic_counts.txt", "w");
stream_out.write("contig position variantID refAllele altAllele refCount altCount totalCount\n");
covered_count = 0;
for variant in dict_variant_reads:
snp_dict = dict_variant_reads[variant];
ref_reads = len(set(snp_dict['reads'][0]));
alt_reads = len(set(snp_dict['reads'][1]));
if ref_reads+alt_reads > 0:
covered_count += 1;
stream_out.write("\t".join([snp_dict['chr'],str(snp_dict['pos']),variant,snp_dict['alleles'][0],snp_dict['alleles'][1],str(ref_reads),str(alt_reads),str(ref_reads+alt_reads)+"\n"]));
stream_out.close();
fun_flush_print(" %d variants covered by at least 1 read"%(covered_count));
# record the total number of hets
total_het_variants = len(dict_variant_reads.keys());
remove_keys = [];
if args.unphased_vars == 0:
# clear all SNPs with no connections to others from the dictionary to free up memory
# if we don't want to output unphased SNPs
for variant in dict_variant_reads:
chr = dict_variant_reads[variant]['chr'];
if chr not in dict_variant_overlap:
remove_keys.append(variant);
elif variant not in dict_variant_overlap[chr]:
remove_keys.append(variant);
else:
# otherwise just remove variants with 0 coverage
for variant in dict_variant_reads:
if len(dict_variant_reads[variant]['reads'][0]) + len(dict_variant_reads[variant]['reads'][1]) == 0:
remove_keys.append(variant);
for key in remove_keys:
del dict_variant_reads[key];
print_debug(" removed %d variants from memory in cleanup"%(len(remove_keys)));
# using only the overlapping SNP dictionary build haplotype blocks
fun_flush_print("#4. Identifying haplotype blocks...");
block_haplotypes = [];
phased_vars = 0;
pool_output = parallelize(build_haplotypes, dict_variant_overlap.values());
for chr_haplotypes in pool_output:
for haplotype_block in chr_haplotypes:
block_haplotypes.append(haplotype_block);
phased_vars += len(haplotype_block);
# now for each of the blocks identify the phasing with the most supporting reads
fun_flush_print("#5. Phasing blocks...");
pool_input = [];
for block in block_haplotypes:
# retrieve all allele connections for block;
variant_connections = collections.OrderedDict()
allele_connections = collections.OrderedDict()
for variant in block:
chr = variant.split(args.id_separator)[0];
if variant in dict_variant_overlap[chr]: variant_connections[variant] = dict_variant_overlap[chr][variant];
if variant+":0" in dict_allele_connections: allele_connections[variant+":0"] = dict_allele_connections[variant+":0"]
if variant+":1" in dict_allele_connections: allele_connections[variant+":1"] = dict_allele_connections[variant+":1"]
pool_input.append([sort_var_ids(block),variant_connections,allele_connections]);
pool_output = parallelize(phase_v3, pool_input);
final_haplotypes = [];
for output in pool_output:
for block in output:
if block != []:
final_haplotypes.append(block);
#print(final_haplotypes);
del pool_output;
del pool_input;
#pool_input = pool_split(args.threads, pool_input);
#pool_output = parallelize(phase_block_container, pool_input);
#final_haplotypes = [];
#for blocks in pool_output:
# for block in blocks:
# for sub_block in block:
# if len(sub_block) > 1:
# final_haplotypes.append(sub_block);
#del pool_output;
#del pool_input;
fun_flush_print("#6. Outputting haplotypes...");
#stream_out_ase = open(args.o+".haplotypic_counts.txt","w");
stream_out_ase = open(out_prefix + ".haplotypic_counts.txt", "w");
ase_columns = ["contig","start","stop","variants","variantCount","variantsBlacklisted","variantCountBlacklisted","haplotypeA","haplotypeB","aCount","bCount","totalCount","blockGWPhase","gwStat","max_haplo_maf","bam","aReads","bReads"];
if args.output_read_ids == 1:
ase_columns += ["read_ids_a","read_ids_b"];
stream_out_ase.write("\t".join(ase_columns)+"\n");
#stream_out = open(args.o+".haplotypes.txt","w");
stream_out = open(out_prefix + ".haplotypes.txt", "w");
stream_out.write("\t".join(['contig','start','stop','length','variants','variant_ids','variant_alleles','reads_hap_a','reads_hap_b','reads_total','edges_supporting','edges_total','annotated_phase','phase_concordant','gw_phase','gw_confidence'])+"\n");
#stream_out_allele_configs = open(args.o+".allele_config.txt","w");
stream_out_allele_configs = open(out_prefix + ".allele_config.txt", "w");
stream_out_allele_configs.write("\t".join(['variant_a','rsid_a','variant_b','rsid_b','configuration'])+"\n");
global haplotype_lookup;
haplotype_lookup = collections.OrderedDict()
global haplotype_pvalue_lookup;
haplotype_pvalue_lookup = collections.OrderedDict();
global haplotype_gw_stat_lookup;
haplotype_gw_stat_lookup = collections.OrderedDict();
global haplotype_max_maf_lookup;
haplotype_max_maf_lookup = collections.OrderedDict();
all_variants = [];
#block_index = 0;
# Create a new variable to store the "block index";
# its initial value is taken from "pi_block_value" (a global variable)
block_index = pi_block_value
for block in final_haplotypes:
#get all unique variants
block_index += 1;
variants = [x.split(":")[0] for x in block];
variants = sort_var_ids(variants);
all_variants += variants;
haplotype_a = "".join([x.split(":")[1] for x in block]);
haplotype_b = "".join([str(int(not int(x))) for x in haplotype_a]);
# determine number of supporting edges vs total edges for this haplotype
supporting_connections = 0;
total_connections = 0;
for allele in block:
variant = allele.split(":")[0];
for other_allele in block:
if allele != other_allele:
other_variant = other_allele.split(":")[0];
# check if the configuration supports the phasing
if other_allele in dict_allele_connections[allele]:
supporting_connections += 1;
if other_variant+":0" in dict_allele_connections[allele]:
total_connections += 1;
if other_variant+":1" in dict_allele_connections[allele]:
total_connections += 1;
supporting_connections = supporting_connections / 2;
total_connections = total_connections / 2;
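# each pair is counted twice above (once from each endpoint allele), hence the division by 2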
if args.unique_ids == 0:
rsids = [dict_variant_reads[x]['rsid'] for x in variants];
else:
rsids = variants;
chrs = [dict_variant_reads[x]['chr'] for x in variants];
positions = map(int, [dict_variant_reads[x]['pos'] for x in variants]);
hap_p = 0;
haplotype_pvalue_lookup[list_to_string(variants)] = hap_p;
for var_index in range(0, len(variants)):
id = variants[var_index];
haplotype_lookup[id] = [variants, haplotype_a[var_index]+"|"+haplotype_b[var_index],block_index];
alleles = [[],[]];
phases = [[],[]];
set_reads = [[],[]];
hap_counts = [0,0];
for hap_index in range(0,2):
hap_x = [haplotype_a, haplotype_b][hap_index];
for var_index in range(0, len(variants)):
id = variants[var_index];
allele = dict_variant_reads[id]['alleles'][int(hap_x[var_index])];
alleles[hap_index].append(allele);
phases[hap_index].append(get_allele_phase(allele,dict_variant_reads[id]));
allele_index = dict_variant_reads[id]['alleles'].index(allele);
set_reads[hap_index] += dict_variant_reads[id]['reads'][allele_index];
set_reads[hap_index] = list(set(set_reads[hap_index]));
hap_counts[hap_index] = len(set_reads[hap_index]);
# determine if phasing is completely concordant
# don't include variants whose phase was unknown in the original VCF
use_phases = [x for x in phases[0] if str(x) != "nan"];
if len(set(use_phases)) <= 1:
phase_concordant = 1;
else:
phase_concordant = 0;
phase_string = ["",""]
phase_string[0] = "".join([str(x).replace("nan", "-") for x in phases[0]]);
phase_string[1] = "".join([str(x).replace("nan", "-") for x in phases[1]]);
### GENOME WIDE PHASING
# how many population phased variants do we have in this hap
nan_strip = [int(x) for x in phases[0] if x >= 0];
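# comparisons against nan are False, so unphased (nan) entries are dropped here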
# by default corrected is the same as population
corrected_phases = [phases[0],phases[1]];
cor_phase_stat = 0.5;
maf_phased = False;
# get the MAF for each variant in haplotype
haplotype_mafs = [];
for variant in variants:
haplotype_mafs.append(dict_variant_reads[variant]['maf']);
if len(nan_strip) > 0:
# if setting is on determine genome wide phasing
# if completely concordant don't need to do anything
phase_set = set(phases[0]);
if "-" in phase_set: phase_set.remove("-");
if len(phase_set) == 1:
corrected_phases = [phases[0],phases[1]];
cor_phase_stat = 1;
if args.gw_phase_method == 1: maf_phased = True;
elif args.gw_phase_method == 0:
# phase using most common phase
cor_phase_stat = numpy.mean(nan_strip);
if cor_phase_stat < 0.5:
corrected_phases = [[0]*len(variants),[1]*len(variants)];
elif cor_phase_stat > 0.5:
corrected_phases = [[1]*len(variants),[0]*len(variants)];
else:
# no consensus, use population phasing
print_warning("No GW phasing consensus for %s using method 1"%(str(variants)));
cor_phase_stat = max([cor_phase_stat, 1-cor_phase_stat]);
elif args.gw_phase_method == 1:
# phase using MAF weighted phase
# we need the mafs for this, so we need to look them up
# Step 2 get allele frequencies
# first get the allele frequency for each of the variants
if len(haplotype_mafs) == len(variants):
phase_support = [0,0];
# now we need to weight the phasing by their MAF
for phase, maf in zip(phases[0],haplotype_mafs):
if phase == 0:
phase_support[0] += maf;
elif phase == 1:
phase_support[1] += maf;
# now select the phase with the most MAF support
if sum(phase_support) > 0:
cor_phase_stat = max(phase_support) / sum(phase_support);
maf_phased = True;
if phase_support[0] > phase_support[1]:
corrected_phases = [[0]*len(variants),[1]*len(variants)];
elif phase_support[1] > phase_support[0]:
corrected_phases = [[1]*len(variants),[0]*len(variants)];
else:
# no consensus, use population phasing
maf_phased = False;
print_warning("No GW phasing consensus for %s using method 2"%(str(variants)));
else:
# variants are not found in AF VCF but they still have phase, try using other approach
# phase using most common phase
cor_phase_stat = numpy.mean(nan_strip);
if cor_phase_stat < 0.5:
corrected_phases = [[0]*len(variants),[1]*len(variants)];
elif cor_phase_stat > 0.5:
corrected_phases = [[1]*len(variants),[0]*len(variants)];
else:
# no consensus, use population phasing
print_warning("No GW phasing consensus for %s using method 1"%(str(variants)));
cor_phase_stat = max([cor_phase_stat, 1-cor_phase_stat]);
else:
print_warning("GW phasing failed for %s"%(str(variants)));
# save the stat for lookup when generating VCF
haplotype_gw_stat_lookup[list_to_string(variants)] = cor_phase_stat;
haplotype_max_maf_lookup[list_to_string(variants)] = max(haplotype_mafs);
# update the variants with their corrected phases
for var_index in range(0,len(variants)):
variant = variants[var_index];
allele_index = dict_variant_reads[variant]['alleles'].index(alleles[0][var_index])
dict_variant_reads[variant]['gw_phase'][allele_index] = corrected_phases[0][var_index];
dict_variant_reads[variant]['gw_phase'][1-allele_index] = corrected_phases[1][var_index];
corrected_phase_string = ["",""]
corrected_phase_string[0] = "".join([str(x).replace("nan", "-") for x in corrected_phases[0]]);
corrected_phase_string[1] = "".join([str(x).replace("nan", "-") for x in corrected_phases[1]]);
## write the haplotype details
stream_out.write(str_join("\t",[chrs[0],min(positions),max(positions),max(positions)-min(positions),len(variants),list_to_string(rsids),list_to_string(alleles[0])+"|"+list_to_string(alleles[1]),hap_counts[0],hap_counts[1],sum(hap_counts),supporting_connections,total_connections,phase_string[0]+"|"+phase_string[1],phase_concordant,corrected_phase_string[0]+"|"+corrected_phase_string[1],cor_phase_stat])+"\n");
#$ write ASE stats
# generate haplotypic counts
for bam_i in range(0,len(bam_list)):
if bam_i not in haplo_count_bam_exclude:
bam_name = bam_names[bam_i]
set_hap_expr_reads = [[],[]];
hap_expr_counts = [0,0];
used_alleles = [[],[]];
used_vars = [];
var_reads = [[],[]];
used_var_pos = [];
blacklisted_vars = set([]);
for hap_index in range(0,2):
hap_x = [haplotype_a, haplotype_b][hap_index];
for var_index in range(0, len(variants)):
id = variants[var_index];
chrom = dict_variant_reads[id]['chr'];
pos = int(dict_variant_reads[id]['pos']);
used_var_pos.append(pos);
# check to see if variant is blacklisted
if chrom+"_"+str(pos) not in set_haplo_blacklist:
allele = dict_variant_reads[id]['alleles'][int(hap_x[var_index])];
allele_index = dict_variant_reads[id]['alleles'].index(allele);
if id not in used_vars: used_vars.append(id);
used_alleles[hap_index].append(allele);
if bam_i in dict_variant_reads[id]['haplo_reads'][allele_index]:
var_reads[hap_index].append(dict_variant_reads[id]['haplo_reads'][allele_index][bam_i]);
set_hap_expr_reads[hap_index] += dict_variant_reads[id]['haplo_reads'][allele_index][bam_i];
else:
var_reads[hap_index].append([]);
else:
blacklisted_vars.add(id);
set_hap_expr_reads[hap_index] = list(set(set_hap_expr_reads[hap_index]));
hap_expr_counts[hap_index] = len(set_hap_expr_reads[hap_index]);
hap_a_count = hap_expr_counts[0];
hap_b_count = hap_expr_counts[1]
hap_a_reads = set_hap_expr_reads[0];
hap_b_reads = set_hap_expr_reads[1];
list_hap_expr_reads = [list(set_hap_expr_reads[0]),list(set_hap_expr_reads[1])];
hap_var_reads = [[],[]];
out_block_gw_phase = "0/1";
if corrected_phases[0][0] == 0:
# haplotype A = GW phase 0
out_block_gw_phase = "0|1";
elif corrected_phases[0][0] == 1:
# haplotype A = GW phase 1
out_block_gw_phase = "1|0";
# record the reads that overlap each individual variant
for hap_index in range(0,2):
for var_index in range(0,len(used_vars)):
xvar_reads = [];
for xread in var_reads[hap_index][var_index]:
xvar_reads.append(list_hap_expr_reads[hap_index].index(xread));
hap_var_reads[hap_index].append(list_to_string(xvar_reads));
# convert to string
hap_var_reads[0] = list_to_string(hap_var_reads[0],sep=";");
hap_var_reads[1] = list_to_string(hap_var_reads[1],sep=";");
total_cov = sum(hap_expr_counts);
if total_cov > 0:
fields_out = [chrs[0],min(used_var_pos),max(used_var_pos),list_to_string(used_vars),len(used_vars),list_to_string(blacklisted_vars),len(blacklisted_vars),list_to_string(used_alleles[0]),list_to_string(used_alleles[1]),hap_a_count,hap_b_count,total_cov,out_block_gw_phase,cor_phase_stat];
if args.output_read_ids == 1:
fields_out += [list_to_string(hap_a_reads),list_to_string(hap_b_reads)];
fields_out += [str(max(haplotype_mafs)),bam_name];
fields_out += [hap_var_reads[0],hap_var_reads[1]];
stream_out_ase.write(str_join("\t",fields_out)+"\n");
## OUTPUT THE NETWORK FOR A SPECIFIC HAPLOTYPE
if args.output_network in variants:
#hap_a_network = generate_hap_network([variants, haplotype_a])[0];
#hap_b_network = generate_hap_network([variants, haplotype_b])[0];
hap_a_network = generate_hap_network_all(variants)[0];
#stream_out_network = open(args.o+".network.links.txt","w");
stream_out_network = open(out_prefix + ".network.links.txt", "w");
stream_out_network.write("\t".join(["variantA","variantB","connections","inferred\n"]));
nodes = [];
#for item in hap_a_network + hap_b_network:
hap_a_vars = [];
for item in hap_a_network:
if item[2] > 0:
stream_out_network.write(list_to_string(item,"\t")+"\n");
nodes.append(item[0]);
nodes.append(item[1]);
stream_out_network.close();
#stream_out_network = open(args.o+".network.nodes.txt","w");
stream_out_network = open(out_prefix + ".network.nodes.txt", "w");
stream_out_network.write("id\tindex\tassigned_hap\n");
for item in set(nodes):
xvar = item.split(":")[0];
xallele = item.split(":")[1];
var_index = variants.index(xvar);
if alleles[0][var_index] == xallele:
assigned_hap = "A";
else:
assigned_hap = "B";
stream_out_network.write(item+"\t"+str(var_index)+"\t"+assigned_hap+"\n");
stream_out_network.close()
## OUTPUT allele configuration
for variant_a, allele_a in zip(variants, alleles[0]):
for variant_b, allele_b in zip(variants, alleles[1]):
if variant_a != variant_b:
a_config = "";
if (dict_variant_reads[variant_a]['ref'] == allele_a and dict_variant_reads[variant_b]['ref'] == allele_b) or (dict_variant_reads[variant_a]['ref'] != allele_a and dict_variant_reads[variant_b]['ref'] != allele_b):
# ref and ref are in trans
# this is a compound het
a_config = "trans";
elif (dict_variant_reads[variant_a]['ref'] == allele_a and dict_variant_reads[variant_b]['ref'] != allele_b) or (dict_variant_reads[variant_a]['ref'] != allele_a and dict_variant_reads[variant_b]['ref'] == allele_b):
# ref and ref are in cis
a_config = "cis";
if a_config != "":
stream_out_allele_configs.write("\t".join([variant_a,dict_variant_reads[variant_a]['rsid'],variant_b,dict_variant_reads[variant_b]['rsid'],a_config])+"\n");
# update pi_block_value after the loop is over
if args.process_slow == 1:
pi_block_value = block_index
else: pi_block_value = 0
#output read counts for unphased variants
if args.unphased_vars == 1:
singletons = set(dict_variant_reads.keys()) - set(all_variants);
for variant in singletons:
dict_var = dict_variant_reads[variant];
chrom = dict_var['chr'];
pos = int(dict_var['pos']);
# check to see if variant is blacklisted
if chrom+"_"+str(pos) not in set_haplo_blacklist:
for bam_i in range(0,len(bam_list)):
if bam_i not in haplo_count_bam_exclude:
bam_name = bam_names[bam_i];
if bam_i in dict_var['haplo_reads'][0]:
hap_a_count = len(set(dict_var['haplo_reads'][0][bam_i]));
hap_a_reads = set(dict_var['haplo_reads'][0][bam_i]);
else:
hap_a_count = 0;
hap_a_reads = [];
if bam_i in dict_var['haplo_reads'][1]:
hap_b_count = len(set(dict_var['haplo_reads'][1][bam_i]));
hap_b_reads = set(dict_var['haplo_reads'][1][bam_i]);
else:
hap_b_count = 0;
hap_b_reads = [];
total_cov = int(hap_a_count)+int(hap_b_count);
if total_cov > 0:
if "-" not in dict_var['phase']:
phase_string = str(dict_var['phase'].index(dict_var['alleles'][0]))+"|"+str(dict_var['phase'].index(dict_var['alleles'][1]));
else:
phase_string = "0/1";
fields_out = [dict_var['chr'],str(dict_var['pos']),str(dict_var['pos']),variant,str(1),"",str(0),dict_var['alleles'][0],dict_var['alleles'][1],str(hap_a_count),str(hap_b_count),str(total_cov),phase_string,"1"];
if args.output_read_ids == 1:
fields_out += [list_to_string(hap_a_reads),list_to_string(hap_b_reads)];
fields_out += [str(dict_var['maf']),bam_name];
fields_out += ["",""];
stream_out_ase.write("\t".join(fields_out)+"\n");
#output haplotypes for unphased variants (if enabled)
for variant in singletons:
dict_var = dict_variant_reads[variant];
total_cov = len(dict_var['read_set'][0])+len(dict_var['read_set'][1]);
# make sure it is actually phased
if "-" not in dict_var['phase']:
phase_string = str(dict_var['phase'].index(dict_var['alleles'][0]))+"|"+str(dict_var['phase'].index(dict_var['alleles'][1]));
else:
phase_string = "-|-";
if args.unique_ids == 0:
out_name = dict_var['rsid'];
else:
out_name = variant;
stream_out.write(dict_var['chr']+"\t"+str(dict_var['pos']-1)+"\t"+str(dict_var['pos'])+"\t"+str(1)+"\t"+str(1)+"\t"+out_name+"\t"+dict_var['alleles'][0]+"|"+dict_var['alleles'][1]+"\t"+str(len(dict_var['read_set'][0]))+"\t"+str(len(dict_var['read_set'][1]))+"\t"+str(total_cov)+"\t"+str(0)+"\t"+str(0)+"\t"+phase_string+"\t"+str(float('nan'))+"\t"+phase_string+"\t"+str(float('nan'))+"\n");
stream_out.close();
stream_out_ase.close();
stream_out_allele_configs.close();
# output VCF
if args.write_vcf == 1:
unphased_phased, phase_corrected = write_vcf(out_prefix, chrom_of_interest);
total_time = time.time() - start_time;
fun_flush_print('')
fun_flush_print(" COMPLETED using %d reads in %d seconds using %d threads"%(total_reads,total_time,args.threads));
fun_flush_print(" PHASED %d of %d all variants (= %f) with at least one other variant"%(len(all_variants),het_count,float(len(all_variants))/float(het_count)));
if args.write_vcf == 1:
if unphased_count > 0:
fun_flush_print(" GENOME WIDE PHASED %d of %d unphased variants (= %f)"%(unphased_phased,unphased_count,float(unphased_phased)/float(unphased_count)));
fun_flush_print(" GENOME WIDE PHASE CORRECTED %d of %d variants (= %f)"%(phase_corrected,het_count,float(phase_corrected)/float(het_count)));
print(' Global maximum memory usage: %.2f (mb)' % current_mem_usage())
if args.process_slow == 1:
print(' Completed processes for contig/chromosome "{}" in {} hh:mm:ss'.
format(chromosome, time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time))))
def generate_connectivity_map(chrom):
global read_vars;
global dict_variant_reads;
dict_variant_overlap = collections.OrderedDict();
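# structure built below (hypothetical example, assuming "_" as the id separator):
# {'chr1': {'chr1_100_A_G': ['chr1_250_C_T', ...], ...}} -- variants that share at least one read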
for read_id in read_vars[chrom].keys():
overlapped_variants = read_vars[chrom][read_id];
for variant in overlapped_variants:
var_chr = dict_variant_reads[variant]['chr'];
for other_variant in overlapped_variants:
other_var_chr = dict_variant_reads[other_variant]['chr'];
# Restrict to variants on the same chromosome; this speeds things up and allows parallelization
# might not be desired in some very specific cases (e.g. trans-splicing)
if var_chr == other_var_chr and other_variant != variant:
if var_chr not in dict_variant_overlap: dict_variant_overlap[var_chr] = collections.OrderedDict()
if variant not in dict_variant_overlap[var_chr]: dict_variant_overlap[var_chr][variant] = [];
dict_variant_overlap[var_chr][variant].append(other_variant);
return(dict_variant_overlap);
def process_mapping_result(input):
global use_as_cutoff;
global as_cutoff;
global bam_index;
global haplo_count_bam_exclude;
dict_variant_reads = collections.OrderedDict()
read_vars = collections.OrderedDict()
stream_in = open(input, "r");
total_reads = 0;
chrom = "";
mapped_reads = 0;
for line in stream_in:
fields = line.rstrip().split("\t");
#read_name variant_id rs_id read_allele alignment_score genotype maf
if use_as_cutoff == False or int(fields[4]) >= as_cutoff:
read_id = fields[0];
var_id = fields[1];
chrom = var_id.split(args.id_separator)[0];
read_allele = fields[3];
if var_id not in dict_variant_reads: dict_variant_reads[var_id] = generate_variant_dict(fields);
if read_allele in dict_variant_reads[var_id]['alleles']:
# add to the quick lookup dictionary
if read_id not in read_vars: read_vars[read_id] = [];
read_vars[read_id].append(var_id);
allele_index = dict_variant_reads[var_id]['alleles'].index(read_allele)
dict_variant_reads[var_id]['reads'][allele_index].append(read_id);
mapped_reads += 1;
if bam_index not in haplo_count_bam_exclude or len(haplo_count_bam_exclude) == 0:
if bam_index not in dict_variant_reads[var_id]['haplo_reads'][allele_index]: dict_variant_reads[var_id]['haplo_reads'][allele_index][bam_index] = [];
dict_variant_reads[var_id]['haplo_reads'][allele_index][bam_index].append(read_id);
else:
dict_variant_reads[var_id]['other_reads'].append(read_id);
total_reads += 1;
stream_in.close();
return([dict_variant_reads,read_vars,total_reads, chrom]);
def call_mapping_script(input):
global args;
global devnull;
chrom = input[0];
bed_out = input[1];
mapper_out = input[2];
samtools_arg = input[3];
bam = input[4];
mapq = input[5];
isize = input[6];
mapping_result = tempfile.NamedTemporaryFile(delete=False);
mapping_result.close();
# Check the error code from the subprocess; if it is not 0, the file it writes may be truncated and give unexpected, wrong results.
run_cmd = "samtools view -h "+bam+" '"+chrom+"': | samtools view -Sh "+samtools_arg+" -L "+bed_out+" -q "+mapq+" - | "+args.python_string+" "+return_script_path()+"/call_read_variant_map.py --baseq "+str(args.baseq)+" --splice 1 --isize_cutoff "+str(isize)+" --variant_table "+mapper_out+" --o "+mapping_result.name
error_code = subprocess.check_call("set -euo pipefail && "+run_cmd, stdout=devnull, shell=True, executable='/bin/bash')
if error_code != 0:
raise RuntimeError("subprocess.call of call_read_variant_map.py exited with an error, with call: %s"%(run_cmd))
fun_flush_print(" completed chromosome %s..."%(chrom));
return(mapping_result.name);
def generate_mapping_table(input):
global args;
global temp_files;
chrom = input[0];
chrom = args.chr_prefix + chrom;
vcf_lines = input[1];
bed_out = tempfile.NamedTemporaryFile(delete=False, mode='wt');
mapper_out = tempfile.NamedTemporaryFile(delete=False, mode='wt');
het_count = 0;
total_indels_excluded = 0;
temp_files.append(bed_out.name);
temp_files.append(mapper_out.name);
for vcf_columns in vcf_lines:
pos = vcf_columns[1];
rs_id = vcf_columns[2];
alt_alleles = vcf_columns[4].split(",");
all_alleles = [vcf_columns[3]] + alt_alleles;
unique_id = chrom+args.id_separator+pos+args.id_separator+(args.id_separator.join(all_alleles));
geno_string = vcf_columns[9];
genotype = vcf_columns[10];
maf = None;
if args.gw_phase_method == 1:
info_fields = annotation_to_dict(vcf_columns[7])
if args.gw_af_field in info_fields:
# make sure to get the right index if multi-allelic site
afs = map(float, info_fields[args.gw_af_field].split(","));
# make sure that there are the same number of allele frequencies as alternative variants
if len(afs) == len(alt_alleles):
use_afs = [];
for allele in list(genotype):
if allele != "." and int(allele) != 0:
use_afs.append(int(allele) - 1);
# if there are multiple alternative alleles use the lowest MAF
if len(use_afs) > 0:
maf = min([min([afs[x],1-afs[x]]) for x in use_afs]);
max_allele_size = max([len(x) for x in all_alleles]);
if (max_allele_size == 1 or args.include_indels == 1):
mapper_out.write("\t".join([chrom,vcf_columns[1], unique_id, rs_id,
",".join(all_alleles), str(len(vcf_columns[3])),
geno_string, str(maf)]) + "\n")
bed_out.write("\t".join([chrom, str(int(vcf_columns[1]) - 1), vcf_columns[1]]) + "\n");
het_count += 1;
else:
total_indels_excluded += 1;
bed_out.close();
mapper_out.close();
return([chrom, het_count, total_indels_excluded, bed_out.name, mapper_out.name]);
def return_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]));
def generate_variant_dict(fields):
#read_name variant_id rs_id read_allele alignment_score genotype maf
id_split = fields[1].split(args.id_separator);
all_alleles = id_split[2:len(id_split)];
genotype = list(fields[5]);
is_phased = 0;
if "|" in genotype:
genotype.remove("|");
is_phased = 1;
if "/" in genotype: genotype.remove("/");
# get only the alleles this individual has
ind_alleles = [];
for i in range(0,len(all_alleles)):
if str(i) in genotype:
ind_alleles.append(all_alleles[i]);
# get phasing
phase = [];
if is_phased == 1:
for index in genotype:
phase.append(all_alleles[int(index)]);
else:
phase = ["-","-"];
maf = fields[6];
try:
maf = float(maf);
except:
maf = 0;
# if rsid is "." or "" then set rsID to the uniqueID
if fields[2] != "." and fields[2] != "":
rsid = fields[2];
else:
rsid = fields[1];
return collections.OrderedDict(
[("id", fields[1]), ("rsid", rsid), ("ref", all_alleles[0]),
("chr", id_split[0]), ("pos", int(id_split[1])), ("alleles", ind_alleles),
("phase", phase), ("gw_phase", phase), ("maf", maf), ("other_reads", []),
("reads", [[] for i in range(len(ind_alleles))]),
("haplo_reads", [collections.OrderedDict() for i in range(len(ind_alleles))])])
#return({"id":fields[1], "rsid":rsid,"ref":all_alleles[0],"chr":id_split[0],"pos":int(id_split[1]),"alleles":ind_alleles,"phase":phase, "gw_phase":phase, "maf":maf, "other_reads":[], "reads":[[] for i in range(len(ind_alleles))], "haplo_reads":[{} for i in range(len(ind_alleles))]});
def phase_block_container(input):
#stream_out = open(input[0],"w");
output = [];
for i in input:
output.append(phase_block(i));
#for block in phase_block_result:
# stream_out.write(",".join(block)+"\n");
#stream_out.close();
return(output);
def phase_block(input):
global args;
variants = input[0];
variant_connections = copy.deepcopy(input[1]);
allele_connections = copy.deepcopy(input[2]);
largest_block = [];
# first get all variants that have more than one connection
multi_connected_variants = [];
for variant in variant_connections:
if len(variant_connections[variant]) > 1:
multi_connected_variants.append(variant);
# now see how many possible connections there are to remove
# can only remove connections between two multiconnected variants
removable_connections = [];
for variant in multi_connected_variants:
connections = variant_connections[variant];
for connection in connections:
if connection in multi_connected_variants:
if connection+"|"+variant not in removable_connections and variant+"|"+connection not in removable_connections:
removable_connections.append(variant+"|"+connection);
remove_number = 0;
remove_connections = [""];
while remove_number <= len(removable_connections):
# prune connections
# add first no connection removal
to_remove = list(itertools.combinations(range(len(removable_connections)), remove_number));
if len(to_remove) > args.max_prune:
print_warning("maximum number of pruning iterations reached for %s"%(variants));
break;
else:
remove_connections += to_remove;
remove_number += 1;
for remove in remove_connections:
remaining_hap_pool = copy.deepcopy(input[2]);
for remove_index in remove:
remove_keys = removable_connections[remove_index].split("|");
if remove_keys[0]+":0" in remaining_hap_pool[remove_keys[1]+":0"]: remaining_hap_pool[remove_keys[1]+":0"].remove(remove_keys[0]+":0")
if remove_keys[0]+":1" in remaining_hap_pool[remove_keys[1]+":0"]: remaining_hap_pool[remove_keys[1]+":0"].remove(remove_keys[0]+":1")
if remove_keys[0]+":0" in remaining_hap_pool[remove_keys[1]+":1"]: remaining_hap_pool[remove_keys[1]+":1"].remove(remove_keys[0]+":0")
if remove_keys[0]+":1" in remaining_hap_pool[remove_keys[1]+":1"]: remaining_hap_pool[remove_keys[1]+":1"].remove(remove_keys[0]+":1")
if remove_keys[1]+":0" in remaining_hap_pool[remove_keys[0]+":0"]: remaining_hap_pool[remove_keys[0]+":0"].remove(remove_keys[1]+":0")
if remove_keys[1]+":1" in remaining_hap_pool[remove_keys[0]+":0"]: remaining_hap_pool[remove_keys[0]+":0"].remove(remove_keys[1]+":1")
if remove_keys[1]+":0" in remaining_hap_pool[remove_keys[0]+":1"]: remaining_hap_pool[remove_keys[0]+":1"].remove(remove_keys[1]+":0")
if remove_keys[1]+":1" in remaining_hap_pool[remove_keys[0]+":1"]: remaining_hap_pool[remove_keys[0]+":1"].remove(remove_keys[1]+":1")
set_remaining_hap_pool = set(remaining_hap_pool.keys());
while len(remaining_hap_pool) > 0:
# this will construct many iterations of the same haplotype, which need to be filtered out;
# start the process with a variant pair;
seed_var = remaining_hap_pool.keys()[0];
seed = set([seed_var] + list(remaining_hap_pool[seed_var]));
del remaining_hap_pool[seed_var];
set_remaining_hap_pool.remove(seed_var);
result = build_haplotype_v3(seed,remaining_hap_pool,set_remaining_hap_pool);
new_hap = list(result[0]);
# sort by location
new_hap = sort_var_ids(new_hap);
remaining_hap_pool = result[1];
set_remaining_hap_pool = result[2];
if len(new_hap) > len(largest_block) and test_loop_back(new_hap) == 0:
largest_block = new_hap;
# check to see if we have a full haplotype
if len(largest_block) == len(variants):
return([largest_block]);
# if we get here we failed to find a full block, so just return the best one and try to phase the remainder
# remove phased variants from connections
unphased_vars = [];
unphased_var_connections = collections.OrderedDict()
for variant in variants:
if variant+":0" in largest_block or variant+":1" in largest_block:
del allele_connections[variant+":0"];
del allele_connections[variant+":1"];
for connected_var in allele_connections:
if variant+":0" in allele_connections[connected_var]: allele_connections[connected_var].remove(variant+":0");
if variant+":1" in allele_connections[connected_var]: allele_connections[connected_var].remove(variant+":1");
else:
unphased_vars.append(variant);
unphased_var_connections[variant] = [];
if variant in variant_connections:
for connection in variant_connections[variant]:
if connection+":0" not in largest_block and connection+":1" not in largest_block:
unphased_var_connections[variant].append(connection);
#print(largest_block);
#print("FAILED TO RESOLVE HAPLOTYPE: %s"%(input[1]));
if len(unphased_vars) > 1:
if len(largest_block) == 0:
print_warning("phasing failed for %s. Attempted to remove %d combinations of %d connections"%(variants,len(list(remove_connections)),remove_number));
return([[]]);
else:
print_warning("failed to phase full haplotype for %s, splitting into fragments, max haplotype = %s"%(variants,largest_block));
return([largest_block]+phase_block([unphased_vars,unphased_var_connections,allele_connections]));
else:
return([largest_block]+[[unphased_vars[0]+":0"]]);
def test_loop_back(block):
# test to see if a haplotype block ever includes the same variant more than once
# strip allele
block = [x.split(":")[0] for x in block];
# count occurrences of each variant
counts = [block.count(x) for x in set(block)];
if max(counts) == 1:
return(0);
else:
return(1);
def test_variant_connection(input):
global noise_e;
global dict_variant_reads;
chr, variant_a, variant_b = input;
# there are only two possible configurations, determine evidence for each
# a[ref]b[ref] | a[alt]b[alt]
hap_config_a_support = len(dict_variant_reads[variant_a]['read_set'][0] & dict_variant_reads[variant_b]['read_set'][0]) + len(dict_variant_reads[variant_a]['read_set'][1] & dict_variant_reads[variant_b]['read_set'][1])
# a[ref]b[alt] | a[alt]b[ref]
hap_config_b_support = len(dict_variant_reads[variant_a]['read_set'][1] & dict_variant_reads[variant_b]['read_set'][0]) + len(dict_variant_reads[variant_a]['read_set'][0] & dict_variant_reads[variant_b]['read_set'][1])
# determine if phasing is concordant with what was specified in the input VCF
phase_concordant = ".";
# make sure the input VCF had phase
if "-" not in dict_variant_reads[variant_a]['phase'] and "-" not in dict_variant_reads[variant_b]['phase']:
if hap_config_a_support > hap_config_b_support:
if dict_variant_reads[variant_a]['phase'].index(dict_variant_reads[variant_a]['alleles'][0]) == dict_variant_reads[variant_b]['phase'].index(dict_variant_reads[variant_b]['alleles'][0]):
phase_concordant = 1;
else:
phase_concordant = 0;
elif hap_config_a_support < hap_config_b_support:
if dict_variant_reads[variant_a]['phase'].index(dict_variant_reads[variant_a]['alleles'][1]) == dict_variant_reads[variant_b]['phase'].index(dict_variant_reads[variant_b]['alleles'][0]):
phase_concordant = 1;
else:
phase_concordant = 0;
# also get the connections from reads where the bases did not match to either ref or alt
# a[other] -> b[ref]
other_base_connections = len(dict_variant_reads[variant_a]['other_read_set'] & dict_variant_reads[variant_b]['read_set'][0]);
# a[other] -> b[alt]
other_base_connections += len(dict_variant_reads[variant_a]['other_read_set'] & dict_variant_reads[variant_b]['read_set'][1]);
# a[ref] -> b[other]
other_base_connections += len(dict_variant_reads[variant_a]['read_set'][0] & dict_variant_reads[variant_b]['other_read_set']);
# a[alt] -> b[other]
other_base_connections += len(dict_variant_reads[variant_a]['read_set'][1] & dict_variant_reads[variant_b]['other_read_set']);
# a[other] -> b[other]
other_base_connections += len(dict_variant_reads[variant_a]['other_read_set'] & dict_variant_reads[variant_b]['other_read_set']);
c_supporting = max(hap_config_a_support,hap_config_b_support);
c_total = hap_config_a_support + hap_config_b_support + other_base_connections;
if hap_config_a_support > hap_config_b_support:
chosen_config = 0;
elif hap_config_a_support < hap_config_b_support:
chosen_config = 1;
else:
chosen_config = -1;
# if no reads support the phase then strip this connection
if c_supporting == 0:
conflicting_config_p = 0;
elif c_total - c_supporting > 0:
# otherwise do the test
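# a sketch of the intent, inferred from the formula itself rather than external documentation:
# binom.cdf gives the probability of observing at most c_supporting concordant connections out
# of c_total by chance, where 6*noise_e + 10*noise_e^2 approximates the probability that
# sequencing noise corrupts a two-site connection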
conflicting_config_p = binom.cdf(c_supporting,c_total,1-((6*noise_e)+(10*math.pow(noise_e,2))));
else:
# only bother doing the test if there are some conflicting reads
conflicting_config_p = 1;
return([chr,variant_a,variant_b,conflicting_config_p,c_supporting,c_total,phase_concordant,chosen_config]);
def new_temp_file():
xfile = tempfile.NamedTemporaryFile(delete=False)
xfile.close();
return(xfile.name);
def write_vcf(out_prefix, chromosome_of_interest):
global args;
global haplotype_lookup;
global dict_variant_reads;
global haplotype_pvalue_lookup
global sample_column;
global csi_index;
fun_flush_print("#7. Outputting phased VCF...");
if args.gw_phase_vcf == 1:
fun_flush_print(" GT field is being updated with phASER genome wide phase when applicable. This can be changed using the --gw_phase_vcf argument.");
elif args.gw_phase_vcf == 2:
fun_flush_print(" GT field is being updated with either phASER genome wide phase or phASER block phase with PS specified, depending on phase anchoring quality.");
else:
fun_flush_print(" GT field is not being updated with phASER genome wide phase. This can be changed using the --gw_phase_vcf argument.");
#if args.chr != "":
#decomp_str = "tabix -h "+args.vcf+" "+args.chr+":"
if chromosome_of_interest != "":
decomp_str = "tabix -h "+args.vcf+" "+ chromosome_of_interest + ":"
else:
decomp_str = "gunzip -c "+args.vcf;
tmp_out = tempfile.NamedTemporaryFile(delete=False);
tmp_out.close();
subprocess.check_call("set -euo pipefail && "+decomp_str + " | cut -f 1-9,"+str(sample_column+1)+" > "+tmp_out.name,shell=True, executable='/bin/bash')
vcf_in = open(tmp_out.name,"r");
#vcf_out = open(args.o+".vcf","w");
vcf_out = open(out_prefix + ".vcf", "w");
phase_corrections = 0;
unphased_phased = 0;
set_phased_vars = set(haplotype_lookup.keys());
format_text = "";
for line in vcf_in:
vcf_columns = line.replace("\n","").split("\t");
if "##FORMAT" in line:
format_text += line;
vcf_out.write(line);
elif line.startswith("#CHROM"):
# we reached the end of the format section
# dump it and add phaser format fields if needed
if "##FORMAT=<ID=PG," not in format_text: vcf_out.write("##FORMAT=<ID=PG,Number=1,Type=String,Description=\"phASER Local Genotype\">\n");
if "##FORMAT=<ID=PB," not in format_text: vcf_out.write("##FORMAT=<ID=PB,Number=1,Type=String,Description=\"phASER Local Block\">\n");
if "##FORMAT=<ID=PI," not in format_text: vcf_out.write("##FORMAT=<ID=PI,Number=1,Type=String,Description=\"phASER Local Block Index (unique for each block)\">\n");
if "##FORMAT=<ID=PM," not in format_text: vcf_out.write("##FORMAT=<ID=PM,Number=1,Type=String,Description=\"phASER Local Block Maximum Variant MAF\">\n");
if "##FORMAT=<ID=PW," not in format_text: vcf_out.write("##FORMAT=<ID=PW,Number=1,Type=String,Description=\"phASER Genome Wide Genotype\">\n");
if "##FORMAT=<ID=PC," not in format_text: vcf_out.write("##FORMAT=<ID=PC,Number=1,Type=String,Description=\"phASER Genome Wide Confidence\">\n");
if args.gw_phase_vcf == 2:
if "##FORMAT=<ID=PS," not in format_text: vcf_out.write("##FORMAT=<ID=PS,Number=1,Type=String,Description=\"Phase Set\">\n");
# if multiple samples only output phased sample
out_cols = vcf_columns[0:9] + [vcf_columns[9]];
vcf_out.write("\t".join(out_cols)+"\n");
elif line[0:1] == "#":
vcf_out.write(line);
else:
##CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NA06986
id = vcf_columns[2];
chrom = vcf_columns[0];
pos = int(vcf_columns[1]);
#if args.chr == "" or chrom == args.chr:
if chromosome_of_interest == "" or chrom == chromosome_of_interest:
if "GT" in vcf_columns[8]:
gt_index = vcf_columns[8].split(":").index("GT");
genotype = list(vcf_columns[9].split(":")[gt_index]);
if "|" in genotype: genotype.remove("|");
if "/" in genotype: genotype.remove("/");
# get only the alleles this individual has
alt_alleles = vcf_columns[4].split(",");
all_alleles = [vcf_columns[3]] + alt_alleles;
ind_alleles = [];
for i in range(0,len(all_alleles)):
if str(i) in genotype:
ind_alleles.append(all_alleles[i]);
# make sure there are as many entries in each sample as there should be before adding new columns
# if there are entries missing add blanks
n_fields = len(vcf_columns[8].split(":"));
for i in range(9, len(vcf_columns)):
sample_fields = len(vcf_columns[i].split(":"));
if sample_fields != n_fields:
missing_cols = n_fields - sample_fields;
vcf_columns[i] += ":" * missing_cols;
# update the format tags only if they are needed
vcf_format_fields = vcf_columns[8].split(":");
phaser_tags = ['PG','PB','PI','PW','PC','PM'];
for tag in phaser_tags:
if tag not in vcf_format_fields: vcf_format_fields.append(tag);
vcf_columns[8] = ":".join(vcf_format_fields);
#generate a unique id
unique_id = chrom + args.id_separator + str(pos) + args.id_separator + (args.id_separator.join(all_alleles));
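# hypothetical example, assuming "_" as the id separator: "chr1_10583_A_G" for an A/G variant at chr1:10583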
if unique_id in set_phased_vars:
# retrieve the correct allele number of each allele
# issue because if a site is multi-allelic it will be converted to 0/1 (e.g. 0/2 converted to 0/1)
alleles_out = [];
gw_phase_out = ["",""];
block_index = haplotype_lookup[unique_id][2];
for allele in haplotype_lookup[unique_id][1].split("|"):
allele_base = dict_variant_reads[unique_id]['alleles'][int(allele)];
vcf_allele_index = all_alleles.index(allele_base);
# get the genome wide phase
gw_phase = dict_variant_reads[unique_id]['gw_phase'][int(allele)]
if isinstance(gw_phase, int):
gw_phase_out[gw_phase] = str(vcf_allele_index);
alleles_out.append(str(vcf_allele_index));
# get rsID for each of the variants on the haplotype
variants_out = [];
for variant in haplotype_lookup[unique_id][0]:
# if ":" is in the rsid need to replace it, otherwise it will mess up output
variants_out.append(dict_variant_reads[variant]['rsid'].replace(":","_"));
# get the p-value, if there was one for the block
# pval = haplotype_pvalue_lookup[list_to_string(haplotype_lookup[unique_id][0])];
gw_stat = haplotype_gw_stat_lookup[list_to_string(haplotype_lookup[unique_id][0])];
max_block_maf = haplotype_max_maf_lookup[list_to_string(haplotype_lookup[unique_id][0])];
# if desired to overwrite input phase with GW phase, do it here
if "-" not in gw_phase_out:
xfields = vcf_columns[9].split(":");
new_phase = "|".join(gw_phase_out);
if gw_stat >= args.gw_phase_vcf_min_confidence:
if "|" in xfields[gt_index] and xfields[gt_index] != new_phase: phase_corrections += 1;
if "/" in xfields[gt_index] and xfields[gt_index] != "./." and xfields[gt_index] != new_phase: unphased_phased += 1;
if args.gw_phase_vcf == 1 or args.gw_phase_vcf == 2:
xfields[gt_index] = new_phase;
vcf_columns[9] = ":".join(xfields);
if args.gw_phase_vcf == 2 and gw_stat < args.gw_phase_vcf_min_confidence:
xfields[gt_index] = "|".join(alleles_out);
vcf_columns[9] = ":".join(xfields);
sample_fields = vcf_columns[9].split(":");
sample_fields += ['']*(len(vcf_format_fields) - len(sample_fields));
sample_fields[vcf_format_fields.index('PG')] = "|".join(alleles_out);
sample_fields[vcf_format_fields.index('PB')] = list_to_string(variants_out);
sample_fields[vcf_format_fields.index('PI')] = str(block_index);
sample_fields[vcf_format_fields.index('PM')] = str(max_block_maf);
sample_fields[vcf_format_fields.index('PW')] = "|".join(gw_phase_out);
sample_fields[vcf_format_fields.index('PC')] = str(gw_stat);
# ADD PS IF NEEDED
if args.gw_phase_vcf == 2 and gw_stat < args.gw_phase_vcf_min_confidence:
if 'PS' not in vcf_format_fields:
vcf_columns[8] += ":PS";
vcf_format_fields.append("PS");
sample_fields.append('');
sample_fields[vcf_format_fields.index('PS')] = str(block_index);
vcf_columns[9] = ":".join(sample_fields);
else:
sample_fields = vcf_columns[9].split(":");
sample_fields += ['']*(len(vcf_format_fields) - len(sample_fields));
sample_fields[vcf_format_fields.index('PG')] = "/".join(sorted(genotype));
sample_fields[vcf_format_fields.index('PB')] = '.';
sample_fields[vcf_format_fields.index('PI')] = '.';
sample_fields[vcf_format_fields.index('PM')] = '.';
sample_fields[vcf_format_fields.index('PW')] = vcf_columns[9].split(":")[gt_index];
sample_fields[vcf_format_fields.index('PC')] = '.';
vcf_columns[9] = ":".join(sample_fields);
# if VCF contains multiple samples, only output the phased sample
out_cols = vcf_columns[0:9] + [vcf_columns[9]];
vcf_out.write("\t".join(out_cols)+"\n");
vcf_out.close();
os.remove(tmp_out.name);
fun_flush_print(" Compressing and tabix indexing output VCF...");
tabix_cmd = "tabix";
if csi_index == 1: tabix_cmd += " --csi";
#subprocess.check_call("set -euo pipefail && "+"bgzip -f "+args.o+".vcf; "+tabix_cmd+" -f -p vcf "+args.o+".vcf.gz", shell=True, executable='/bin/bash')
subprocess.check_call("set -euo pipefail && " + "bgzip -f " + \
out_prefix + ".vcf; " + tabix_cmd + " -f -p vcf " \
+ out_prefix + ".vcf.gz", shell=True, executable='/bin/bash')
return([unphased_phased, phase_corrections]);
def str_join(joiner,list):
list = map(str, list);
return(joiner.join(list));
def build_haplotypes(input):
dict_variant_overlap = copy.deepcopy(input);
block_haplotypes = [];
total_hap_pool = len(dict_variant_overlap);
remaining_hap_pool = dict_variant_overlap;
set_remaining_hap_pool = set(dict_variant_overlap.keys());
while len(remaining_hap_pool) > 0:
# this will construct many iterations of the same haplotype, which need to be filtered out;
# start the process with a variant pair;
seed_var = remaining_hap_pool.keys()[0];
seed = set([seed_var] + list(remaining_hap_pool[seed_var]));
del remaining_hap_pool[seed_var];
set_remaining_hap_pool.remove(seed_var);
result = build_haplotype_v3(seed,remaining_hap_pool,set_remaining_hap_pool);
new_hap = list(result[0]);
# sort by location
new_hap = sort_var_ids(new_hap);
remaining_hap_pool = result[1];
set_remaining_hap_pool = result[2];
block_haplotypes.append(new_hap);
return(block_haplotypes);
def sort_var_ids(ids):
xsplit = [x.split(args.id_separator) for x in ids];
xsort = sorted(xsplit, key = lambda x: (x[0], int(x[1])))
return([args.id_separator.join(x) for x in xsort]);
def count_hap_junctions(block):
counted = set([]);
reads = [];
for var_index in range(0,len(block)):
for var_allele in range(0,2):
for other_index in range(0, len(block)):
if other_index != var_index:
for other_allele in range(0,2):
if (str(var_index)+":"+str(var_allele)+":"+str(other_index)+":"+str(other_allele) not in counted) and (str(other_index)+":"+str(other_allele)+":"+str(var_index)+":"+str(var_allele) not in counted):
reads += list(dict_variant_reads[block[var_index]]['read_set'][var_allele] & dict_variant_reads[block[other_index]]['read_set'][other_allele]);
counted.add(str(var_index)+":"+str(var_allele)+":"+str(other_index)+":"+str(other_allele));
return([block,len(reads)]);
def count_hap_reads(input):
block = input[0];
configuration = input[1];
parent_block = None;
block_number = None;
if len(input) == 4:
parent_block = input[2];
block_number = input[3];
global dict_variant_reads;
reads = [];
counted = set([]);
# sum up supporting reads between all configs
for var_index in range(0,len(block)):
for other_index in range(0, len(block)):
if other_index != var_index:
if (str(var_index)+":"+str(other_index) not in counted) and (str(other_index)+":"+str(var_index) not in counted):
# the noise test should be done here, only pairs where the signal is above noise should be counted.
reads += list(dict_variant_reads[block[var_index]]['read_set'][int(configuration[var_index])] & dict_variant_reads[block[other_index]]['read_set'][int(configuration[other_index])]);
counted.add(str(var_index)+":"+str(other_index));
return([block, configuration, len(reads), parent_block, block_number]);
def generate_hap_network_all(input):
block = input;
global dict_variant_reads;
reads = [];
counted = set([]);
out_junctions = [];
for var_index in range(0,len(block)):
for other_index in range(0, len(block)):
if other_index != var_index:
for allele_index in range (0,2):
for other_allele_index in range(0,2):
if (str(var_index)+":"+str(allele_index)+":"+str(other_index)+":"+str(other_allele_index) not in counted) and (str(other_index)+":"+str(other_allele_index)+":"+str(var_index)+":"+str(allele_index) not in counted):
junctions = list(dict_variant_reads[block[var_index]]['read_set'][allele_index] & dict_variant_reads[block[other_index]]['read_set'][other_allele_index]);
out_junctions.append([dict_variant_reads[block[var_index]]['id']+":"+dict_variant_reads[block[var_index]]['alleles'][allele_index],dict_variant_reads[block[other_index]]['id']+":"+dict_variant_reads[block[other_index]]['alleles'][other_allele_index], len(junctions), 0]);
out_junctions.append([dict_variant_reads[block[var_index]]['id']+":"+dict_variant_reads[block[var_index]]['alleles'][int(not allele_index)],dict_variant_reads[block[other_index]]['id']+":"+dict_variant_reads[block[other_index]]['alleles'][int(not other_allele_index)], len(junctions), 1]);
counted.add(str(var_index)+":"+str(allele_index)+":"+str(other_index)+":"+str(other_allele_index));
return([out_junctions, block]);
def generate_hap_network(input):
block = input[0];
configuration = input[1];
global dict_variant_reads;
reads = [];
counted = set([]);
out_junctions = [];
# sum up supporting reads between all configs
for var_index in range(0,len(block)):
for other_index in range(0, len(block)):
if other_index != var_index:
if (str(var_index)+":"+str(other_index) not in counted) and (str(other_index)+":"+str(var_index) not in counted):
## SHOULD FIRST CHECK TO MAKE SURE THIS ISN'T A READ PAIR THAT FAILED THE TEST
# actually I don't think this matters, it will always choose the most supported phase
junctions = list(dict_variant_reads[block[var_index]]['read_set'][int(configuration[var_index])] & dict_variant_reads[block[other_index]]['read_set'][int(configuration[other_index])]);
out_junctions.append([dict_variant_reads[block[var_index]]['rsid']+":"+dict_variant_reads[block[var_index]]['alleles'][int(configuration[var_index])],dict_variant_reads[block[other_index]]['rsid']+":"+dict_variant_reads[block[other_index]]['alleles'][int(configuration[other_index])], len(junctions), 0]);
out_junctions.append([dict_variant_reads[block[var_index]]['rsid']+":"+dict_variant_reads[block[var_index]]['alleles'][int(not int(configuration[var_index]))],dict_variant_reads[block[other_index]]['rsid']+":"+dict_variant_reads[block[other_index]]['alleles'][int(not int(configuration[other_index]))], len(junctions), 1]);
counted.add(str(var_index)+":"+str(other_index));
return([out_junctions, block, configuration]);
def get_allele_phase(allele, var_dict):
	try:
		return(var_dict['phase'].index(allele));
	except ValueError:
		# allele not present in the phase list
		return(float('nan'));
def build_haplotype_v3(set_haplotype, dict_all_associations, set_all_associations):
global args;
overlapping = set_haplotype & set_all_associations;
while len(overlapping) > 0:
for variant in overlapping:
set_haplotype = set_haplotype | dict_all_associations[variant];
del dict_all_associations[variant];
set_all_associations.remove(variant);
overlapping = set_haplotype & set_all_associations;
return([set_haplotype, dict_all_associations, set_all_associations])
def get_var_pos(var_fields):
return(int(var_fields.split(":")[0]));
def list_to_string(xlist,sep=","):
string_out = "";
for item in xlist:
string_out += str(item) + sep;
if len(sep) > 0:
string_out = string_out[:-len(sep)];
return(string_out);
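# a hedged aside: for the separators used in this script, list_to_string is
# equivalent to sep.join(str(item) for item in xlist), e.g.
# list_to_string([1, 2, 3]) == "1,2,3"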
def print_warning(text):
if args.show_warning == 1:
fun_flush_print(text);
def dict_from_info(info_field):
out_dict = collections.OrderedDict()
fields = info_field.split(";");
for field in fields:
sub_field = field.split("=");
if len(sub_field) == 2:
out_dict[sub_field[0]] = sub_field[1];
return(out_dict);
def fun_flush_print(text):
print(text);
sys.stdout.flush();
def fatal_error(text):
fun_flush_print(" FATAL ERROR: "+text);
sys.exit(1)
def print_debug(text):
if args.debug == 1:
fun_flush_print(text);
def pool_split(threads, data):
global args;
data_length = len(data);
pool_input = [];
	# calculate the pool size if all data were divided evenly across threads
	optimal_pool_size = data_length // threads;
	# unfortunately, due to OS limitations, the maximum output by a given thread is limited
	# so the pool size can't be too large
	# see: http://bugs.python.org/issue8426
	# so limit it to at most some value (set at 100,000 by default)
	# this is probably conservative, but the best number hasn't been determined yet
pool_size = min([args.max_items_per_thread, optimal_pool_size]);
if pool_size > 0:
		pool_inputs = data_length // pool_size;
for i in range(0,pool_inputs):
#last pool gets the remaining reads
if i == (pool_inputs-1):
pool_input.append(data[(i*pool_size):]);
else:
pool_input.append(data[(i*pool_size):((i+1)*pool_size)]);
else:
pool_input = [];
return(pool_input);
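# a hedged worked example of the chunking above, assuming args.max_items_per_thread = 3:
# pool_split(2, list(range(10))) gives optimal_pool_size = 10 // 2 = 5, so
# pool_size = min(3, 5) = 3 and pool_inputs = 10 // 3 = 3, yielding
# [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] (the last chunk absorbs the remainder)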
def pool_setup(pool_input):
global args;
threads = min([len(pool_input),args.threads]);
return (multiprocessing.Pool(processes=threads));
def parallelize(function, pool_input):
global args;
if len(pool_input) > 0:
threads = min([len(pool_input),args.threads]);
if args.threads > 1:
pool = multiprocessing.Pool(processes=threads);
pool_output = pool.map(function, pool_input);
pool.close() # no more tasks
pool.join() # wrap up current tasks
else:
pool_output = [];
for input in pool_input:
pool_output.append(function(input));
else:
pool_output = [];
return(pool_output);
def annotation_to_dict(text,sep=";"):
dict_out = collections.OrderedDict()
vars = text.split(sep);
for var in vars:
if "=" in var:
key = var.split("=")[0];
values = var.split("=")[1];
dict_out[key] = values;
return(dict_out);
def phase_v3(input):
global args;
variants = input[0];
variant_connections = input[1];
allele_connections = input[2];
# first check to see if haplotype is fully concordant
# if it is simply return the haplotype
xhap = resolve_phase(variants, allele_connections);
if xhap != None:
final_blocks = xhap;
else:
		# if there is no concordant phase, select the phase with the most support in terms of connections
		# first break up the block, if needed, into sub blocks at weak points
		# always split where spanning connections = 1
		# then if needed subsequently split at 2, 3, 4, etc.
if args.max_block_size == 0:
xmax = len(variants);
else:
xmax = args.max_block_size;
sub_blocks = split_by_weak(variants, variant_connections, xmax);
# now select the most supported phase in each sub block
		if len(sub_blocks) == 1:
			sub_block_phases = [sub_block_phase(xvars, allele_connections) for xvars in sub_blocks];
		else:
			sub_block_phases = [sub_block_phase(xvars, allele_connections, attempt_resolve = True) for xvars in sub_blocks];
# now phase sub blocks relative to each other
# sequentially from the left
split_phases = [];
final_phase = sub_block_phases[0];
split_start = 0;
for i in range(1, len(sub_block_phases)):
step_phases = [final_phase,sub_block_phases[i]];
			used_vars = sum([sum([len(y) for y in x]) for x in step_phases]) // 2;
#print(used_vars);
new_phase = sub_block_phase(variants[split_start:split_start+used_vars], allele_connections, step_phases);
			# if phasing that includes the next block introduces uncertainty then we need to split
if "-" in new_phase[0]:
split_phases+= [final_phase];
split_start = used_vars;
final_phase = sub_block_phases[i];
else:
final_phase = new_phase;
final_blocks = split_phases + [final_phase];
do_print = 1;
out_phase = [];
variant_index = 0;
for block in final_blocks:
out_block = [];
for allele in block[0]:
out_block.append(variants[variant_index]+":"+allele)
variant_index += 1;
if "-" not in out_block[0].split(":")[1]:
out_phase.append(out_block);
return(out_phase);
def resolve_phase(variants, allele_connections, clean_connections = False):
# if needed remove connections from allele_connections that are not in the variant list
if clean_connections == True:
set_variants = set(variants);
cleaned_connections = collections.OrderedDict()
for allele in allele_connections:
variant = allele.split(":")[0];
if variant in set_variants:
cleaned_connections[allele] = set([]);
for connection in allele_connections[allele]:
other_variant = connection.split(":")[0];
if other_variant in variants:
cleaned_connections[allele].add(connection);
allele_connections = cleaned_connections;
remaining_hap_pool = copy.deepcopy(allele_connections);
set_remaining_hap_pool = set(remaining_hap_pool.keys());
	seed_var = list(remaining_hap_pool.keys())[0];
seed = set([seed_var] + list(remaining_hap_pool[seed_var]));
del remaining_hap_pool[seed_var];
set_remaining_hap_pool.remove(seed_var);
result = build_haplotype_v3(seed,remaining_hap_pool,set_remaining_hap_pool);
new_hap = list(result[0]);
if len(new_hap) == len(variants):
output = "";
for xvar in variants:
if xvar+":0" in new_hap:
output += "0";
elif xvar+":1" in new_hap:
output += "1";
return([[output,inverse_conifg(output)]]);
else:
return(None);
def sub_block_phase(variants, allele_connections, sub_block_configs=[], attempt_resolve = False):
if len(sub_block_configs) > 0:
		# if we are given sub block phases then we are phasing sub blocks against each other
configurations = [];
configurations += [sub_block_configs[0][0] + sub_block_configs[1][0]];
configurations += [sub_block_configs[0][0] + sub_block_configs[1][1]];
configurations += [sub_block_configs[0][1] + sub_block_configs[1][0]];
configurations += [sub_block_configs[0][1] + sub_block_configs[1][1]];
else:
# first try to resolve phase
if attempt_resolve == True:
xhap = resolve_phase(variants, allele_connections, clean_connections = True);
if xhap != None:
return(xhap[0]);
# otherwise determine all possible configurations in this block
configurations = ["".join(seq) for seq in itertools.product("01", repeat=len(variants))];
supporting_connections = collections.OrderedDict()
set_variants = set(variants);
for configuration in configurations:
inverse_config = inverse_conifg(configuration);
# only test each haplotype once, not necessary to test complement
if configuration + "|" + inverse_config not in supporting_connections and inverse_config + "|" + configuration not in supporting_connections:
support = 0;
for variant, allele in zip(variants, configuration):
if allele != "-":
if variant+":"+allele in allele_connections:
for other_variant, other_allele in zip(variants, configuration):
if other_variant != variant:
if other_allele != "-":
if other_variant+":"+other_allele in allele_connections[variant+":"+allele]:
support += 1;
supporting_connections[configuration + "|" + inverse_config] = support;
# select connections with maximum support
max_support = max(supporting_connections.values());
best_configs = [];
for config in supporting_connections.keys():
if supporting_connections[config] == max_support:
best_configs.append(config);
if len(best_configs) == 1:
return(best_configs[0].split("|"));
else:
return(["-"*len(variants),"-"*len(variants)]);
def inverse_conifg(config):
out_config = "";
for allele in config:
if allele != "-":
out_config += str(int(not int(allele)));
else:
out_config += "-";
return(out_config)
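# a hedged example: inverse_conifg("01-1") returns "10-0"; each 0/1 allele is
# flipped while "-" (unresolved) positions are preserved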
def split_by_weak(variants, variant_connections, max_size):
#NOTE THE INPUT VARIANT LIST MUST BE SORTED BY POSITION
weak_points = find_weak_points(variants, variant_connections);
# first always split at points only spanned by one connection, there is no reason not to
haplo_fragments = [];
split_points = [];
split_at = 1;
max_frag = len(variants);
while max_frag > max_size or split_at == 1:
for position in sorted(weak_points.keys()):
if weak_points[position] == split_at:
if position + 1 not in split_points and position - 1 not in split_points:
split_points.append(position);
if len(split_points) > 0:
haplo_fragments = split_variants(variants, split_points);
else:
haplo_fragments = [variants];
max_frag = max([len(x) for x in haplo_fragments]);
split_at += 1;
return(haplo_fragments);
def split_variants(variants, split_points):
split_points = sorted(split_points);
split_variants = [];
for i in range(0,len(split_points)+1):
if i == 0:
split_variants.append(variants[:split_points[i]]);
elif i < len(split_points):
split_variants.append(variants[split_points[i-1]:split_points[i]]);
else:
split_variants.append(variants[split_points[i-1]:]);
return(split_variants);
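# a hedged example: split_variants(["a", "b", "c", "d"], [2]) returns
# [["a", "b"], ["c", "d"]]; a split point indexes the gap before the variant
# at that position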
def find_weak_points(variants, variant_connections):
	# this function reports how many connections cross each point, where a point lies between a pair of adjacent variants
	# it returns a dictionary with the count at each point
dict_counts = collections.OrderedDict()
for position in range(2,len(variants)-1):
dict_counts[position] = 0;
for xvar in variant_connections:
for connection in variant_connections[xvar]:
# check if variant spans position
if variants.index(xvar) < (position - 0.5) and variants.index(connection) > (position - 0.5):
dict_counts[position] += 1;
return(dict_counts);
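# a hedged worked example: for variants = ["a", "b", "c", "d", "e"] and
# variant_connections = {"a": {"c"}, "c": {"d"}}, positions 2 and 3 are scored;
# the a-c connection spans position 2 (indices 0 and 2 straddle 1.5) and the
# c-d connection spans position 3, giving counts {2: 1, 3: 1}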
def sample_column_map(path, start_col=9, line_key="#CHR"):
#stream_in = gzip.open(path, "r")
stream_in = gzip.open(path, "rt")
out_map = collections.OrderedDict()
for line in stream_in:
if line_key in line:
#if line.startswith(b'#CHR'):
			line = line.rstrip().split("\t")
for i in range(start_col,len(line)):
out_map[line[i]] = i
break;
stream_in.close();
return(out_map);
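# a hedged usage sketch, assuming a hypothetical file "input.vcf.gz" whose
# header line reads "#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NA0001 NA0002":
# sample_column_map("input.vcf.gz") would return
# collections.OrderedDict([("NA0001", 9), ("NA0002", 10)])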
def check_dependency(name):
	global devnull;
	# subprocess.check_call raises CalledProcessError on a nonzero exit status,
	# so catch it instead of testing the (always-zero) return value
	try:
		subprocess.check_call("set -euo pipefail && "+"which "+name, shell=True, executable='/bin/bash', stdout=devnull);
		return(True);
	except subprocess.CalledProcessError:
		return(False);
def current_mem_usage():
	''' to monitor memory usage; ru_maxrss is reported in kilobytes on Linux (bytes on macOS), so this returns megabytes on Linux. '''
	return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.
if __name__ == "__main__":
main();
|
secastel/phaser
|
phaser/phaser.py
|
Python
|
gpl-3.0
| 98,504
|
[
"ASE",
"pysam"
] |
99fe6e83a3b56d781e0b21061d3f4c7f285077e913ddbe36bfea7201dfecd62f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for data_ingestion.py script."""
from __future__ import unicode_literals
__version__ = '$Id$'
import os
from tests import _data_dir
from tests import _images_dir
from tests.aspects import unittest, TestCase, ScriptMainTestCase
from scripts import data_ingestion
class TestPhoto(TestCase):
"""Test Photo class."""
sites = {
'wm-upload': {
'hostname': 'upload.wikimedia.org',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
def setUp(self):
super(TestPhoto, self).setUp()
self.obj = data_ingestion.Photo(URL='http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png',
metadata={'description.en': '"Sounds" icon',
'source': 'http://commons.wikimedia.org/wiki/File:Sound-icon.svg',
'author': 'KDE artists | Silstor',
'license': 'LGPL',
'set': 'Crystal SVG icon set',
'name': 'Sound icon'},
site=self.get_site('commons'))
def test_downloadPhoto(self):
"""Test download from http://upload.wikimedia.org/."""
with open(os.path.join(_images_dir, 'MP_sounds.png'), 'rb') as f:
self.assertEqual(f.read(), self.obj.downloadPhoto().read())
def test_findDuplicateImages(self):
"""Test finding duplicates on Wikimedia Commons."""
duplicates = self.obj.findDuplicateImages()
self.assertIn('MP sounds.png', [dup.replace("_", " ") for dup in duplicates])
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
}}""") # noqa
class TestCSVReader(TestCase):
"""Test CSVReader class."""
family = 'commons'
code = 'commons'
def setUp(self):
super(TestCSVReader, self).setUp()
with open(os.path.join(_data_dir, 'csv_ingestion.csv')) as fileobj:
self.iterator = data_ingestion.CSVReader(fileobj, 'url',
site=self.get_site())
self.obj = next(self.iterator)
def test_PhotoURL(self):
self.assertEqual(self.obj.URL, 'http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png')
def test_getTitle(self):
self.assertEqual(self.obj.getTitle("%(name)s - %(set)s.%(_ext)s"), "Sound icon - Crystal SVG icon set.png")
def test_getDescription(self):
self.assertEqual(self.obj.getDescription('CrystalTemplate'),
"""{{CrystalTemplate
|author=KDE artists {{!}} Silstor
|description.en="Sounds" icon
|license=LGPL
|name=Sound icon
|set=Crystal SVG icon set
|source=http://commons.wikimedia.org/wiki/File:Sound-icon.svg
|url=http://upload.wikimedia.org/wikipedia/commons/f/fc/MP_sounds.png
}}""") # noqa
class TestDataIngestionBot(ScriptMainTestCase):
"""Test TestDataIngestionBot class."""
family = 'test'
code = 'test'
def test_existing_file(self):
"""Test uploading a file that already exists."""
data_ingestion.main(
'-csvdir:tests/data',
'-page:User:John_Vandenberg/data_ingestion_test_template')
if __name__ == "__main__":
unittest.main()
|
xZise/pywikibot-core
|
tests/data_ingestion_tests.py
|
Python
|
mit
| 3,809
|
[
"CRYSTAL"
] |
ba9c2c83cbd1e43105c28ce48c9984a7b360d690e4dcb7e656c3b4df1f373501
|
# Copyright 2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.Application related tests for command line application wrappers.
This is intended to check generic things like argument parsing, and
stdin/stdout/stderr handling.
"""
import os
import unittest
from Bio.Application import AbstractCommandline, _Argument
class EchoApp(AbstractCommandline):
def __init__(self, cmd="echo", **kwargs):
self.parameters = [_Argument(["text"], "Text to echo")]
AbstractCommandline.__init__(self, cmd, **kwargs)
class TestApp(unittest.TestCase):
def test_echo(self):
cline = EchoApp(text="Hello World")
stdout, stderr = cline()
self.assertEqual(stderr, "")
self.assertEqual(stdout, "Hello World\n")
def test_echo_capture_both(self):
cline = EchoApp(text="Hello World")
stdout, stderr = cline(stdout=True, stderr=True)
self.assertEqual(stderr, "")
self.assertEqual(stdout, "Hello World\n")
def test_echo_capture_stdout(self):
cline = EchoApp(text="Hello World")
stdout, stderr = cline(stdout=True, stderr=False)
self.assertEqual(stderr, None)
self.assertEqual(stdout, "Hello World\n")
def test_echo_capture_stderr(self):
cline = EchoApp(text="Hello World")
stdout, stderr = cline(stdout=False, stderr=True)
self.assertEqual(stderr, "")
self.assertEqual(stdout, None)
def test_echo_capture_neither(self):
cline = EchoApp(text="Hello World")
stdout, stderr = cline(stdout=False, stderr=False)
self.assertEqual(stderr, None)
self.assertEqual(stdout, None)
def test_echo_file_stdout(self):
cline = EchoApp(text="Hello World")
tmp = "echo_stdout.tmp"
if os.path.isfile(tmp):
os.remove(tmp)
stdout, stderr = cline(stdout=tmp)
self.assertEqual(stderr, "")
self.assertEqual(stdout, None)
self.assertTrue(os.path.isfile(tmp))
with open(tmp) as h:
contents = h.read()
self.assertEqual(contents, "Hello World\n")
os.remove(tmp)
def test_echo_file_stderr(self):
cline = EchoApp(text="Hello World")
tmp = "echo_stderr.tmp"
if os.path.isfile(tmp):
os.remove(tmp)
stdout, stderr = cline(stderr=tmp)
self.assertEqual(stderr, None)
self.assertEqual(stdout, "Hello World\n")
self.assertTrue(os.path.isfile(tmp))
with open(tmp) as h:
contents = h.read()
self.assertEqual(contents, "")
os.remove(tmp)
def test_echo_file_same(self):
cline = EchoApp(text="Hello World")
tmp = "echo_stdout_stderr.tmp"
if os.path.isfile(tmp):
os.remove(tmp)
stdout, stderr = cline(stdout=tmp, stderr=tmp)
self.assertEqual(stderr, None)
self.assertEqual(stdout, None)
self.assertTrue(os.path.isfile(tmp))
with open(tmp) as h:
contents = h.read()
self.assertEqual(contents, "Hello World\n") # stdout + stderr
os.remove(tmp)
def test_echo_file_both(self):
cline = EchoApp(text="Hello World")
tmp = "echo_stdout.tmp"
if os.path.isfile(tmp):
os.remove(tmp)
tmp2 = "echo_stderr.tmp"
if os.path.isfile(tmp2):
os.remove(tmp2)
stdout, stderr = cline(stdout=tmp, stderr=tmp2)
self.assertEqual(stderr, None)
self.assertEqual(stdout, None)
self.assertTrue(os.path.isfile(tmp), tmp)
with open(tmp) as h:
contents = h.read()
self.assertEqual(contents, "Hello World\n") # stdout
os.remove(tmp)
self.assertTrue(os.path.isfile(tmp2), tmp2)
with open(tmp2) as h:
contents = h.read()
self.assertEqual(contents, "") # stderr
os.remove(tmp2)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_Application.py
|
Python
|
gpl-2.0
| 4,176
|
[
"Biopython"
] |
26482f4e33d1f5c7056c55a753bf54ee889ed37bab980cecf4cb37c579c633a7
|
'''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>.
'''
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
from PySide.QtGui import QDialog, QFileDialog, QDialogButtonBox, QAbstractItemView, QTableWidgetItem
from PySide.QtCore import Qt
from mapclientplugins.mayaviviewerstep.widgets.ui_mayaviviewerwidget import Ui_Dialog
from traits.api import HasTraits, Instance, on_trait_change, \
Int, Dict
# from mayaviviewerobjects import colours, MayaviViewerObjectsContainer
from mappluginutils.mayaviviewer.mayaviviewerobjects import colours, MayaviViewerObjectsContainer
class MayaviViewerWidget(QDialog):
'''
Configure dialog to present the user with the options to configure this step.
'''
GFD = [10,10]
displayGFNodes = True
defaultColor = colours['bone']
objectTableHeaderColumns = {'visible':0, 'type':1}
mergeGFVertices = False
backgroundColour = (0.0,0.0,0.0)
def __init__(self, viewerObjects, parent=None):
'''
Constructor
'''
QDialog.__init__(self, parent)
self._ui = Ui_Dialog()
self._ui.setupUi(self)
# self._view = self._ui.MayaviScene.visualisation.view
self._scene = self._ui.MayaviScene.visualisation.scene
self._scene.background = self.backgroundColour
if isinstance(viewerObjects, MayaviViewerObjectsContainer):
self._objects = viewerObjects # models, point clouds, tri-mesh, measurements etc to be rendered {name:(type, object)}
else:
            raise TypeError('viewerObjects must be a MayaviViewerObjectsContainer instance')
self._makeConnections()
self._initialiseObjectTable()
self._refresh()
self.selectedObjectName = None
# self.testPlot()
# self.drawObjects()
def _makeConnections(self):
self._ui.tableWidget.itemClicked.connect(self._tableItemClicked)
self._ui.tableWidget.itemChanged.connect(self._visibleBoxChanged)
self._ui.screenshotSaveButton.clicked.connect(self._saveScreenShot)
self._ui.slicePlaneRadioX.toggled.connect(self._slicePlaneXToggled)
self._ui.slicePlaneRadioY.toggled.connect(self._slicePlaneYToggled)
self._ui.slicePlaneRadioZ.toggled.connect(self._slicePlaneZToggled)
self._ui.closeButton.clicked.connect(self._close)
def _initialiseObjectTable(self):
self._ui.tableWidget.setRowCount(self._objects.getNumberOfObjects())
self._ui.tableWidget.verticalHeader().setVisible(False)
self._ui.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self._ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self._ui.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
row = 0
for name in self._objects.getObjectNames():
obj = self._objects.getObject(name)
self._addObjectToTable(row, name, obj)
row += 1
print row, name
self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['visible'])
self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['type'])
def _addObjectToTable(self, row, name, obj):
typeName = obj.typeName
print typeName
print name
tableItem = QTableWidgetItem(name)
tableItem.setCheckState(Qt.Checked)
self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['visible'], tableItem)
self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['type'], QTableWidgetItem(typeName))
def _tableItemClicked(self):
selectedRow = self._ui.tableWidget.currentRow()
self.selectedObjectName = self._ui.tableWidget.item(selectedRow, self.objectTableHeaderColumns['visible']).text()
self._populateScalarsDropDown(self.selectedObjectName)
print selectedRow
print self.selectedObjectName
obj = self._objects.getObject(self.selectedObjectName)
# enable/disable image plane toggles if gias scan is selected
if obj.typeName=='giasscan':
self._ui.slicePlaneRadioX.setEnabled(True)
self._ui.slicePlaneRadioY.setEnabled(True)
self._ui.slicePlaneRadioZ.setEnabled(True)
else:
self._ui.slicePlaneRadioX.setEnabled(False)
self._ui.slicePlaneRadioY.setEnabled(False)
self._ui.slicePlaneRadioZ.setEnabled(False)
def _visibleBoxChanged(self, tableItem):
# get name of object selected
# name = self._getSelectedObjectName()
# checked changed item is actually the checkbox
if tableItem.column()==self.objectTableHeaderColumns['visible']:
# get visible status
name = tableItem.text()
visible = tableItem.checkState().name=='Checked'
print 'visibleboxchanged name', name
print 'visibleboxchanged visible', visible
# toggle visibility
obj = self._objects.getObject(name)
print obj.name
if obj.sceneObject:
print 'changing existing visibility'
obj.setVisibility(visible)
else:
print 'drawing new'
obj.draw(self._scene)
def _populateScalarsDropDown(self, objectName):
pass
def _scalarSelectionChanged(self):
name = self._getSelectedObjectName()
scalarName = self._getSelectedScalarName()
self._objects.getObject(name).updateScalar(scalarName, self._scene)
def _getSelectedObjectName(self):
return self.selectedObjectName
def _getSelectedScalarName(self):
return 'none'
def drawObjects(self):
for name in self._objects.getObjectNames():
self._objects.getObject(name).draw(self._scene)
def _close(self):
for name in self._objects.getObjectNames():
self._objects.getObject(name).remove()
self._objects._objects = {}
        self._objects = None
# for r in xrange(self._ui.tableWidget.rowCount()):
# self._ui.tableWidget.removeRow(r)
def _refresh(self):
for r in xrange(self._ui.tableWidget.rowCount()):
tableItem = self._ui.tableWidget.item(r, self.objectTableHeaderColumns['visible'])
name = tableItem.text()
visible = tableItem.checkState().name=='Checked'
obj = self._objects.getObject(name)
print obj.name
if obj.sceneObject:
print 'changing existing visibility'
obj.setVisibility(visible)
else:
print 'drawing new'
obj.draw(self._scene)
def _saveScreenShot(self):
filename = self._ui.screenshotFilenameLineEdit.text()
width = int(self._ui.screenshotPixelXLineEdit.text())
height = int(self._ui.screenshotPixelYLineEdit.text())
self._scene.mlab.savefig( filename, size=( width, height ) )
def _slicePlaneXToggled(self, checked):
name = self._getSelectedObjectName()
obj = self._objects.getObject(name)
if checked:
obj.changeSlicePlane('x_axes')
def _slicePlaneYToggled(self, checked):
name = self._getSelectedObjectName()
obj = self._objects.getObject(name)
if checked:
obj.changeSlicePlane('y_axes')
def _slicePlaneZToggled(self, checked):
name = self._getSelectedObjectName()
obj = self._objects.getObject(name)
if checked:
obj.changeSlicePlane('z_axes')
#================================================================#
@on_trait_change('scene.activated')
def testPlot(self):
# This function is called when the view is opened. We don't
# populate the scene when the view is not yet open, as some
# VTK features require a GLContext.
print 'trait_changed'
# We can do normal mlab calls on the embedded scene.
self._scene.mlab.test_points3d()
# def _saveImage_fired( self ):
# self.scene.mlab.savefig( str(self.saveImageFilename), size=( int(self.saveImageWidth), int(self.saveImageLength) ) )
|
MusculoskeletalAtlasProject/mapclient-tests
|
test_resources/updater_test/mayaviviewerstep-master/mapclientplugins/mayaviviewerstep/widgets/mayaviviewerwidget.py
|
Python
|
apache-2.0
| 8,972
|
[
"VTK"
] |
afa245459035dc4e27d1d33e977c7783cdad4cd87e0de4fd8beb84cba67bc047
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Lint as: python3
"""A simple tool to generate associated code from parse_tree.h.
Generates the following files from parse_tree.h:
parse_tree_visitor.h
parse_tree_decls.h
parse_tree_accept_methods.inc
"""
import re
import sys
import textwrap
def GetClasses(input_filename):
"""Computes the set of classes for AST objects in the given file.
Args:
input_filename: The input file.
Returns:
A pair (concrete_classes, abstract_classes) where concrete_classes is a list
of all final (concrete) class names in the input file, and abstract_classes
is a list of non-final class names.
"""
concrete_classes = []
abstract_classes = ['ASTNode']
for input_line in open(input_filename):
m = re.search('^class (AST[a-zA-Z]*) final : public', input_line)
if m:
concrete_classes.append(m.group(1))
m = re.search('^class (AST[a-zA-Z]*) : public', input_line)
if m:
abstract_classes.append(m.group(1))
return (concrete_classes, abstract_classes)
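# A hedged example of the scan above: given a header containing
#   class ASTExpression : public ASTNode { ... };
#   class ASTIntLiteral final : public ASTExpression { ... };
# GetClasses would return (['ASTIntLiteral'], ['ASTNode', 'ASTExpression']),
# with 'ASTNode' always seeded into the abstract list.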
def GenerateParseTreeVisitor(concrete_classes):
"""Generates parse_tree_visitor.h contents containing ParseTreeVisitor class.
Args:
concrete_classes: a list of classes for which to generate visit methods
Yields:
A string part of the output code.
"""
yield textwrap.dedent('''\
#ifndef STORAGE_ZETASQL_PARSER_PARSE_TREE_VISITOR_H_
#define STORAGE_ZETASQL_PARSER_PARSE_TREE_VISITOR_H_
#include "zetasql/parser/parse_tree.h"
#include "zetasql/parser/visit_result.h"
namespace zetasql {
class ParseTreeVisitor {
public:
virtual ~ParseTreeVisitor() {}
virtual void visit(const ASTNode *node, void* data) = 0;
''')
for cls in concrete_classes:
yield (' virtual void visit{0}(const {0}* node, void* data) = 0;\n\n'
.format(cls))
yield textwrap.dedent('''\
};
class DefaultParseTreeVisitor : public ParseTreeVisitor {
public:
virtual void defaultVisit(const ASTNode* node, void* data) = 0;
void visit(const ASTNode* node, void* data) override {
defaultVisit(node, data);
}
''')
for cls in concrete_classes:
yield (
' void visit{0}(const {0}* node, void* data) override {{\n' + #
' defaultVisit(node, data);\n' + #
' }}\n' + #
'\n').format(cls)
yield textwrap.dedent('''\
};
class NonRecursiveParseTreeVisitor {
public:
virtual ~NonRecursiveParseTreeVisitor() {}
virtual absl::StatusOr<VisitResult> defaultVisit(const ASTNode* node) = 0;
absl::StatusOr<VisitResult> visit(const ASTNode* node) {
return defaultVisit(node);
}
''')
for cls in concrete_classes:
yield ((' virtual absl::StatusOr<VisitResult> visit{0}(const {0}* node) ' +
'{{return defaultVisit(node);}};\n\n').format(cls))
yield textwrap.dedent('''\
};
} // namespace zetasql
#endif // STORAGE_ZETASQL_PARSER_PARSE_TREE_VISITOR_H_
''')
def GenerateParseTreeDecls(concrete_classes,
abstract_classes):
"""Generates parse_tree_decls.h contents containing forward declarations.
Args:
concrete_classes: a list of classes for which to generate declarations
abstract_classes: a list of classes for which to generate declarations
Yields:
A string part of the output code.
"""
yield textwrap.dedent('''\
#ifndef STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H
#define STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H
namespace zetasql {
''')
for cls in abstract_classes + concrete_classes:
yield 'class {0};\n'.format(cls)
yield textwrap.dedent('''\
} // namespace zetasql
#endif // STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H
''')
def GenerateParseTreeAcceptMethods(
concrete_classes):
"""Generates parse_tree_accept_methods.inc contents containing Accept methods.
Args:
concrete_classes: a list of classes for which to generate Accept methods
Yields:
A string part of the output code.
"""
yield textwrap.dedent('''\
#include "zetasql/parser/parse_tree.h"
namespace zetasql {
''')
for cls in concrete_classes:
yield textwrap.dedent('''\
void {0}::Accept(ParseTreeVisitor* visitor, void* data) const {{
visitor->visit{0}(this, data);
}}
''').format(cls)
for cls in concrete_classes:
yield textwrap.dedent('''\
absl::StatusOr<VisitResult> {0}::Accept(NonRecursiveParseTreeVisitor* visitor) const {{
return visitor->visit{0}(this);
}}
''').format(cls)
yield '} // namespace zetasql\n'
def ToFile(output_filename, data):
"""Writes a sequence of strings to a file.
Args:
output_filename: the name (and path) of the file to write.
data: a sequence of strings to write to the file.
"""
with open(output_filename, 'w') as output:
for chunk in data:
output.write(chunk)
def main(argv):
if len(argv) != 5:
raise Exception(
'Usage: %s <input/path/to/parse_tree_generated.h> <output/path/to/parse_tree_visitor.h> <output/path/to/parse_tree_decls.h> <output/path/to/parse_tree_accept_methods.inc>'
)
(concrete_classes, abstract_classes) = GetClasses(argv[1])
ToFile(argv[2], GenerateParseTreeVisitor(concrete_classes))
ToFile(argv[3], GenerateParseTreeDecls(concrete_classes, abstract_classes))
  ToFile(argv[4], GenerateParseTreeAcceptMethods(concrete_classes))
if __name__ == '__main__':
main(sys.argv)
|
google/zetasql
|
zetasql/parser/gen_extra_files.py
|
Python
|
apache-2.0
| 6,124
|
[
"VisIt"
] |
054158911a8c6221553e09f3fddf44c5b8265831942f4a23f52209eb82db42fb
|