# (extraction artifact: markdown table header removed)
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""wxpython_kit package driver file.
Inserts the following modules in sys.modules: wx.
@author: Charl P. Botha <http://cpbotha.net/>
"""
# you have to define this
VERSION = ''
def init(theModuleManager, pre_import=True):
# import the main module itself
global wx
import wx
import dvedit_window
import dvshell
import python_shell_mixin
import python_shell
import utils
# build up VERSION
global VERSION
VERSION = wx.VERSION_STRING
theModuleManager.setProgress(100, 'Initialising wx_kit')
# ---- file boundary (extraction artifact) ----
medical_image_properties_keywords = [
'PatientName',
'PatientID',
'PatientAge',
'PatientSex',
'PatientBirthDate',
'ImageDate',
'ImageTime',
'ImageNumber',
'StudyDescription',
'StudyID',
'StudyDate',
'AcquisitionDate',
'SeriesNumber',
'SeriesDescription',
'Modality',
'ManufacturerModelName',
'Manufacturer',
'StationName',
'InstitutionName',
'ConvolutionKernel',
'SliceThickness',
'KVP',
'GantryTilt',
'EchoTime',
'EchoTrainLength',
'RepetitionTime',
'ExposureTime',
'XRayTubeCurrent'
]
# ---- file boundary (extraction artifact) ----
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
"""Utility methods for vtk_kit module kit.
@author Charl P. Botha <http://cpbotha.net/>
"""
import vtk
class DVOrientationWidget:
"""Convenience class for embedding orientation widget in any
renderwindowinteractor. If the data has DeVIDE style orientation
metadata, this class will show the little LRHFAP block, otherwise
x-y-z cursor.
"""
def __init__(self, rwi):
self._orientation_widget = vtk.vtkOrientationMarkerWidget()
self._orientation_widget.SetInteractor(rwi)
# we'll use this if there is no orientation metadata
# just a thingy with x-y-z indicators
self._axes_actor = vtk.vtkAxesActor()
# we'll use this if there is orientation metadata
self._annotated_cube_actor = aca = vtk.vtkAnnotatedCubeActor()
# configure the thing with better colours and no stupid edges
#aca.TextEdgesOff()
aca.GetXMinusFaceProperty().SetColor(1,0,0)
aca.GetXPlusFaceProperty().SetColor(1,0,0)
aca.GetYMinusFaceProperty().SetColor(0,1,0)
aca.GetYPlusFaceProperty().SetColor(0,1,0)
aca.GetZMinusFaceProperty().SetColor(0,0,1)
aca.GetZPlusFaceProperty().SetColor(0,0,1)
def close(self):
self.set_input(None)
self._orientation_widget.SetInteractor(None)
def set_input(self, input_data):
if input_data is None:
self._orientation_widget.Off()
return
ala = input_data.GetFieldData().GetArray('axis_labels_array')
if ala:
lut = list('LRPAFH')
labels = []
for i in range(6):
labels.append(lut[ala.GetValue(i)])
self._set_annotated_cube_actor_labels(labels)
self._orientation_widget.Off()
self._orientation_widget.SetOrientationMarker(
self._annotated_cube_actor)
self._orientation_widget.On()
else:
self._orientation_widget.Off()
self._orientation_widget.SetOrientationMarker(
self._axes_actor)
self._orientation_widget.On()
def _set_annotated_cube_actor_labels(self, labels):
aca = self._annotated_cube_actor
aca.SetXMinusFaceText(labels[0])
aca.SetXPlusFaceText(labels[1])
aca.SetYMinusFaceText(labels[2])
aca.SetYPlusFaceText(labels[3])
aca.SetZMinusFaceText(labels[4])
aca.SetZPlusFaceText(labels[5])
###########################################################################
def vtkmip_copy(src, dst):
"""Given two vtkMedicalImageProperties instances, copy all
attributes from the one to the other.
Rather use vtkMedicalImageProperties.DeepCopy.
"""
import module_kits.vtk_kit as vk
mip_kw = vk.constants.medical_image_properties_keywords
for kw in mip_kw:
# get method objects for the getter and the setter
gmo = getattr(src, 'Get%s' % (kw,))
smo = getattr(dst, 'Set%s' % (kw,))
# from the get to the set!
smo(gmo())
def setup_renderers(renwin, fg_ren, bg_ren):
"""Utility method to configure foreground and background renderer
and insert them into different layers of the renderenwinindow.
Use this if you want an incredibly cool gradient background!
"""
# bit of code thanks to
# http://www.bioengineering-research.com/vtk/BackgroundGradient.tcl
# had to change approach though to using background renderer,
# else transparent objects don't appear, and adding flat
# shaded objects breaks the background gradient.
# =================================================================
qpts = vtk.vtkPoints()
qpts.SetNumberOfPoints(4)
qpts.InsertPoint(0, 0, 0, 0)
qpts.InsertPoint(1, 1, 0, 0)
qpts.InsertPoint(2, 1, 1, 0)
qpts.InsertPoint(3, 0, 1, 0)
quad = vtk.vtkQuad()
quad.GetPointIds().SetId(0,0)
quad.GetPointIds().SetId(1,1)
quad.GetPointIds().SetId(2,2)
quad.GetPointIds().SetId(3,3)
uc = vtk.vtkUnsignedCharArray()
uc.SetNumberOfComponents(4)
uc.SetNumberOfTuples(4)
uc.SetTuple4(0, 128, 128, 128, 255) # bottom left RGBA
uc.SetTuple4(1, 128, 128, 128, 255) # bottom right RGBA
uc.SetTuple4(2, 255, 255, 255, 255) # top right RGBA
uc.SetTuple4(3, 255, 255, 255, 255) # tob left RGBA
dta = vtk.vtkPolyData()
dta.Allocate(1,1)
dta.InsertNextCell(quad.GetCellType(), quad.GetPointIds())
dta.SetPoints(qpts)
dta.GetPointData().SetScalars(uc)
coord = vtk.vtkCoordinate()
coord.SetCoordinateSystemToNormalizedDisplay()
mapper2d = vtk.vtkPolyDataMapper2D()
mapper2d.SetInput(dta)
mapper2d.SetTransformCoordinate(coord)
actor2d = vtk.vtkActor2D()
actor2d.SetMapper(mapper2d)
actor2d.GetProperty().SetDisplayLocationToBackground()
bg_ren.AddActor(actor2d)
bg_ren.SetLayer(0) # seems to be background
bg_ren.SetInteractive(0)
fg_ren.SetLayer(1) # and foreground
renwin.SetNumberOfLayers(2)
renwin.AddRenderer(fg_ren)
renwin.AddRenderer(bg_ren)
# ---- file boundary (extraction artifact) ----
# $Id$
"""Mixins that are useful for classes using vtk_kit.
@author: Charl P. Botha <http://cpbotha.net/>
"""
from external.vtkPipeline.ConfigVtkObj import ConfigVtkObj
from external.vtkPipeline.vtkMethodParser import VtkMethodParser
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin # temporary
import module_utils # temporary, most of this should be in utils.
import re
import types
import utils
#########################################################################
class PickleVTKObjectsModuleMixin(object):
"""This mixin will pickle the state of all vtk objects whose binding
attribute names have been added to self._vtkObjects, e.g. if you have
a self._imageMath, '_imageMath' should be in the list.
Your module has to derive from module_base as well so that it has a
self._config!
Remember to call the __init__ of this class with the list of attribute
strings representing vtk objects that you want pickled. All the objects
have to exist and be initially configured by then.
Remember to call close() when your child class close()s.
"""
def __init__(self, vtkObjectNames):
# you have to add the NAMES of the objects that you want pickled
# to this list.
self._vtkObjectNames = vtkObjectNames
self.statePattern = re.compile ("To[A-Z0-9]")
# make sure that the state of the vtkObjectNames objects is
# encapsulated in the initial _config
self.logic_to_config()
def close(self):
# make sure we get rid of these bindings as well
del self._vtkObjectNames
def logic_to_config(self):
parser = VtkMethodParser()
for vtkObjName in self._vtkObjectNames:
# pickled data: a list with toggle_methods, state_methods and
# get_set_methods as returned by the vtkMethodParser. Each of
# these is a list of tuples with the name of the method (as
# returned by the vtkMethodParser) and the value; in the case
# of the stateMethods, we use the whole stateGroup instead of
# just a single name
vtkObjPD = [[], [], []]
vtkObj = getattr(self, vtkObjName)
parser.parse_methods(vtkObj)
# parser now has toggle_methods(), state_methods() and
# get_set_methods();
# toggle_methods: ['BlaatOn', 'AbortExecuteOn']
# state_methods: [['SetBlaatToOne', 'SetBlaatToTwo'],
# ['SetMaatToThree', 'SetMaatToFive']]
# get_set_methods: ['NumberOfThreads', 'Progress']
for method in parser.toggle_methods():
# if you query ReleaseDataFlag on a filter with 0 outputs,
# VTK yields an error
if vtkObj.GetNumberOfOutputPorts() == 0 and \
method == 'ReleaseDataFlagOn':
continue
# we need to snip the 'On' off
val = eval("vtkObj.Get%s()" % (method[:-2],))
vtkObjPD[0].append((method, val))
for stateGroup in parser.state_methods():
# we search up to the To
end = self.statePattern.search (stateGroup[0]).start ()
# so we turn SetBlaatToOne to GetBlaat
get_m = 'G'+stateGroup[0][1:end]
# we're going to have to be more clever when we set_config...
# use a similar trick to get_state in vtkMethodParser
val = eval('vtkObj.%s()' % (get_m,))
vtkObjPD[1].append((stateGroup, val))
for method in parser.get_set_methods():
val = eval('vtkObj.Get%s()' % (method,))
vtkObjPD[2].append((method, val))
# finally set the pickle data in the correct position
setattr(self._config, vtkObjName, vtkObjPD)
def config_to_logic(self):
# go through at least the attributes in self._vtkObjectNames
for vtkObjName in self._vtkObjectNames:
try:
vtkObjPD = getattr(self._config, vtkObjName)
vtkObj = getattr(self, vtkObjName)
except AttributeError:
print "PickleVTKObjectsModuleMixin: %s not available " \
"in self._config OR in self. Skipping." % (vtkObjName,)
else:
for method, val in vtkObjPD[0]:
if val:
eval('vtkObj.%s()' % (method,))
else:
# snip off the On
eval('vtkObj.%sOff()' % (method[:-2],))
for stateGroup, val in vtkObjPD[1]:
# keep on calling the methods in stategroup until
# the getter returns a value == val.
end = self.statePattern.search(stateGroup[0]).start()
getMethod = 'G'+stateGroup[0][1:end]
for i in range(len(stateGroup)):
m = stateGroup[i]
eval('vtkObj.%s()' % (m,))
tempVal = eval('vtkObj.%s()' % (getMethod,))
if tempVal == val:
# success! break out of the for loop
break
for method, val in vtkObjPD[2]:
try:
eval('vtkObj.Set%s(val)' % (method,))
except TypeError:
if type(val) in [types.TupleType, types.ListType]:
# sometimes VTK wants the separate elements
# and not the tuple / list
eval("vtkObj.Set%s(*val)"%(method,))
else:
# re-raise the exception if it wasn't a
# tuple/list
raise
#########################################################################
# note that the pickle mixin comes first, as its config_to_logic/logic_to_config
# should be chosen over that of noConfig
class SimpleVTKClassModuleBase(PickleVTKObjectsModuleMixin,
IntrospectModuleMixin,
ModuleBase):
"""Use this base to make a DeVIDE module that wraps a single VTK
object. The state of the VTK object will be saved when the network
is.
You only have to override the __init__ method and call the __init__
of this class with the desired parameters.
The __doc__ string of your module class will be replaced with the
__doc__ string of the encapsulated VTK class (and will thus be
shown if the user requests module help). If you don't want this,
call the ctor with replaceDoc=False.
inputFunctions is a list of the complete methods that have to be called
on the encapsulated VTK class, e.g. ['SetInput1(inputStream)',
'SetInput1(inputStream)']. The same goes for outputFunctions, except that
there's no inputStream involved. Use None in both cases if you want
the default to be used (SetInput(), GetOutput()).
"""
def __init__(self, module_manager, vtkObjectBinding, progressText,
inputDescriptions, outputDescriptions,
replaceDoc=True,
inputFunctions=None, outputFunctions=None):
self._viewFrame = None
self._configVtkObj = None
# first these two mixins
ModuleBase.__init__(self, module_manager)
self._theFilter = vtkObjectBinding
if replaceDoc:
myMessage = "<em>"\
"This is a special DeVIDE module that very simply " \
"wraps a single VTK class. In general, the " \
"complete state of the class will be saved along " \
"with the rest of the network. The documentation " \
"below is that of the wrapped VTK class:</em>"
self.__doc__ = '%s\n\n%s' % (myMessage, self._theFilter.__doc__)
# now that we have the object, init the pickle mixin so
# that the state of this object will be saved
PickleVTKObjectsModuleMixin.__init__(self, ['_theFilter'])
# make progress hooks for the object
module_utils.setup_vtk_object_progress(self, self._theFilter,
progressText)
self._inputDescriptions = inputDescriptions
self._outputDescriptions = outputDescriptions
self._inputFunctions = inputFunctions
self._outputFunctions = outputFunctions
def _createViewFrame(self):
parentWindow = self._module_manager.get_module_view_parent_window()
import resources.python.defaultModuleViewFrame
reload(resources.python.defaultModuleViewFrame)
dMVF = resources.python.defaultModuleViewFrame.defaultModuleViewFrame
viewFrame = module_utils.instantiate_module_view_frame(
self, self._module_manager, dMVF)
# ConfigVtkObj parent not important, we're passing frame + panel
# this should populate the sizer with a new sizer7
# params: noParent, noRenwin, vtk_obj, frame, panel
self._configVtkObj = ConfigVtkObj(None, None,
self._theFilter,
viewFrame, viewFrame.viewFramePanel)
module_utils.create_standard_object_introspection(
self, viewFrame, viewFrame.viewFramePanel,
{'Module (self)' : self}, None)
# we don't want the Execute button to be default... else stuff gets
# executed with every enter in the command window (at least in Doze)
module_utils.create_eoca_buttons(self, viewFrame,
viewFrame.viewFramePanel,
False)
self._viewFrame = viewFrame
return viewFrame
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
PickleVTKObjectsModuleMixin.close(self)
IntrospectModuleMixin.close(self)
if self._viewFrame is not None:
self._configVtkObj.close()
self._viewFrame.Destroy()
ModuleBase.close(self)
# get rid of our binding to the vtkObject
del self._theFilter
def get_output_descriptions(self):
return self._outputDescriptions
def get_output(self, idx):
# this will only every be invoked if your get_output_descriptions has
# 1 or more elements
if self._outputFunctions:
return eval('self._theFilter.%s' % (self._outputFunctions[idx],))
else:
return self._theFilter.GetOutput()
def get_input_descriptions(self):
return self._inputDescriptions
def set_input(self, idx, inputStream):
# this will only be called for a certain idx if you've specified that
# many elements in your get_input_descriptions
if self._inputFunctions:
exec('self._theFilter.%s' %
(self._inputFunctions[idx]))
else:
if idx == 0:
self._theFilter.SetInput(inputStream)
else:
self._theFilter.SetInput(idx, inputStream)
def execute_module(self):
# it could be a writer, in that case, call the Write method.
if hasattr(self._theFilter, 'Write') and \
callable(self._theFilter.Write):
self._theFilter.Write()
else:
self._theFilter.Update()
def streaming_execute_module(self):
"""All VTK classes should be streamable.
"""
# it could be a writer, in that case, call the Write method.
if hasattr(self._theFilter, 'Write') and \
callable(self._theFilter.Write):
self._theFilter.Write()
else:
self._theFilter.Update()
def view(self):
if self._viewFrame is None:
# we have an initial config populated with stuff and in sync
# with theFilter. The viewFrame will also be in sync with the
# filter
self._viewFrame = self._createViewFrame()
self._viewFrame.Show(True)
self._viewFrame.Raise()
def config_to_view(self):
# the pickleVTKObjectsModuleMixin does logic <-> config
# so when the user clicks "sync", logic_to_config is called
# which transfers picklable state from the LOGIC to the CONFIG
# then we do double the work and call update_gui, which transfers
# the same state from the LOGIC straight up to the VIEW
self._configVtkObj.update_gui()
def view_to_config(self):
# same thing here: user clicks "apply", view_to_config is called which
# zaps UI changes straight to the LOGIC. Then we have to call
# logic_to_config explicitly which brings the info back up to the
# config... i.e. view -> logic -> config
# after that, config_to_logic is called which transfers all state AGAIN
# from the config to the logic
self._configVtkObj.apply_changes()
self.logic_to_config()
#########################################################################
# ---- file boundary (extraction artifact) ----
# $Id$
"""Miscellaneous functions that are part of the vtk_kit.
This module is imported by vtk_kit.init() after the rest of the vtk_kit has
been initialised. To use these functions in your module code, do e.g.:
import moduleKits; moduleKits.vtk_kit.misc.flatterProp3D(obj);
@author: Charl P. Botha <http://cpbotha.net/>
"""
# this import does no harm; we go after the rest of vtk_kit has been
# initialised
import vtk
def flattenProp3D(prop3D):
"""Get rid of the UserTransform() of an actor by integrating it with
the 'main' matrix.
"""
if not prop3D.GetUserTransform():
# no flattening here, move along
return
# get the current "complete" matrix (combining internal and user)
currentMatrix = vtk.vtkMatrix4x4()
prop3D.GetMatrix(currentMatrix)
# apply it to a transform
currentTransform = vtk.vtkTransform()
currentTransform.Identity()
currentTransform.SetMatrix(currentMatrix)
# zero the prop3D UserTransform
prop3D.SetUserTransform(None)
# and set the internal matrix of the prop3D
prop3D.SetPosition(currentTransform.GetPosition())
prop3D.SetScale(currentTransform.GetScale())
prop3D.SetOrientation(currentTransform.GetOrientation())
# we should now also be able to zero the origin
#prop3D.SetOrigin(0,0,0)
def planePlaneIntersection(
planeNormal0, planeOrigin0, planeNormal1, planeOrigin1):
"""Given two plane definitions, determine the intersection line using
the method on page 233 of Graphics Gems III: 'Plane-to-Plane Intersection'
Returns tuple with lineOrigin and lineVector.
"""
# convert planes to Hessian form first:
# http://mathworld.wolfram.com/HessianNormalForm.html
# calculate p, orthogonal distance from the plane to the origin
p0 = - vtk.vtkMath.Dot(planeOrigin0, planeNormal0)
p1 = - vtk.vtkMath.Dot(planeOrigin1, planeNormal1)
# we already have n, the planeNormal
# calculate cross product
L = [0.0, 0.0, 0.0]
vtk.vtkMath.Cross(planeNormal0, planeNormal1, L)
absL = [abs(e) for e in L]
maxAbsL = max(absL)
if maxAbsL == 0.0:
raise ValueError, "Planes are almost parallel."
w = absL.index(maxAbsL)
Lw = L[w]
# we're going to set the maxLidx'th component of our lineOrigin (i.e.
# any point on the line) to 0
P = [0.0, 0.0, 0.0]
# we want either [0, 1], [1, 2] or [2, 0]
if w == 0:
u = 1
v = 2
elif w == 1:
u = 2
v = 0
else:
u = 0
v = 1
P[u] = (planeNormal0[v] * p1 - planeNormal1[v] * p0) / float(Lw)
P[v] = (planeNormal1[u] * p0 - planeNormal0[u] * p1) / float(Lw)
P[w] = 0 # just for completeness
vtk.vtkMath.Normalize(L)
return (P, L)
# ---- file boundary (extraction artifact) ----
# perceptually linear colour scales based on those published by Haim
# Levkowitz at http://www.cs.uml.edu/~haim/ColorCenter/
# code by Peter R. Krekel (c) 2009
# modified by Charl Botha to cache lookuptable per range
import vtk
class ColorScales():
def __init__(self):
self.BlueToYellow = {}
self.Linear_Heat = {}
self.Linear_BlackToWhite = {}
self.Linear_BlueToYellow = {}
def LUT_BlueToYellow(self, LUrange):
key = tuple(LUrange)
try:
return self.BlueToYellow[key]
except KeyError:
pass
differencetable = vtk.vtkLookupTable()
differencetable.SetNumberOfColors(50)
differencetable.SetRange(LUrange[0], LUrange[1])
i=0.0
while i<50:
differencetable.SetTableValue( int(i), (i/50.0, i/50.0, 1-(i/50.0) ,1.0))
i=i+1
differencetable.Build()
self.BlueToYellow[key] = differencetable
return self.BlueToYellow[key]
def LUT_Linear_Heat(self, LUrange):
key = tuple(LUrange)
try:
return self.Linear_Heat[key]
except KeyError:
pass
L1table = vtk.vtkLookupTable()
L1table.SetRange(LUrange[0], LUrange[1])
L1table.SetNumberOfColors(256)
L1table.SetTableValue( 0 , ( 0.000 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 1 , ( 0.000 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 2 , ( 0.000 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 3 , ( 0.004 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 4 , ( 0.008 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 5 , ( 0.008 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 6 , ( 0.012 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 7 , ( 0.012 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 8 , ( 0.016 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 9 , ( 0.020 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 10 , ( 0.020 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 11 , ( 0.024 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 12 , ( 0.027 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 13 , ( 0.027 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 14 , ( 0.031 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 15 , ( 0.035 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 16 , ( 0.035 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 17 , ( 0.039 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 18 , ( 0.043 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 19 , ( 0.047 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 20 , ( 0.051 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 21 , ( 0.055 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 22 , ( 0.059 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 23 , ( 0.063 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 24 , ( 0.067 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 25 , ( 0.071 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 26 , ( 0.075 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 27 , ( 0.078 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 28 , ( 0.082 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 29 , ( 0.086 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 30 , ( 0.090 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 31 , ( 0.098 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 32 , ( 0.102 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 33 , ( 0.106 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 34 , ( 0.110 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 35 , ( 0.118 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 36 , ( 0.122 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 37 , ( 0.129 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 38 , ( 0.133 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 39 , ( 0.137 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 40 , ( 0.145 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 41 , ( 0.153 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 42 , ( 0.157 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 43 , ( 0.169 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 44 , ( 0.176 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 45 , ( 0.180 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 46 , ( 0.192 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 47 , ( 0.200 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 48 , ( 0.208 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 49 , ( 0.212 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 50 , ( 0.220 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 51 , ( 0.227 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 52 , ( 0.235 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 53 , ( 0.243 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 54 , ( 0.251 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 55 , ( 0.263 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 56 , ( 0.271 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 57 , ( 0.278 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 58 , ( 0.290 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 59 , ( 0.298 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 60 , ( 0.314 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 61 , ( 0.318 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 62 , ( 0.329 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 63 , ( 0.337 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 64 , ( 0.349 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 65 , ( 0.361 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 66 , ( 0.369 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 67 , ( 0.380 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 68 , ( 0.392 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 69 , ( 0.404 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 70 , ( 0.416 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 71 , ( 0.427 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 72 , ( 0.439 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 73 , ( 0.451 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 74 , ( 0.459 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 75 , ( 0.478 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 76 , ( 0.494 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 77 , ( 0.502 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 78 , ( 0.514 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 79 , ( 0.529 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 80 , ( 0.529 , 0.000 , 0.000 ,1.0))
L1table.SetTableValue( 81 , ( 0.529 , 0.004 , 0.000 ,1.0))
L1table.SetTableValue( 82 , ( 0.529 , 0.008 , 0.000 ,1.0))
L1table.SetTableValue( 83 , ( 0.529 , 0.012 , 0.000 ,1.0))
L1table.SetTableValue( 84 , ( 0.529 , 0.016 , 0.000 ,1.0))
L1table.SetTableValue( 85 , ( 0.529 , 0.024 , 0.000 ,1.0))
L1table.SetTableValue( 86 , ( 0.529 , 0.024 , 0.000 ,1.0))
L1table.SetTableValue( 87 , ( 0.529 , 0.031 , 0.000 ,1.0))
L1table.SetTableValue( 88 , ( 0.529 , 0.035 , 0.000 ,1.0))
L1table.SetTableValue( 89 , ( 0.529 , 0.039 , 0.000 ,1.0))
L1table.SetTableValue( 90 , ( 0.529 , 0.043 , 0.000 ,1.0))
L1table.SetTableValue( 91 , ( 0.529 , 0.051 , 0.000 ,1.0))
L1table.SetTableValue( 92 , ( 0.529 , 0.051 , 0.000 ,1.0))
L1table.SetTableValue( 93 , ( 0.529 , 0.059 , 0.000 ,1.0))
L1table.SetTableValue( 94 , ( 0.529 , 0.067 , 0.000 ,1.0))
L1table.SetTableValue( 95 , ( 0.529 , 0.067 , 0.000 ,1.0))
L1table.SetTableValue( 96 , ( 0.529 , 0.075 , 0.000 ,1.0))
L1table.SetTableValue( 97 , ( 0.529 , 0.082 , 0.000 ,1.0))
L1table.SetTableValue( 98 , ( 0.529 , 0.086 , 0.000 ,1.0))
L1table.SetTableValue( 99 , ( 0.529 , 0.090 , 0.000 ,1.0))
L1table.SetTableValue( 100 , ( 0.529 , 0.098 , 0.000 ,1.0))
L1table.SetTableValue( 101 , ( 0.529 , 0.102 , 0.000 ,1.0))
L1table.SetTableValue( 102 , ( 0.529 , 0.106 , 0.000 ,1.0))
L1table.SetTableValue( 103 , ( 0.529 , 0.114 , 0.000 ,1.0))
L1table.SetTableValue( 104 , ( 0.529 , 0.122 , 0.000 ,1.0))
L1table.SetTableValue( 105 , ( 0.529 , 0.125 , 0.000 ,1.0))
L1table.SetTableValue( 106 , ( 0.529 , 0.129 , 0.000 ,1.0))
L1table.SetTableValue( 107 , ( 0.529 , 0.137 , 0.000 ,1.0))
L1table.SetTableValue( 108 , ( 0.529 , 0.141 , 0.000 ,1.0))
L1table.SetTableValue( 109 , ( 0.529 , 0.149 , 0.000 ,1.0))
L1table.SetTableValue( 110 , ( 0.529 , 0.157 , 0.000 ,1.0))
L1table.SetTableValue( 111 , ( 0.529 , 0.165 , 0.000 ,1.0))
L1table.SetTableValue( 112 , ( 0.529 , 0.173 , 0.000 ,1.0))
L1table.SetTableValue( 113 , ( 0.529 , 0.180 , 0.000 ,1.0))
L1table.SetTableValue( 114 , ( 0.529 , 0.184 , 0.000 ,1.0))
L1table.SetTableValue( 115 , ( 0.529 , 0.192 , 0.000 ,1.0))
L1table.SetTableValue( 116 , ( 0.529 , 0.200 , 0.000 ,1.0))
L1table.SetTableValue( 117 , ( 0.529 , 0.204 , 0.000 ,1.0))
L1table.SetTableValue( 118 , ( 0.529 , 0.212 , 0.000 ,1.0))
L1table.SetTableValue( 119 , ( 0.529 , 0.220 , 0.000 ,1.0))
L1table.SetTableValue( 120 , ( 0.529 , 0.224 , 0.000 ,1.0))
L1table.SetTableValue( 121 , ( 0.529 , 0.231 , 0.000 ,1.0))
L1table.SetTableValue( 122 , ( 0.529 , 0.243 , 0.000 ,1.0))
L1table.SetTableValue( 123 , ( 0.529 , 0.247 , 0.000 ,1.0))
L1table.SetTableValue( 124 , ( 0.529 , 0.255 , 0.000 ,1.0))
L1table.SetTableValue( 125 , ( 0.529 , 0.263 , 0.000 ,1.0))
L1table.SetTableValue( 126 , ( 0.529 , 0.271 , 0.000 ,1.0))
L1table.SetTableValue( 127 , ( 0.529 , 0.282 , 0.000 ,1.0))
L1table.SetTableValue( 128 , ( 0.529 , 0.286 , 0.000 ,1.0))
L1table.SetTableValue( 129 , ( 0.529 , 0.298 , 0.000 ,1.0))
L1table.SetTableValue( 130 , ( 0.529 , 0.306 , 0.000 ,1.0))
L1table.SetTableValue( 131 , ( 0.529 , 0.314 , 0.000 ,1.0))
L1table.SetTableValue( 132 , ( 0.529 , 0.322 , 0.000 ,1.0))
L1table.SetTableValue( 133 , ( 0.529 , 0.329 , 0.000 ,1.0))
L1table.SetTableValue( 134 , ( 0.529 , 0.341 , 0.000 ,1.0))
L1table.SetTableValue( 135 , ( 0.529 , 0.345 , 0.000 ,1.0))
L1table.SetTableValue( 136 , ( 0.529 , 0.353 , 0.000 ,1.0))
L1table.SetTableValue( 137 , ( 0.529 , 0.365 , 0.000 ,1.0))
L1table.SetTableValue( 138 , ( 0.529 , 0.373 , 0.000 ,1.0))
L1table.SetTableValue( 139 , ( 0.529 , 0.384 , 0.000 ,1.0))
L1table.SetTableValue( 140 , ( 0.529 , 0.396 , 0.000 ,1.0))
L1table.SetTableValue( 141 , ( 0.529 , 0.404 , 0.000 ,1.0))
L1table.SetTableValue( 142 , ( 0.529 , 0.416 , 0.000 ,1.0))
L1table.SetTableValue( 143 , ( 0.529 , 0.420 , 0.000 ,1.0))
L1table.SetTableValue( 144 , ( 0.529 , 0.431 , 0.000 ,1.0))
L1table.SetTableValue( 145 , ( 0.529 , 0.443 , 0.000 ,1.0))
L1table.SetTableValue( 146 , ( 0.529 , 0.451 , 0.000 ,1.0))
L1table.SetTableValue( 147 , ( 0.529 , 0.463 , 0.000 ,1.0))
L1table.SetTableValue( 148 , ( 0.529 , 0.475 , 0.000 ,1.0))
L1table.SetTableValue( 149 , ( 0.529 , 0.486 , 0.000 ,1.0))
L1table.SetTableValue( 150 , ( 0.529 , 0.498 , 0.000 ,1.0))
L1table.SetTableValue( 151 , ( 0.529 , 0.506 , 0.000 ,1.0))
L1table.SetTableValue( 152 , ( 0.529 , 0.522 , 0.000 ,1.0))
L1table.SetTableValue( 153 , ( 0.529 , 0.529 , 0.000 ,1.0))
L1table.SetTableValue( 154 , ( 0.529 , 0.541 , 0.000 ,1.0))
L1table.SetTableValue( 155 , ( 0.529 , 0.553 , 0.000 ,1.0))
L1table.SetTableValue( 156 , ( 0.529 , 0.565 , 0.000 ,1.0))
L1table.SetTableValue( 157 , ( 0.529 , 0.580 , 0.000 ,1.0))
L1table.SetTableValue( 158 , ( 0.529 , 0.588 , 0.000 ,1.0))
L1table.SetTableValue( 159 , ( 0.529 , 0.608 , 0.000 ,1.0))
L1table.SetTableValue( 160 , ( 0.529 , 0.616 , 0.000 ,1.0))
L1table.SetTableValue( 161 , ( 0.529 , 0.627 , 0.000 ,1.0))
L1table.SetTableValue( 162 , ( 0.529 , 0.639 , 0.000 ,1.0))
L1table.SetTableValue( 163 , ( 0.529 , 0.651 , 0.000 ,1.0))
L1table.SetTableValue( 164 , ( 0.529 , 0.667 , 0.000 ,1.0))
L1table.SetTableValue( 165 , ( 0.529 , 0.682 , 0.000 ,1.0))
L1table.SetTableValue( 166 , ( 0.529 , 0.694 , 0.000 ,1.0))
L1table.SetTableValue( 167 , ( 0.529 , 0.706 , 0.000 ,1.0))
L1table.SetTableValue( 168 , ( 0.529 , 0.722 , 0.000 ,1.0))
L1table.SetTableValue( 169 , ( 0.529 , 0.737 , 0.000 ,1.0))
L1table.SetTableValue( 170 , ( 0.529 , 0.753 , 0.000 ,1.0))
L1table.SetTableValue( 171 , ( 0.529 , 0.765 , 0.000 ,1.0))
L1table.SetTableValue( 172 , ( 0.529 , 0.784 , 0.000 ,1.0))
L1table.SetTableValue( 173 , ( 0.529 , 0.796 , 0.000 ,1.0))
L1table.SetTableValue( 174 , ( 0.529 , 0.804 , 0.000 ,1.0))
L1table.SetTableValue( 175 , ( 0.529 , 0.824 , 0.000 ,1.0))
L1table.SetTableValue( 176 , ( 0.529 , 0.839 , 0.000 ,1.0))
L1table.SetTableValue( 177 , ( 0.529 , 0.855 , 0.000 ,1.0))
L1table.SetTableValue( 178 , ( 0.529 , 0.871 , 0.000 ,1.0))
L1table.SetTableValue( 179 , ( 0.529 , 0.886 , 0.000 ,1.0))
L1table.SetTableValue( 180 , ( 0.529 , 0.906 , 0.000 ,1.0))
L1table.SetTableValue( 181 , ( 0.529 , 0.925 , 0.000 ,1.0))
L1table.SetTableValue( 182 , ( 0.529 , 0.937 , 0.000 ,1.0))
L1table.SetTableValue( 183 , ( 0.529 , 0.957 , 0.000 ,1.0))
L1table.SetTableValue( 184 , ( 0.529 , 0.976 , 0.000 ,1.0))
L1table.SetTableValue( 185 , ( 0.529 , 0.996 , 0.000 ,1.0))
L1table.SetTableValue( 186 , ( 0.529 , 1.000 , 0.004 ,1.0))
L1table.SetTableValue( 187 , ( 0.529 , 1.000 , 0.020 ,1.0))
L1table.SetTableValue( 188 , ( 0.529 , 1.000 , 0.039 ,1.0))
L1table.SetTableValue( 189 , ( 0.529 , 1.000 , 0.059 ,1.0))
L1table.SetTableValue( 190 , ( 0.529 , 1.000 , 0.078 ,1.0))
L1table.SetTableValue( 191 , ( 0.529 , 1.000 , 0.090 ,1.0))
L1table.SetTableValue( 192 , ( 0.529 , 1.000 , 0.110 ,1.0))
L1table.SetTableValue( 193 , ( 0.529 , 1.000 , 0.129 ,1.0))
L1table.SetTableValue( 194 , ( 0.529 , 1.000 , 0.149 ,1.0))
L1table.SetTableValue( 195 , ( 0.529 , 1.000 , 0.169 ,1.0))
L1table.SetTableValue( 196 , ( 0.529 , 1.000 , 0.176 ,1.0))
L1table.SetTableValue( 197 , ( 0.529 , 1.000 , 0.192 ,1.0))
L1table.SetTableValue( 198 , ( 0.529 , 1.000 , 0.212 ,1.0))
L1table.SetTableValue( 199 , ( 0.529 , 1.000 , 0.231 ,1.0))
L1table.SetTableValue( 200 , ( 0.529 , 1.000 , 0.255 ,1.0))
L1table.SetTableValue( 201 , ( 0.529 , 1.000 , 0.275 ,1.0))
L1table.SetTableValue( 202 , ( 0.529 , 1.000 , 0.290 ,1.0))
L1table.SetTableValue( 203 , ( 0.529 , 1.000 , 0.314 ,1.0))
L1table.SetTableValue( 204 , ( 0.529 , 1.000 , 0.329 ,1.0))
L1table.SetTableValue( 205 , ( 0.529 , 1.000 , 0.353 ,1.0))
L1table.SetTableValue( 206 , ( 0.529 , 1.000 , 0.373 ,1.0))
L1table.SetTableValue( 207 , ( 0.529 , 1.000 , 0.384 ,1.0))
L1table.SetTableValue( 208 , ( 0.529 , 1.000 , 0.408 ,1.0))
L1table.SetTableValue( 209 , ( 0.529 , 1.000 , 0.431 ,1.0))
L1table.SetTableValue( 210 , ( 0.529 , 1.000 , 0.455 ,1.0))
L1table.SetTableValue( 211 , ( 0.529 , 1.000 , 0.471 ,1.0))
L1table.SetTableValue( 212 , ( 0.529 , 1.000 , 0.490 ,1.0))
L1table.SetTableValue( 213 , ( 0.529 , 1.000 , 0.514 ,1.0))
L1table.SetTableValue( 214 , ( 0.529 , 1.000 , 0.537 ,1.0))
L1table.SetTableValue( 215 , ( 0.529 , 1.000 , 0.565 ,1.0))
L1table.SetTableValue( 216 , ( 0.529 , 1.000 , 0.584 ,1.0))
L1table.SetTableValue( 217 , ( 0.529 , 1.000 , 0.604 ,1.0))
L1table.SetTableValue( 218 , ( 0.529 , 1.000 , 0.620 ,1.0))
L1table.SetTableValue( 219 , ( 0.529 , 1.000 , 0.647 ,1.0))
L1table.SetTableValue( 220 , ( 0.529 , 1.000 , 0.675 ,1.0))
L1table.SetTableValue( 221 , ( 0.529 , 1.000 , 0.702 ,1.0))
L1table.SetTableValue( 222 , ( 0.529 , 1.000 , 0.729 ,1.0))
L1table.SetTableValue( 223 , ( 0.529 , 1.000 , 0.749 ,1.0))
L1table.SetTableValue( 224 , ( 0.529 , 1.000 , 0.776 ,1.0))
L1table.SetTableValue( 225 , ( 0.529 , 1.000 , 0.796 ,1.0))
L1table.SetTableValue( 226 , ( 0.529 , 1.000 , 0.827 ,1.0))
L1table.SetTableValue( 227 , ( 0.529 , 1.000 , 0.847 ,1.0))
L1table.SetTableValue( 228 , ( 0.529 , 1.000 , 0.878 ,1.0))
L1table.SetTableValue( 229 , ( 0.529 , 1.000 , 0.910 ,1.0))
L1table.SetTableValue( 230 , ( 0.529 , 1.000 , 0.941 ,1.0))
L1table.SetTableValue( 231 , ( 0.529 , 1.000 , 0.973 ,1.0))
L1table.SetTableValue( 232 , ( 0.529 , 1.000 , 0.996 ,1.0))
L1table.SetTableValue( 233 , ( 0.529 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 234 , ( 0.549 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 235 , ( 0.573 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 236 , ( 0.600 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 237 , ( 0.612 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 238 , ( 0.631 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 239 , ( 0.659 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 240 , ( 0.675 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 241 , ( 0.694 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 242 , ( 0.714 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 243 , ( 0.741 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 244 , ( 0.753 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 245 , ( 0.780 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 246 , ( 0.800 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 247 , ( 0.824 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 248 , ( 0.843 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 249 , ( 0.863 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 250 , ( 0.882 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 251 , ( 0.910 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 252 , ( 0.925 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 253 , ( 0.941 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 254 , ( 0.973 , 1.000 , 1.000 ,1.0))
L1table.SetTableValue( 255 , ( 1.000 , 1.000 , 1.000 ,1.0))
L1table.Build()
self.Linear_Heat[key] = L1table
return self.Linear_Heat[key]
def LUT_Linear_BlackToWhite(self, LUrange):
    """Return a cached 256-entry black-to-white vtkLookupTable for *LUrange*.

    Tables are memoised per scalar range in self.Linear_BlackToWhite, so
    repeated requests for the same range reuse one vtkLookupTable instance.
    """
    key = tuple(LUrange)
    if key not in self.Linear_BlackToWhite:
        # Run-length encoded grey ramp: (count, grey) pairs, counts sum to
        # 256.  Every entry is an opaque grey (r == g == b, alpha == 1.0).
        grey_runs = [
            (7, 0.000), (9, 0.004), (7, 0.008), (7, 0.012), (5, 0.016),
            (5, 0.020), (5, 0.024), (5, 0.027), (2, 0.031), (4, 0.035),
            (5, 0.039), (2, 0.043), (3, 0.047), (2, 0.051), (2, 0.055),
            (3, 0.059), (2, 0.063), (2, 0.067), (2, 0.071), (5, 0.075),
            (2, 0.078), (3, 0.086), (2, 0.090), (2, 0.094), (3, 0.102),
            (2, 0.106), (2, 0.114), (2, 0.118), (5, 0.125), (2, 0.133),
            (3, 0.137), (2, 0.145), (2, 0.153), (3, 0.161), (2, 0.169),
            (2, 0.176), (3, 0.180), (2, 0.184), (2, 0.192), (2, 0.200),
            (3, 0.204), (2, 0.212), (2, 0.220), (3, 0.231), (2, 0.239),
            (2, 0.251), (3, 0.263), (2, 0.271), (2, 0.282), (2, 0.294),
            (3, 0.298), (2, 0.306), (2, 0.318), (3, 0.329), (2, 0.341),
            (2, 0.357), (3, 0.369), (2, 0.380), (2, 0.396), (2, 0.408),
            (3, 0.420), (2, 0.424), (2, 0.439), (3, 0.455), (2, 0.471),
            (2, 0.486), (3, 0.502), (2, 0.518), (2, 0.533), (2, 0.553),
            (3, 0.569), (2, 0.576), (2, 0.588), (3, 0.604), (2, 0.624),
            (2, 0.643), (3, 0.663), (2, 0.682), (2, 0.702), (2, 0.725),
            (3, 0.745), (4, 0.765), (3, 0.788), (2, 0.812), (2, 0.831),
            (3, 0.855), (2, 0.878), (2, 0.902), (2, 0.929), (3, 0.953),
            (2, 0.976), (3, 0.988), (1, 1.000),
        ]
        lut = vtk.vtkLookupTable()
        lut.SetRange(LUrange[0], LUrange[1])
        lut.SetNumberOfColors(256)
        index = 0
        for count, grey in grey_runs:
            for _ in range(count):
                lut.SetTableValue(index, (grey, grey, grey, 1.0))
                index += 1
        lut.Build()
        self.Linear_BlackToWhite[key] = lut
    return self.Linear_BlackToWhite[key]
def LUT_Linear_BlueToYellow(self, LUrange):
    """Return a cached 256-entry blue-to-yellow vtkLookupTable for *LUrange*.

    Tables are memoised per scalar range in self.Linear_BlueToYellow, so
    repeated requests for the same range reuse one vtkLookupTable instance.
    """
    key = tuple(LUrange)
    if key not in self.Linear_BlueToYellow:
        # One (v, b) pair per table entry: red == green == v, blue == b,
        # alpha == 1.0.  The ramp runs from blue at the low end through
        # desaturated greys to yellow at the high end.
        ramp = [
            (0.027, 0.996), (0.090, 0.988), (0.118, 0.980), (0.141, 0.973),
            (0.157, 0.969), (0.173, 0.961), (0.184, 0.953), (0.196, 0.949),
            (0.204, 0.941), (0.216, 0.937), (0.224, 0.933), (0.231, 0.925),
            (0.239, 0.922), (0.247, 0.918), (0.255, 0.914), (0.259, 0.906),
            (0.267, 0.902), (0.271, 0.898), (0.278, 0.894), (0.282, 0.890),
            (0.290, 0.886), (0.294, 0.882), (0.298, 0.882), (0.306, 0.878),
            (0.310, 0.875), (0.314, 0.871), (0.318, 0.867), (0.322, 0.867),
            (0.329, 0.863), (0.333, 0.859), (0.337, 0.855), (0.341, 0.855),
            (0.345, 0.851), (0.349, 0.847), (0.353, 0.847), (0.357, 0.843),
            (0.361, 0.839), (0.365, 0.839), (0.369, 0.835), (0.373, 0.835),
            (0.376, 0.831), (0.380, 0.831), (0.384, 0.827), (0.384, 0.824),
            (0.388, 0.824), (0.392, 0.820), (0.396, 0.820), (0.400, 0.816),
            (0.404, 0.816), (0.408, 0.816), (0.412, 0.812), (0.412, 0.812),
            (0.416, 0.808), (0.420, 0.808), (0.424, 0.804), (0.427, 0.804),
            (0.431, 0.800), (0.431, 0.800), (0.435, 0.800), (0.439, 0.796),
            (0.443, 0.796), (0.447, 0.792), (0.447, 0.792), (0.451, 0.792),
            (0.455, 0.788), (0.459, 0.788), (0.463, 0.784), (0.463, 0.784),
            (0.467, 0.784), (0.471, 0.780), (0.475, 0.780), (0.475, 0.780),
            (0.478, 0.776), (0.482, 0.776), (0.486, 0.776), (0.486, 0.773),
            (0.490, 0.773), (0.494, 0.773), (0.498, 0.769), (0.502, 0.769),
            (0.502, 0.765), (0.506, 0.765), (0.510, 0.765), (0.510, 0.761),
            (0.514, 0.761), (0.518, 0.761), (0.522, 0.757), (0.522, 0.757),
            (0.525, 0.757), (0.529, 0.753), (0.533, 0.753), (0.533, 0.753),
            (0.537, 0.749), (0.541, 0.749), (0.545, 0.749), (0.545, 0.745),
            (0.549, 0.745), (0.553, 0.745), (0.557, 0.741), (0.557, 0.741),
            (0.561, 0.741), (0.565, 0.737), (0.565, 0.737), (0.569, 0.737),
            (0.573, 0.733), (0.576, 0.733), (0.576, 0.733), (0.580, 0.729),
            (0.584, 0.729), (0.584, 0.729), (0.588, 0.725), (0.592, 0.725),
            (0.596, 0.725), (0.596, 0.722), (0.600, 0.722), (0.604, 0.722),
            (0.604, 0.718), (0.608, 0.718), (0.612, 0.714), (0.616, 0.714),
            (0.616, 0.714), (0.620, 0.710), (0.624, 0.710), (0.624, 0.710),
            (0.627, 0.706), (0.631, 0.706), (0.635, 0.706), (0.635, 0.702),
            (0.639, 0.702), (0.643, 0.698), (0.643, 0.698), (0.647, 0.698),
            (0.651, 0.694), (0.655, 0.694), (0.655, 0.690), (0.659, 0.690),
            (0.663, 0.690), (0.663, 0.686), (0.667, 0.686), (0.671, 0.682),
            (0.675, 0.682), (0.675, 0.678), (0.678, 0.678), (0.682, 0.678),
            (0.682, 0.675), (0.686, 0.675), (0.690, 0.671), (0.694, 0.671),
            (0.694, 0.667), (0.698, 0.667), (0.702, 0.663), (0.702, 0.663),
            (0.706, 0.659), (0.710, 0.659), (0.710, 0.655), (0.714, 0.655),
            (0.718, 0.651), (0.722, 0.651), (0.722, 0.647), (0.725, 0.647),
            (0.729, 0.643), (0.729, 0.643), (0.733, 0.639), (0.737, 0.639),
            (0.741, 0.635), (0.741, 0.635), (0.745, 0.631), (0.749, 0.631),
            (0.749, 0.627), (0.753, 0.624), (0.757, 0.624), (0.761, 0.620),
            (0.761, 0.620), (0.765, 0.616), (0.769, 0.616), (0.769, 0.612),
            (0.773, 0.608), (0.776, 0.608), (0.780, 0.604), (0.780, 0.600),
            (0.784, 0.600), (0.788, 0.596), (0.788, 0.592), (0.792, 0.592),
            (0.796, 0.588), (0.800, 0.584), (0.800, 0.584), (0.804, 0.580),
            (0.808, 0.576), (0.808, 0.573), (0.812, 0.573), (0.816, 0.569),
            (0.820, 0.565), (0.820, 0.561), (0.824, 0.561), (0.827, 0.557),
            (0.827, 0.553), (0.831, 0.549), (0.835, 0.545), (0.839, 0.541),
            (0.839, 0.541), (0.843, 0.537), (0.847, 0.533), (0.847, 0.529),
            (0.851, 0.525), (0.855, 0.522), (0.859, 0.518), (0.859, 0.514),
            (0.863, 0.510), (0.867, 0.506), (0.867, 0.502), (0.871, 0.498),
            (0.875, 0.494), (0.878, 0.490), (0.878, 0.486), (0.882, 0.482),
            (0.886, 0.478), (0.886, 0.475), (0.890, 0.467), (0.894, 0.463),
            (0.898, 0.459), (0.898, 0.455), (0.902, 0.447), (0.906, 0.443),
            (0.910, 0.439), (0.910, 0.431), (0.914, 0.427), (0.918, 0.420),
            (0.918, 0.416), (0.922, 0.408), (0.925, 0.404), (0.929, 0.396),
            (0.929, 0.392), (0.933, 0.384), (0.937, 0.376), (0.937, 0.369),
            (0.941, 0.361), (0.945, 0.357), (0.949, 0.349), (0.949, 0.337),
            (0.953, 0.329), (0.957, 0.322), (0.961, 0.314), (0.961, 0.302),
            (0.965, 0.290), (0.969, 0.282), (0.969, 0.271), (0.973, 0.255),
            (0.976, 0.243), (0.980, 0.227), (0.980, 0.212), (0.984, 0.192),
            (0.988, 0.173), (0.992, 0.145), (0.992, 0.110), (0.996, 0.051),
        ]
        lut = vtk.vtkLookupTable()
        lut.SetRange(LUrange[0], LUrange[1])
        lut.SetNumberOfColors(256)
        for index, (v, b) in enumerate(ramp):
            lut.SetTableValue(index, (v, v, b, 1.0))
        lut.Build()
        self.Linear_BlueToYellow[key] = lut
    return self.Linear_BlueToYellow[key]
| Python |
# $Id$
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""vtk_kit package driver file.
This performs all initialisation necessary to use VTK from DeVIDE. Makes
sure that all VTK classes have ErrorEvent handlers that report back to
the ModuleManager.
Inserts the following modules in sys.modules: vtk, vtkdevide.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import importlib
import re
import sys
import traceback
import types
VERSION = ''
def preImportVTK(progressMethod):
    """Import the large VTK component libraries one by one, reporting
    progress via progressMethod after each one.

    This exists purely for user feedback during startup; a plain
    "import vtk" would pull in the same modules in one go.
    """
    # (module to import, message to show when it's done)
    import_plan = [('vtk.common', 'VTK Common.'),
                   ('vtk.filtering', 'VTK Filtering.'),
                   ('vtk.io', 'VTK IO.'),
                   ('vtk.imaging', 'VTK Imaging.'),
                   ('vtk.graphics', 'VTK Graphics.'),
                   ('vtk.rendering', 'VTK Rendering.'),
                   ('vtk.hybrid', 'VTK Hybrid.'),
                   #('vtk.patented', 'VTK Patented.'),
                   ('vtk', 'Other VTK symbols')]

    # set the dynamic loading flags.  If we don't do this, we get strange
    # errors on 64 bit machines.  To see this happen, comment this statement
    # and then run the VTK->ITK connection test case.
    previous_flags = setDLFlags()

    step = 100.0 / len(import_plan)
    progress = 0.0

    # do the imports, ticking the progress bar after each one
    for module_name, message in import_plan:
        progress += step
        progressMethod(progress, 'Initialising vtk_kit: %s' % (message,),
                       noTime=True)
        exec('import %s' % (module_name,))

    # restore previous dynamic loading flags
    resetDLFlags(previous_flags)
def setDLFlags():
    """Switch dlopen() to RTLD_NOW | RTLD_GLOBAL and return the previous
    flags.

    Sharing symbols across extension modules is required for GCC's RTTI
    to work (see http://gcc.gnu.org/faq.html#dso); brought over from ITK
    Wrapping/CSwig/Python.

    @returns: the previous dlopen flags (an int), or None on platforms
        that have no sys.getdlopenflags (e.g. Windows).
    """
    try:
        import dl
        newflags = dl.RTLD_NOW | dl.RTLD_GLOBAL
    except ImportError:
        # no dl module on this build; use the usual numeric value of
        # RTLD_NOW | RTLD_GLOBAL (see the dl module documentation)
        newflags = 0x102
    try:
        oldflags = sys.getdlopenflags()
        sys.setdlopenflags(newflags)
    except AttributeError:
        # non-Unix platforms have no dlopenflags API at all
        oldflags = None
    return oldflags
def resetDLFlags(data):
    """Restore the dlopen() flags previously saved by setDLFlags().

    Brought over from ITK Wrapping/CSwig/Python.

    @param data: flags as returned by setDLFlags(); None (returned on
        platforms without the dlopenflags API) is silently ignored.
    """
    if data is None:
        return
    try:
        sys.setdlopenflags(data)
    except AttributeError:
        # platform without the dlopenflags API; nothing to restore
        pass
def init(module_manager, pre_import=True):
    """Initialise the vtk_kit: import vtk and vtkdevide and record the
    VTK version in the module-level VERSION.

    @param module_manager: its setProgress method is used for feedback
        during the optional pre-import.
    @param pre_import: when True, the individual VTK component libraries
        are imported one by one first, purely for progress feedback.
    """
    # first do the VTK pre-imports: this is here ONLY to keep the user happy
    # it's not necessary for normal functioning
    if pre_import:
        preImportVTK(module_manager.setProgress)

    # import the main module itself
    # the global is so that users can also do:
    # from module_kits import vtk_kit
    # vtk_kit.vtk.vtkSomeFilter()
    global vtk
    import vtk

    # and do the same for vtkdevide
    global vtkdevide
    import vtkdevide

    # load up some generic functions into this namespace
    # user can, after import of module_kits.vtk_kit, address these as
    # module_kits.vtk_kit.blaat.  In this case we don't need "global",
    # as these are modules directly in this package.
    import module_kits.vtk_kit.misc as misc
    import module_kits.vtk_kit.mixins as mixins
    import module_kits.vtk_kit.utils as utils
    import module_kits.vtk_kit.constants as constants
    import module_kits.vtk_kit.color_scales as color_scales

    # setup the kit version
    global VERSION
    VERSION = '%s' % (vtk.vtkVersion.GetVTKVersion(),)
| Python |
# $Id: __init__.py 1945 2006-03-05 01:06:37Z cpbotha $
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""numpy_kit package driver file.
Inserts the following modules in sys.modules: numpy.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import os
import re
import sys
import types
# you have to define this
VERSION = ''
def init(theModuleManager, pre_import=True):
    """Initialise the numpy_kit: import numpy and alias it in sys.modules
    under the legacy names 'Numeric' and 'numarray'.
    """
    theModuleManager.setProgress(5, 'Initialising numpy_kit: start')

    # make numpy reachable as module_kits.numpy_kit.numpy as well
    global numpy
    import numpy

    # legacy aliases: code doing "import Numeric" or "import numarray"
    # (such as the FloatCanvas) will probably keep working on top of numpy
    sys.modules['Numeric'] = numpy
    sys.modules['numarray'] = numpy

    theModuleManager.setProgress(95, 'Initialising numpy_kit: import done')

    # record the kit version
    global VERSION
    VERSION = '%s' % (numpy.version.version,)

    theModuleManager.setProgress(100, 'Initialising numpy_kit: complete')
| Python |
import math
import numpy
epsilon = 1e-12
def abs(v1):
    """Return the element-wise absolute value of v1.

    NOTE: deliberately shadows the builtin abs inside this module so
    that sequences and arrays are handled via numpy.
    """
    return numpy.absolute(v1)
def norm(v1):
    """Given vector v1, return its Euclidean (L2) norm."""
    arr = numpy.array(v1)
    return numpy.sqrt(numpy.sum(arr * arr))
def normalise_line(p1, p2):
    """Given two points, return normal vector, magnitude and original
    line vector.

    Example: normal_vec, mag, line_vec = normalise_line(p1_tuple, p2_tuple)

    If p1 == p2 the magnitude is 0.0 and the zero line vector itself is
    returned in place of the unit vector (no division by zero).
    """
    line_vector = numpy.array(p2) - numpy.array(p1)
    squared_norm = numpy.sum(line_vector * line_vector)
    norm = numpy.sqrt(squared_norm)
    if norm != 0.0:
        unit_vec = line_vector / norm
    else:
        unit_vec = line_vector

    return (unit_vec, norm, line_vector)
def points_to_vector(p1, p2):
    """Return the vector from point p1 to point p2 as a numpy array."""
    return numpy.array(p2) - numpy.array(p1)
def dot(v1, v2):
    """Return dot-product between vectors v1 and v2.
    """
    a = numpy.array(v1)
    b = numpy.array(v2)
    return numpy.sum(a * b)
def move_line_to_target_along_normal(p1, p2, n, target):
    """Move the line (p1,p2) along normal vector n until it intersects
    with target.

    @param n: unit-length normal vector along which the line may move.
    @param target: point the translated line should pass through, as
        measured along n.
    @returns: Adjusted p1,p2 as numpy arrays.
    @raises RuntimeError: if n is not of unit length.
    """
    # n has to be a unit vector; test the *absolute* deviation from 1.0.
    # (the old test norm(n) - 1.0 > epsilon silently accepted vectors
    # that were too short)
    if numpy.absolute(norm(n) - 1.0) > epsilon:
        raise RuntimeError('normal vector not unit size.')

    p1a = numpy.array(p1)
    p2a = numpy.array(p2)
    ta = numpy.array(target)

    # signed distance of p2a from the target, measured along n
    dp = dot(p2a - ta, n)

    # only translate if the distance is significant
    if numpy.absolute(dp) > epsilon:
        # calculate vector needed to correct along n
        dvec = - dp * n
        p1a = p1a + dvec
        p2a = p2a + dvec

    return (p1a, p2a)
def intersect_line_sphere(p1, p2, sc, r):
    """Calculates intersection between line going through p1 and p2 and
    sphere determined by centre sc and radius r.

    Requires numpy.

    @param p1: tuple, or 1D matrix, or 1D array with first point defining line.
    @param p2: tuple, or 1D matrix, or 1D array with second point defining line.
    @returns: list of 0, 1 or 2 intersection points.  With 2 points, the
        vector from the first to the second has the same orientation as
        the vector from p1 to p2.

    See http://local.wasp.uwa.edu.au/~pbourke/geometry/sphereline/source.cpp
    """
    p_diff = numpy.array(p2) - numpy.array(p1)

    # coefficients of the quadratic in mu along p1 + mu*(p2-p1)
    a = numpy.sum(numpy.multiply(p_diff, p_diff))
    b = 2 * ( (p2[0] - p1[0]) * (p1[0] - sc[0]) + \
              (p2[1] - p1[1]) * (p1[1] - sc[1]) + \
              (p2[2] - p1[2]) * (p1[2] - sc[2]) )
    c = sc[0] ** 2 + sc[1] ** 2 + \
        sc[2] ** 2 + p1[0] ** 2 + \
        p1[1] ** 2 + p1[2] ** 2 - \
        2 * (sc[0] * p1[0] + sc[1] * p1[1] + sc[2]*p1[2]) - r ** 2

    discriminant = b * b - 4 * a * c

    if (discriminant < 0.0):
        # line misses the sphere
        return []

    if (discriminant == 0.0):
        # line is tangent: single intersection
        mu = -b / (2 * a)
        return [ (p1[0] + mu * (p2[0] - p1[0]),
                  p1[1] + mu * (p2[1] - p1[1]),
                  p1[2] + mu * (p2[2] - p1[2])) ]

    if (discriminant > 0.0):
        # line crosses the sphere: two intersections
        mu_a = (-b + math.sqrt( b ** 2 - 4*a*c )) / (2*a)
        first = (p1[0] + mu_a * (p2[0] - p1[0]),
                 p1[1] + mu_a * (p2[1] - p1[1]),
                 p1[2] + mu_a * (p2[2] - p1[2]))
        mu_b = (-b - math.sqrt( b ** 2 - 4*a*c )) / (2*a)
        second = (p1[0] + mu_b * (p2[0] - p1[0]),
                  p1[1] + mu_b * (p2[1] - p1[1]),
                  p1[2] + mu_b * (p2[2] - p1[2]))

        # order the pair so that first->second matches p1->p2
        i_diff = numpy.array(second) - numpy.array(first)
        if numpy.dot(p_diff, i_diff) < 0:
            return [second, first]
        else:
            return [first, second]
def intersect_line_ellipsoid(p1, p2, ec, radius_vectors):
    """Determine intersection points between line defined by p1 and p2,
    and ellipsoid defined by centre ec and three radius vectors (tuple
    of tuples, each inner tuple is a radius vector).

    This requires numpy.

    Works by transforming the line into the space where the ellipsoid is
    the unit sphere at the origin, intersecting there, and transforming
    the intersections back.
    """
    # create transformation matrix that has the radius_vectors
    # as its columns (hence the transpose)
    rv = numpy.transpose(numpy.matrix(radius_vectors))
    # calculate its (pseudo-)inverse
    rv_inv = numpy.linalg.pinv(rv)

    # now transform the two points
    # all points have to be relative to ellipsoid centre
    # numpy.dot replaces the Numeric-era numpy.matrixmultiply, which no
    # longer exists in numpy; for 2-D operands they are the same product.
    # the [0] at the end and the numpy.array at the start make sure
    # we pass a row vector (array) to the line_sphere_intersection
    p1_e = numpy.array(numpy.dot(rv_inv, numpy.array(p1) - numpy.array(ec)))[0]
    p2_e = numpy.array(numpy.dot(rv_inv, numpy.array(p2) - numpy.array(ec)))[0]

    # now we only have to determine the intersection between the points
    # (now transformed to ellipsoid space) with the unit sphere centred at 0
    isects_e = intersect_line_sphere(p1_e, p2_e, (0.0,0.0,0.0), 1.0)

    # transform intersections back to "normal" space
    isects = []
    for i in isects_e:
        # numpy.array(...)[0] is for returning only row of matrix as array
        itemp = numpy.array(numpy.dot(rv, numpy.array(i)))[0]
        isects.append(itemp + numpy.array(ec))

    return isects
def intersect_line_mask(p1, p2, mask, incr):
    """Calculate FIRST intersection of line (p1,p2) with mask, as we walk
    from p1 to p2 with increments == incr.

    @param mask: image volume with GetOrigin/GetSpacing/GetExtent and
        GetScalarComponentAsDouble (VTK image data interface); voxels
        with value > 0.0 count as "inside" the mask.
    @returns: the first sample point inside the mask as a numpy array,
        or None if no sample along the line hits the mask.
    """
    p1 = numpy.array(p1)
    p2 = numpy.array(p2)
    origin = numpy.array(mask.GetOrigin())
    spacing = numpy.array(mask.GetSpacing())
    incr = float(incr)

    line_vector = p2 - p1
    squared_norm = numpy.sum(line_vector * line_vector)
    norm = numpy.sqrt(squared_norm)
    if norm > 0.0:
        unit_vec = line_vector / norm
    else:
        # degenerate zero-length line: the old code divided by zero here
        # and then looped forever on the resulting nans; now only p1
        # itself is sampled.
        unit_vec = line_vector

    curp = p1
    while True:
        # convert the current world position to continuous voxel coords
        voxc = (curp - origin) / spacing
        e = mask.GetExtent()
        if voxc[0] >= e[0] and voxc[0] <= e[1] and \
           voxc[1] >= e[2] and voxc[1] <= e[3] and \
           voxc[2] >= e[4] and voxc[2] <= e[5]:
            val = mask.GetScalarComponentAsDouble(
                voxc[0], voxc[1], voxc[2], 0)
        else:
            # outside the volume counts as "not in mask"
            val = 0.0

        if val > 0.0:
            return curp

        if norm == 0.0:
            # zero-length line and p1 missed: no intersection
            return None

        curp = curp + unit_vec * incr
        if numpy.sum(numpy.square(curp - p1)) > squared_norm:
            # walked past p2 without a hit
            return None
| Python |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""geometry_kit package driver file.
Inserts the following modules in sys.modules: geometry.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import sys
# you have to define this
VERSION = 'INTEGRATED'
def init(module_manager, pre_import=True):
    """Initialise the geometry_kit: import the geometry module and
    register it in sys.modules under its short name.

    @param module_manager: its set_progress method is used for feedback.
    @param pre_import: unused here; present for module-kit interface
        compatibility.
    """
    global geometry
    import geometry

    # if we don't do this, the module will be in sys.modules as
    # module_kits.stats_kit.stats because it's not in the sys.path.
    # iow. if a module is in sys.path, "import module" will put 'module' in
    # sys.modules.  if a module isn't, "import module" will put
    # 'relative.path.to.module' in sys.path.
    sys.modules['geometry'] = geometry

    module_manager.set_progress(100, 'Initialising geometry_kit: complete.')
def refresh():
    """Reload the geometry module in place."""
    # we have none of our own packages yet...
    global geometry
    # NOTE(review): reload() is the Python 2 builtin; on Python 3 this
    # would need importlib.reload
    reload(geometry)
| Python |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
"""sqlite_kit package driver file.
With this we make sure that sqlite3 is always packaged.
"""
VERSION = ''
def init(module_manager, pre_import=True):
    """Initialise the sqlite_kit: import sqlite3 and record the wrapper
    and library versions in the module-level VERSION.

    @param module_manager: unused; present for module-kit interface
        compatibility.
    """
    global sqlite3
    import sqlite3

    global VERSION
    # sqlite3.version (the pysqlite wrapper version) was deprecated and
    # removed in Python 3.12; fall back to the library version there.
    wrapper_version = getattr(sqlite3, 'version', sqlite3.sqlite_version)
    VERSION = '%s (sqlite %s)' % (wrapper_version,
                                  sqlite3.sqlite_version)
| Python |
# $Id: __init__.py 1945 2006-03-05 01:06:37Z cpbotha $
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""vtktudoss_kit package driver file.
Inserts the following modules in sys.modules: vtktudoss.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import re
import sys
import types
# you have to define this
VERSION = 'SVN'
def init(theModuleManager, pre_import=True):
    """Initialise the vtktudoss_kit: import vtktudoss and record its
    version in the module-level VERSION.
    """
    # import the main module itself
    # the global makes it reachable as
    # module_kits.vtktudoss_kit.vtktudoss, consistent with the other
    # kit drivers (vtk_kit, numpy_kit)
    global vtktudoss
    import vtktudoss

    # I added the version variable on 20070802
    try:
        global VERSION
        VERSION = vtktudoss.version
    except AttributeError:
        # older builds without a version attribute keep the 'SVN' default
        pass

    theModuleManager.setProgress(100, 'Initialising vtktudoss_kit')
| Python |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import ConfigParser
import glob
import os
import sys
import time
"""Top-level __init__ of the module_kits.
All .mkd files in the module_kits directory are parsed and their
corresponding module_kits are loaded. MKD specify the priority (order
of loading), the dependencies and whether they are crucial kits or
not. Error on loading a crucial kit terminates the application, error
on loading a non-crucial kit simply notifies the user.
"""
module_kit_list = []
class MKDef:
    """Definition of a single module_kit, parsed from an .mkd file."""

    def __init__(self):
        # kit name, taken from the .mkd file's basename
        self.name = ''
        # if True, failure to load this kit terminates the application
        self.crucial = False
        # load order: kits with lower priority values are loaded first
        self.priority = 100
        # names of kits that must already be loaded before this one
        self.dependencies = []
def get_sorted_mkds(module_kits_dir):
    """Given the module_kits dir, return list, sorted according to
    priority, of MKDef instances representing the mkd files that are
    found and parsed.  NoKits are NOT removed yet.
    """
    mkd_fnames = glob.glob(
        os.path.join(module_kits_dir, '*.mkd'))

    # fallbacks for options an .mkd file may leave out
    mkd_defaults = {
        'crucial' : False,
        'priority' : 100,
        'dependencies' : ''
    }

    mkds = []
    for fname in mkd_fnames:
        parser = ConfigParser.ConfigParser(mkd_defaults)
        parser.read(fname)

        mkd = MKDef()
        # kit name is the .mkd file's basename without extension
        mkd.name = os.path.splitext(os.path.basename(fname))[0]
        mkd.crucial = parser.getboolean('default', 'crucial')
        mkd.priority = parser.getint('default', 'priority')
        # comma-separated list; drop empty entries, strip whitespace
        mkd.dependencies = [dep.strip()
                            for dep in
                            parser.get('default', 'dependencies').split(',')
                            if dep]

        mkds.append(mkd)

    # sort ascending by priority (stable, like the old cmp-based sort)
    mkds.sort(key=lambda mkd: mkd.priority)

    return mkds
def load(module_manager):
    """Parse all .mkd files, then import and init() every module_kit
    that is not in the application's nokits list and whose dependencies
    loaded, in priority order.

    A crucial kit's load error is re-raised (aborting startup); a
    non-crucial kit's error is logged and startup continues.  The global
    module_kit_list ends up holding the names of the kits that actually
    loaded.
    """
    tot_start_time = time.time()

    module_kits_dir = os.path.join(
        module_manager.get_appdir(), 'module_kits')

    mkds = get_sorted_mkds(module_kits_dir)

    # then remove the nokits
    nokits = module_manager.get_app_main_config().nokits
    mkds = [mkd for mkd in mkds
            if mkd.name not in nokits]

    loaded_kit_names = []

    # load the remaining kits
    for mkd in mkds:
        # first check that all dependencies are satisfied
        deps_satisfied = True
        for d in mkd.dependencies:
            if d not in loaded_kit_names:
                deps_satisfied = False
                # break out of the for loop
                break

        if not deps_satisfied:
            # skip this iteration of the for, go to the next iteration
            # (we don't want to try loading this module)
            continue

        start_time = time.time()

        try:
            # import module_kit into module_kits namespace
            # (on Python 2, this exec also binds the name module_kits in
            # the local scope, which the getattr below relies on)
            exec('import module_kits.%s' % (mkd.name,))
            # call module_kit.init()
            getattr(module_kits, mkd.name).init(module_manager)
            # add it to the loaded_kits for dependency checking
            loaded_kit_names.append(mkd.name)

        except Exception, e:
            # if it's a crucial module_kit, we re-raise with our own
            # message added using the three argument raise form, which
            # preserves the original traceback
            # see: http://docs.python.org/ref/raise.html
            if mkd.crucial:
                es = 'Error loading required module_kit %s: %s.' \
                     % (mkd.name, str(e))
                raise Exception, es, sys.exc_info()[2]

            # if not we can report the error and continue
            else:
                module_manager.log_error_with_exception(
                    'Unable to load non-critical module_kit %s: '
                    '%s. Continuing with startup.' %
                    (mkd.name, str(e)))

        end_time = time.time()
        module_manager.log_info('Loaded %s in %.2f seconds.' %
                                (mkd.name, end_time - start_time))

    # if we got this far, startup was successful, but not all kits
    # were loaded: some not due to failure, and some not due to
    # unsatisfied dependencies.  set the current list to the list of
    # module_kits that did actually load.
    global module_kit_list
    module_kit_list = loaded_kit_names

    tot_end_time = time.time()
    module_manager.log_info(
        'Loaded ALL module_kits in %.2f seconds.' %
        (tot_end_time - tot_start_time))
| Python |
# $Id: __init__.py 1945 2006-03-05 01:06:37Z cpbotha $
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""itktudoss_kit package driver file.
This driver makes sure that itktudoss has been integrated with the main WrapITK
installation.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import re
import sys
import types
# you have to define this
VERSION = 'SVN'
def init(theModuleManager, pre_import=True):
    """Initialise the itktudoss_kit by checking that the TPGAC level-set
    filter is present in the WrapITK installation.
    """
    theModuleManager.setProgress(80, 'Initialising itktudoss_kit: TPGAC')

    import itk # this will have been pre-imported by the itk_kit

    # touch the class so we fail fast (AttributeError) when itktudoss
    # has not been integrated into WrapITK
    _probe = itk.TPGACLevelSetImageFilter

    theModuleManager.setProgress(100, 'Initialising itktudoss_kit: DONE')
| Python |
# Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application.  In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
Defines a number of functions for pseudo-command-line OS functionality.
cd(directory)
pwd <-- can be used WITHOUT parens
ls(d='.')
rename(source,dest)
get(namepatterns,verbose=1)
getstrings(namepatterns,verbose=1)
put(outlist,filename,writetype='w')
aget(namepatterns,verbose=1)
aput(outarray,filename,writetype='w')
bget(filename,numslices=1,xsize=64,ysize=64)
braw(filename,btype)
bput(outarray,filename,writeheader=0,packstring='h',writetype='wb')
mrget(filename)
find_dirs(sourcedir)
"""
## CHANGES:
## =======
## 02-11-20 ... added binget(), binput(), array2afni(), version 0.5
## 02-10-20 ... added find_dirs() function, changed version to 0.4
## 01-11-15 ... changed aput() and put() to accept a delimiter
## 01-04-19 ... added oneperline option to put() function
## 99-11-07 ... added DAs quick flat-text-file loaders, load() and fload()
## 99-11-01 ... added version number (0.1) for distribution
## 99-08-30 ... Put quickload in here
## 99-06-27 ... Changed bget thing back ... confused ...
## 99-06-24 ... exchanged xsize and ysize in bget for non-square images (NT??)
## modified bget to raise an IOError when file not found
## 99-06-12 ... added load() and save() aliases for aget() and aput() (resp.)
## 99-04-13 ... changed aget() to ignore (!!!!) lines beginning with # or %
## 99-01-17 ... changed get() so ints come in as ints (not floats)
##
try:
import mmapfile
except:
pass
import pstat
import glob, re, string, types, os, Numeric, struct, copy, time, tempfile, sys
from types import *
N = Numeric
__version__ = 0.5
def wrap(f):
    """
    Wraps a function so that if it's entered *by itself*
    in the interpreter without ()'s, it gets called anyway

    The wrapper's repr() calls f and shows repr(result), or an empty
    string when f returns a false value.
    """
    class W:
        def __init__(self, f):
            self.f = f
        def __repr__(self):
            # self.f() is identical to the long-deprecated apply(self.f)
            # and also works on Python 3, where apply() no longer exists
            x = self.f()
            if x:
                return repr(x)
            else:
                return ''
    return W(f)
def cd (directory):
    """Make 'directory' (a string) the interpreter's working directory.

    Usage:   cd(directory)
    """
    os.chdir(directory)
def pwd():
    """Return the interpreter's current working directory.

    Usage:   pwd     (no parens needed)
    """
    return os.getcwd()
pwd = wrap(pwd)
def ls(d='.'):
    """Print a directory listing via the shell 'ls' command.

    Usage:   ls(d='.')   defaults to the current directory
    """
    os.system('ls '+d)
def rename(source, dest):
    """
    Renames files specified by UNIX inpattern to those specified by UNIX
    outpattern.  Can only handle a single '*' in the two patterns!!!

    Usage:   rename (source, dest)     e.g., rename('*.txt', '*.c')

    NOTE: the literal parts of the patterns are passed to re.sub, so
    regex metacharacters in them (e.g. '.') behave as in a regex, as
    they always have in this function.
    """
    infiles = glob.glob(source)
    # positions of the single '*' in each pattern; the old code looked
    # up '*' in `source` for both, which broke any rename where the two
    # patterns put '*' at different offsets
    incutindex = source.index('*')
    outcutindex = dest.index('*')
    findpattern1 = source[0:incutindex]
    findpattern2 = source[incutindex+1:]
    replpattern1 = dest[0:outcutindex]
    replpattern2 = dest[outcutindex+1:]
    for fname in infiles:
        newname = fname
        if incutindex > 0:
            # replace the leading literal part
            newname = re.sub(findpattern1, replpattern1, fname, 1)
        if outcutindex < len(dest)-1:
            # replace the LAST occurrence of the trailing literal part;
            # search and slice newname (not fname), whose prefix may
            # already differ in length after the first substitution
            lastone = newname.rfind(findpattern2)
            if lastone != -1:
                newname = newname[0:lastone] + \
                          re.sub(findpattern2, replpattern2,
                                 newname[lastone:], 1)
        os.rename(fname, newname)
    return
def get (namepatterns,verbose=1):
    """
    Loads a list of lists from text files (specified by a UNIX-style
    wildcard filename pattern) and converts all numeric values to floats.
    Uses the glob module for filename pattern conversion.  Loaded filename
    is printed if verbose=1.

    Usage:   get (namepatterns,verbose=1)
    Returns: a 1D or 2D list of lists from whitespace delimited text files
             specified by namepatterns; numbers that can be converted to floats
             are so converted
    """
    fnames = []
    # namepatterns may be one pattern or a sequence of patterns
    if type(namepatterns) in [ListType,TupleType]:
        for item in namepatterns:
            fnames = fnames + glob.glob(item)
    else:
        fnames = glob.glob(namepatterns)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepatterns+') !!'
        return None

    if verbose:
        print fnames             # so user knows what has been loaded
    elements = []
    for i in range(len(fnames)):
        file = open(fnames[i])
        # one sub-list of whitespace-separated tokens per input line
        newelements = map(string.split,file.readlines())
        # convert each token to int, then float; leave it a string if
        # both conversions fail.  (NOTE: the inner loop reuses the name
        # i; harmless, as the outer for rebinds i from its own iterator)
        for i in range(len(newelements)):
            for j in range(len(newelements[i])):
                try:
                    newelements[i][j] = string.atoi(newelements[i][j])
                except ValueError:
                    try:
                        newelements[i][j] = string.atof(newelements[i][j])
                    except:
                        pass
        elements = elements + newelements
    # a single-line result is flattened to 1D
    if len(elements)==1:  elements = elements[0]
    return elements
def getstrings (namepattern,verbose=1):
    """
    Loads a (set of) text file(s), with all elements left as string type.
    Uses UNIX-style wildcards (i.e., function uses glob).  Loaded filename
    is printed if verbose=1.

    Usage:   getstrings (namepattern, verbose=1)
    Returns: a list of lists of string tokens (one sub-list per line in
             each text file specified by namepattern)
    """
    fnames = glob.glob(namepattern)
    if len(fnames) == 0:
        if verbose:
            # warn on stdout rather than raising; caller gets None back
            print 'NO FILENAMES MATCH ('+namepattern+') !!'
        return None
    if verbose:
        print fnames
    elements = []
    for filename in fnames:
        file = open(filename)
        # split each line on whitespace, keeping tokens as strings
        newelements = map(string.split,file.readlines())
        elements = elements + newelements
    return elements
def put (outlist,fname,writetype='w',oneperline=0,delimit=' '):
    """
    Writes a passed mixed-type list (str and/or numbers) to an output
    file, and then closes the file.  Default is overwrite the destination
    file.

    Usage:   put (outlist,fname,writetype='w',oneperline=0,delimit=' ')
    Returns: None
    """
    # arrays are delegated to the array writer
    if type(outlist) in [N.ArrayType]:
        aput(outlist,fname,writetype)
        return

    if type(outlist[0]) not in [ListType,TupleType]:
        # 1D list
        outfile = open(fname,writetype)
        if oneperline:
            # one element from the list on each file line
            for item in outlist:
                outfile.write(str(item)+'\n')
        else:
            # whole list on one delimited line
            outfile.write(pstat.list2string(outlist,delimit))
            outfile.write('\n')
        outfile.close()
    else:
        # 2D list (list-of-lists): one delimited row per line
        outfile = open(fname,writetype)
        for row in outlist:
            outfile.write(pstat.list2string(row,delimit))
            outfile.write('\n')
        outfile.close()
    return None
def isstring(x):
    """Return 1 if x is exactly of string type, else 0."""
    if type(x)==StringType:
        return 1
    return 0
def aget (namepattern,verbose=1):
    """
    Loads an array from 2D text files (specified by a UNIX-style wildcard
    filename pattern).  ONLY 'GET' FILES WITH EQUAL NUMBERS OF COLUMNS
    ON EVERY ROW (otherwise returned array will be zero-dimensional).

    Usage:   aget (namepattern)
    Returns: an array of integers, floats or objects (type='O'), depending on the
             contents of the files specified by namepattern
    """
    fnames = glob.glob(namepattern)
    if len(fnames) == 0:
        if verbose:
            print 'NO FILENAMES MATCH ('+namepattern+') !!'
        return None
    if verbose:
        print fnames
    elements = []
    for filename in fnames:
        file = open(filename)
        newelements = file.readlines()
        # drop comment lines (starting with % or #) and blank lines
        del_list = []
        for row in range(len(newelements)):
            if (newelements[row][0]=='%' or newelements[row][0]=='#'
                or len(newelements[row])==1):
                del_list.append(row)
        # delete from the back so earlier indices stay valid
        del_list.reverse()
        for i in del_list:
            newelements.pop(i)
        newelements = map(string.split,newelements)
        # convert tokens to floats where possible, else leave as strings
        for i in range(len(newelements)):
            for j in range(len(newelements[i])):
                try:
                    newelements[i][j] = string.atof(newelements[i][j])
                except:
                    pass
        elements = elements + newelements
    for row in range(len(elements)):
        # a row consisting entirely of strings forces a plain-list result
        if N.add.reduce(N.array(map(isstring,elements[row])))==len(elements[row]):
            print "A row of strings was found.  Returning a LIST."
            return elements
    try:
        elements = N.array(elements)
    except TypeError:
        # ragged rows: fall back to an object array
        elements = N.array(elements,'O')
    return elements
def aput (outarray,fname,writetype='w',delimit=' '):
    """
    Sends passed 1D or 2D array to an output file and closes the file.

    Usage:   aput (outarray,fname,writetype='w',delimit=' ')
    Returns: None
    """
    outfile = open(fname,writetype)
    if len(outarray.shape) == 1:
        # promote a 1D array to a single-row 2D array
        outarray = outarray[N.NewAxis,:]
    if len(outarray.shape) > 2:
        raise TypeError, "put() and aput() require 1D or 2D arrays.  Otherwise use some kind of pickling."
    else: # must be a 2D array
        # one delimited row per output line
        for row in outarray:
            outfile.write(string.join(map(str,row),delimit))
            outfile.write('\n')
    outfile.close()
    return None
def bget(imfile,shp=None,unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0):
    """
    Reads in a binary file, typically with a .bshort or .bfloat extension.
    If so, the last 3 parameters are set appropriately.  If not, the last 3
    parameters default to reading .bshort files (2-byte integers in big-endian
    binary format).

    Dispatches on the filename to the appropriate format-specific loader.

    Usage:   bget(imfile,shp=None,unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0)
    """
    # COR-nnn flattening files
    if imfile[:3] == 'COR':
        return CORget(imfile)
    # .MR files
    if imfile[-2:] == 'MR':
        return mrget(imfile,unpackstr)
    # AFNI BRIK files
    if imfile[-4:] == 'BRIK':
        return brikget(imfile,unpackstr,shp)
    # MINC files
    if imfile[-3:] in ['mnc','MNC']:
        return mincget(imfile,unpackstr,shp)
    # img files
    if imfile[-3:] == 'img':
        return mghbget(imfile,unpackstr,shp)
    # MGH bshort/bfloat, with or without an explicit shape
    if imfile[-6:] == 'bshort' or imfile[-6:] == 'bfloat':
        if shp is None:
            return mghbget(imfile,unpackstr=unpackstr,bytesperpixel=bytesperpixel,sliceinit=sliceinit)
        else:
            return mghbget(imfile,shp[0],shp[1],shp[2],unpackstr,bytesperpixel,sliceinit)
def CORget(infile):
    """
    Reads a binary COR-nnn file (flattening file).

    Usage:   CORget(imfile)
    Returns: 2D array of 16-bit ints
    """
    rawdata = braw(infile,N.Int8)
    rawdata.shape = (256,256)
    # map the signed byte values onto the 0..255 range
    rawdata = N.where(N.greater_equal(rawdata,0),rawdata,256+rawdata)
    return rawdata
def mincget(imfile,unpackstr=N.Int16,shp=None):
    """
    Loads in a .MNC file.

    Usage:   mincget(imfile,unpackstr=N.Int16,shp=None)  default shp = -1,20,64,64
    """
    if shp == None:
        shp = (-1,20,64,64)
    # shell out to the MINC tools to dump the file as a flat bshort
    os.system('mincextract -short -range 0 4095 -image_range 0 4095 ' +
              imfile+' > minctemp.bshort')
    try:
        d = braw('minctemp.bshort',unpackstr)
    except:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in mincget()"

    print shp, d.shape
    d.shape = shp
    # remove the temporary dump file
    os.system('rm minctemp.bshort')
    return d
def brikget(imfile,unpackstr=N.Int16,shp=None):
    """
    Gets an AFNI BRIK file.

    Usage:   brikget(imfile,unpackstr=N.Int16,shp=None)  default shp: (-1,48,61,51)
    """
    if shp == None:
        shp = (-1,48,61,51)
    try:
        file = open(imfile, "rb")
    except:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in brikget()"
    try:
        # parse the companion .HEAD file for the dimensions and the
        # per-brick float scaling factors
        header = imfile[0:-4]+'HEAD'
        lines = open(header).readlines()
        for i in range(len(lines)):
            if string.find(lines[i],'DATASET_DIMENSIONS') <> -1:
                dims = string.split(lines[i+2][0:string.find(lines[i+2],'  0')])
                dims = map(string.atoi,dims)
            if string.find(lines[i],'BRICK_FLOAT_FACS') <> -1:
                count = string.atoi(string.split(lines[i+1])[2])
                mults = []
                # factors are written 5 per line
                for j in range(int(N.ceil(count/5.))):
                    mults += map(string.atof,string.split(lines[i+2+j]))
                mults = N.array(mults)
        dims.reverse()
        shp = [-1]+dims
    except IOError:
        # NOTE(review): when the header is missing, dims/mults stay
        # unbound and the code below raises NameError — confirm .HEAD
        # files always accompany .BRIK files here
        print "No header file. Continuing ..."
        lines = None

    print shp
    print 'Using unpackstr:',unpackstr #,', bytesperpixel=',bytesperpixel

    file = open(imfile, "rb")
    bdata = file.read()

    # the > forces big-endian (for or from Sun/SGI)
    bdata = N.fromstring(bdata,unpackstr)
    littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
    # byteswap on little-endian non-Linux hosts, or when values look
    # absurdly large (heuristic for wrong endianness)
    if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
        bdata = bdata.byteswapped()
    try:
        bdata.shape = shp
    except:
        print 'Incorrect shape ...',shp,len(bdata)
        raise ValueError, 'Incorrect shape for file size'
    if len(bdata) == 1:
        bdata = bdata[0]

    if N.sum(mults) == 0:
        # no scaling factors: return raw integer data
        return bdata
    try:
        # broadcast mults along whichever axis matches its length
        multshape = [1]*len(bdata.shape)
        for i in range(len(bdata.shape)):
            if len(mults) == bdata.shape[i]:
                multshape[i] = len(mults)
                break
        mults.shape = multshape
        return bdata*mults
    except:
        return bdata
def mghbget(imfile,numslices=-1,xsize=64,ysize=64,
            unpackstr=N.Int16,bytesperpixel=2.0,sliceinit=0):
    """
    Reads in a binary file, typically with a .bshort or .bfloat extension.
    If so, the last 3 parameters are set appropriately.  If not, the last 3
    parameters default to reading .bshort files (2-byte integers in big-endian
    binary format).

    Usage:   mghbget(imfile, numslices=-1, xsize=64, ysize=64,
                     unpackstr=N.Int16, bytesperpixel=2.0, sliceinit=0)
    """
    try:
        file = open(imfile, "rb")
    except:
        print "Couldn't find file: "+imfile
        raise IOError, "Couldn't find file in bget()"
    try:
        # companion .hdr file overrides xsize, ysize, numslices
        header = imfile[0:-6]+'hdr'
        vals = get(header,0)  # '0' means no missing-file warning msg
        if type(vals[0]) == ListType:  # it's an extended header
            xsize = int(vals[0][0])
            ysize = int(vals[0][1])
            numslices = int(vals[0][2])
        else:
            xsize = int(vals[0])
            ysize = int(vals[1])
            numslices = int(vals[2])
    except:
        print "No header file. Continuing ..."

    suffix = imfile[-6:]
    if suffix == 'bshort':
        pass
    elif suffix[-3:] == 'img':
        pass
    elif suffix == 'bfloat':
        # bfloat: 4-byte big-endian floats
        unpackstr = N.Float32
        bytesperpixel = 4.0
        sliceinit = 0.0
    else:
        print 'Not a bshort, bfloat or img file.'
        print 'Using unpackstr:',unpackstr,', bytesperpixel=',bytesperpixel

    imsize = xsize*ysize
    file = open(imfile, "rb")
    bdata = file.read()

    numpixels = len(bdata) / bytesperpixel
    if numpixels%1 != 0:
        raise ValueError, "Incorrect file size in fmri.bget()"
    else:  # the > forces big-endian (for or from Sun/SGI)
        bdata = N.fromstring(bdata,unpackstr)
        littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
        # if littleEndian:
        #     bdata = bdata.byteswapped()
        # byteswap on little-endian non-Linux hosts, or when values look
        # absurdly large (heuristic for wrong endianness)
        if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
            bdata = bdata.byteswapped()
    if suffix[-3:] == 'img':
        if numslices == -1:
            numslices = len(bdata)/8200  # 8200=(64*64*2)+8 bytes per image
            xsize = 64
            ysize = 128
        slices = N.zeros((numslices,xsize,ysize),N.Int)
        for i in range(numslices):
            # each image is preceded by an 8-byte (4-element) header
            istart = i*8 + i*xsize*ysize
            iend = i*8 + (i+1)*xsize*ysize
            print i, istart,iend
            slices[i] = N.reshape(N.array(bdata[istart:iend]),(xsize,ysize))
    else:
        if numslices == 1:
            slices = N.reshape(N.array(bdata),[xsize,ysize])
        else:
            slices = N.reshape(N.array(bdata),[numslices,xsize,ysize])
    if len(slices) == 1:
        slices = slices[0]
    return slices
def braw(fname,btype,shp=None):
    """
    Opens a binary file, unpacks it, and returns a flat array of the
    type specified.  Use Numeric types ... N.Float32, N.Int64, etc.

    Usage:   braw(fname,btype,shp=None)
    Returns: flat array of floats, or ints (if btype=N.Int16); reshaped
             to shp when a compatible shp is given
    """
    file = open(fname,'rb')
    bdata = file.read()
    bdata = N.fromstring(bdata,btype)
    littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
    # if littleEndian:
    #     bdata = bdata.byteswapped()  # didn't used to need this with '>' above
    # byteswap on little-endian non-Linux hosts, or when values look
    # absurdly large (heuristic for wrong endianness)
    if (littleEndian and os.uname()[0]<>'Linux') or (max(bdata)>1e30):
        bdata = bdata.byteswapped()
    if shp:
        try:
            bdata.shape = shp
            return bdata
        except:
            # incompatible shape requested: fall through to flat array
            pass
    return N.array(bdata)
def glget(fname,btype):
    """
    Load in a file containing pixels from glReadPixels dump.

    Usage:   glget(fname,btype)
    Returns: array of 'btype elements with shape 'shape', suitable for im.ashow()
    """
    d = braw(fname,btype)
    # the first 8 bytes of the dump hold the (y,x) shape
    d = d[8:]
    f = open(fname,'rb')
    shp = f.read(8)
    f.close()
    shp = N.fromstring(shp,N.Int)
    shp[0],shp[1] = shp[1],shp[0]
    # first try to interpret the data as a single (greyscale) plane ...
    try:
        carray = N.reshape(d,shp)
        # BUGFIX: this used to be a bare "return", silently discarding
        # the computed array and returning None
        return carray
    except:
        pass
    # ... otherwise treat it as interleaved RGB triplets
    try:
        r = d[0::3]+0
        g = d[1::3]+0
        b = d[2::3]+0
        r.shape = shp
        g.shape = shp
        b.shape = shp
        carray = N.array([r,g,b])
    except:
        outstr = "glget: shape not correct for data of length "+str(len(d))
        raise ValueError(outstr)
    return carray
def mget(fname,btype):
    """
    Load in a file that was saved from matlab

    Usage:   mget(fname,btype)
    """
    d = braw(fname,btype)
    try:
        # companion .hdr file holds xsize, ysize, numslices
        header = fname[0:-6]+'hdr'
        vals = get(header,0)  # '0' means no missing-file warning msg
        if type(vals[0]) == ListType:  # it's an extended header
            xsize = int(vals[0][0])
            ysize = int(vals[0][1])
            numslices = int(vals[0][2])
        else:
            xsize = int(vals[0])
            ysize = int(vals[1])
            numslices = int(vals[2])
        print xsize,ysize,numslices, d.shape
    except:
        # NOTE(review): when the header is missing, xsize/ysize/numslices
        # stay unbound and the code below raises NameError — confirm the
        # header always exists for matlab dumps
        print "No header file. Continuing ..."
    if numslices == 1:
        d.shape = [ysize,xsize]
        # transpose back from matlab's column-major layout; *1 copies
        return N.transpose(d)*1
    else:
        d.shape = [numslices,ysize,xsize]
        return N.transpose(d)*1
def mput(outarray,fname,writeheader=0,btype=N.Int16):
"""
Save a file for use in matlab.
"""
outarray = N.transpose(outarray)
outdata = N.ravel(outarray).astype(btype)
outdata = outdata.tostring()
outfile = open(fname,'wb')
outfile.write(outdata)
outfile.close()
if writeheader == 1:
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex]
except ValueError:
hdrname = fname
if len(outarray.shape) == 2:
hdr = [outarray.shape[1],outarray.shape[0], 1, 0]
else:
hdr = [outarray.shape[2],outarray.shape[1],outarray.shape[0], 0,'\n']
print hdrname+'.hdr'
outfile = open(hdrname+'.hdr','w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
def bput(outarray,fname,writeheader=0,packtype=N.Int16,writetype='wb'):
"""
Writes the passed array to a binary output file, and then closes
the file. Default is overwrite the destination file.
Usage: bput (outarray,filename,writeheader=0,packtype=N.Int16,writetype='wb')
"""
suffix = fname[-6:]
if suffix == 'bshort':
packtype = N.Int16
elif suffix == 'bfloat':
packtype = N.Float32
else:
print 'Not a bshort or bfloat file. Using packtype=',packtype
outdata = N.ravel(outarray).astype(packtype)
littleEndian = ( struct.pack('i',1)==struct.pack('<i',1) )
if littleEndian and os.uname()[0]<>'Linux':
outdata = outdata.byteswapped()
outdata = outdata.tostring()
outfile = open(fname,writetype)
outfile.write(outdata)
outfile.close()
if writeheader == 1:
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex]
except ValueError:
hdrname = fname
if len(outarray.shape) == 2:
hdr = [outarray.shape[0],outarray.shape[1], 1, 0]
else:
hdr = [outarray.shape[1],outarray.shape[2],outarray.shape[0], 0,'\n']
print hdrname+'.hdr'
outfile = open(hdrname+'.hdr','w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
def mrget(fname,datatype=N.Int16):
    """
    Opens a binary .MR file and clips off the tail data portion of it, returning
    the result as an array.  The image is assumed to be the LAST dim*dim
    values in the file, for the largest power-of-two dim that fits.
    Usage:   mrget(fname,datatype=N.Int16)
    """
    rawdata = braw(fname,datatype)
    # try the candidate image sizes from largest to smallest
    for dim in [512, 256, 128, 64]:
        if len(rawdata) > dim*dim:
            return N.reshape(rawdata[-dim*dim:],(dim,dim))
    return N.reshape(rawdata[-32*32:],(32,32))
def quickload(fname,linestocut=4):
"""
Quickly loads in a long text file, chopping off first n 'linestocut'.
Usage: quickload(fname,linestocut=4)
Returns: array filled with data in fname
"""
f = open(fname,'r')
d = f.readlines()
f.close()
print fname,'read in.'
d = d[linestocut:]
d = map(string.split,d)
print 'Done with string.split on lines.'
for i in range(len(d)):
d[i] = map(string.atoi,d[i])
print 'Conversion to ints done.'
return N.array(d)
def writedelimited (listoflists, delimiter, file, writetype='w'):
    """
    Writes a list of lists in columns, separated by character(s) delimiter
    to specified file. File-overwrite is the default.
    Rows equal to '\n' produce blank lines and rows equal to 'dashes'
    produce a row of dash markers.
    Usage: writedelimited (listoflists,delimiter,filename,writetype='w')
    Returns: None
    """
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]     # promote a 1D list to a single row
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # collect indices of the special marker rows ('\n'/'dashes') ...
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    # ... and delete them back-to-front so earlier indices stay valid
    for row in rowstokill:
        del list2print[row]
    maxsize = [0]*len(list2print[0])    # only used to size a 'dashes' row
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '------'
            outfile.write(pstat.linedelimited(dashes,delimiter))
        else:
            outfile.write(pstat.linedelimited(row,delimiter))
        outfile.write('\n')
    outfile.close()
    return None
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file. File-overwrite is the default.
    Rows equal to '\n' produce blank lines and rows equal to 'dashes'
    produce a row of dashes sized to each column.
    Usage: writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]     # promote a 1D list to a single row
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # delete the special marker rows (back-to-front) so column widths are
    # computed over real data rows only
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    # per-column width: longest stringified item plus 'extra' padding
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = map(pstat.makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def writefc (listoflists,colsize,file,writetype='w'):
    """
    Writes a list of lists to a file in columns of fixed size.  File-overwrite
    is the default.  Rows equal to '\n' produce blank lines and rows equal to
    'dashes' produce a row of dashes.
    Usage:   writefc (listoflists,colsize,file,writetype='w')
    Returns: None
    """
    if type(listoflists) == N.ArrayType:
        listoflists = listoflists.tolist()
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]     # promote a 1D list to a single row
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # delete the special marker rows (back-to-front) before counting columns
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    n = [0]*len(list2print[0])          # one slot per data COLUMN
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            # BUG FIX: the original sized this list by colsize (the column
            # WIDTH in characters), which raised IndexError when there were
            # more columns than colsize and emitted spurious '0' columns
            # when there were fewer.  Build one dash-string per column.
            dashes = ['-'*colsize]*len(n)
            outfile.write(pstat.lineincols(dashes,colsize))
        else:
            outfile.write(pstat.lineincols(row,colsize))
        outfile.write('\n')
    outfile.close()
    return None
def load(fname,lines_to_ignore=4,type='i'):
    """
    Load in huge, flat, 2D text files. Can handle differing line-lengths AND
    can strip #/% on UNIX (or with a better NT grep). Requires wc, grep, and
    mmapfile.lib/.pyd. Type can be 'i', 'f' or 'd', for ints, floats or doubles,
    respectively. Lines_to_ignore determines how many lines at the start of the
    file to ignore (required for non-working grep).
    Usage: load(fname,lines_to_ignore=4,type='i')
    Returns: numpy array of specified type
    """
    start = time.time() ## START TIMER
    if type == 'i':
        intype = int
    elif type in ['f','d']:
        intype = float
    else:
        raise ValueError, "type can be 'i', 'f' or 'd' in load()"
    ## STRIP OUT % AND # LINES
    # NOTE(review): fname is interpolated into a shell command unescaped --
    # unsafe for untrusted filenames (shell injection); confirm callers
    # only pass trusted paths
    tmpname = tempfile.mktemp()
    if sys.platform == 'win32':
        # NT VERSION OF GREP DOESN'T DO THE STRIPPING ... SIGH
        cmd = "grep.exe -v \'%\' "+fname+" > "+tmpname
        print cmd
        os.system(cmd)
    else:
        # UNIX SIDE SHOULD WORK
        cmd = "cat "+fname+" | grep -v \'%\' |grep -v \'#\' > "+tmpname
        print cmd
        os.system(cmd)
    ## GET NUMBER OF ROWS, COLUMNS AND LINE-LENGTH, USING WC
    wc = string.split(os.popen("wc "+tmpname).read())
    numlines = int(wc[0]) - lines_to_ignore
    # column count is taken from the first data line after the ignored ones
    tfp = open(tmpname)
    if lines_to_ignore <> 0:
        for i in range(lines_to_ignore):
            junk = tfp.readline()
    numcols = len(string.split(tfp.readline())) #int(float(wc[1])/numlines)
    tfp.close()
    ## PREPARE INPUT SPACE
    a = N.zeros((numlines*numcols), type)
    block = 65536 # chunk to read, in bytes
    data = mmapfile.mmapfile(tmpname, '', 0)
    if lines_to_ignore <> 0 and sys.platform == 'win32':
        for i in range(lines_to_ignore):
            junk = data.readline()
    # read in 'block'-sized chunks; 'carryover' holds the trailing partial
    # line of each chunk so every parsed chunk ends on a newline boundary
    i = 0
    d = ' '
    carryover = ''
    while len(d) <> 0:
        d = carryover + data.read(block)
        cutindex = string.rfind(d,'\n')
        carryover = d[cutindex+1:]
        d = d[:cutindex+1]
        d = map(intype,string.split(d))
        a[i:i+len(d)] = d       # fill the flat array sequentially
        i = i + len(d)
    end = time.time()
    print "%d sec" % round(end-start,2)
    data.close()
    os.remove(tmpname)
    return N.reshape(a,[numlines,numcols])
def find_dirs(sourcedir):
    """Finds and returns all directories in sourcedir
    Usage:   find_dirs(sourcedir)
    Returns: list of directory names (potentially empty)
    """
    # keep only the entries that are themselves directories
    return [entry for entry in os.listdir(sourcedir)
            if os.path.isdir(os.path.join(sourcedir,entry))]
# ALIASES ...
save = aput     # backward-compatible name for aput (defined earlier in this file)
def binget(fname,btype=None):
"""
Loads a binary file from disk. Assumes associated hdr file is in same
location. You can force an unpacking type, or else it tries to figure
it out from the filename (4th-to-last character). Hence, readable file
formats are ...
1bin=Int8, sbin=Int16, ibin=Int32, fbin=Float32, dbin=Float64, etc.
Usage: binget(fname,btype=None)
Returns: data in file fname of type btype
"""
file = open(fname,'rb')
bdata = file.read()
file.close()
# if none given, assume character preceeding 'bin' is the unpacktype
if not btype:
btype = fname[-4]
try:
bdata = N.fromstring(bdata,btype)
except:
raise ValueError, "Bad unpacking type."
# force the data on disk to be LittleEndian (for more efficient PC/Linux use)
if not N.LittleEndian:
bdata = bdata.byteswapped()
try:
header = fname[:-3]+'hdr'
vals = get(header,0) # '0' means no missing-file warning msg
print vals
if type(vals[0]) == ListType: # it's an extended header
xsize = int(vals[0][0])
ysize = int(vals[0][1])
numslices = int(vals[0][2])
else:
bdata.shape = vals
except:
print "No (or bad) header file. Returning unshaped array."
return N.array(bdata)
def binput(outarray,fname,packtype=None,writetype='wb'):
"""
Unravels outarray and writes the data to a file, always in LittleEndian
format, along with a header file containing the original data shape. Default
is overwrite the destination file. Tries to figure out packtype from
4th-to-last character in filename. Thus, the routine understands these
file formats ...
1bin=Int8, sbin=Int16, ibin=Int32, fbin=Float32, dbin=Float64, etc.
Usage: binput(outarray,filename,packtype=None,writetype='wb')
"""
if not packtype:
packtype = fname[-4]
# a speck of error checking
if packtype == N.Int16 and outarray.typecode() == 'f':
# check to see if there's data loss
if max(N.ravel(outarray)) > 32767 or min(N.ravel(outarray))<-32768:
print "*** WARNING: CONVERTING FLOAT DATA TO OUT-OF RANGE INT16 DATA"
outdata = N.ravel(outarray).astype(packtype)
# force the data on disk to be LittleEndian (for more efficient PC/Linux use)
if not N.LittleEndian:
outdata = outdata.byteswapped()
outdata = outdata.tostring()
outfile = open(fname,writetype)
outfile.write(outdata)
outfile.close()
# Now, write the header file
try:
suffixindex = string.rfind(fname,'.')
hdrname = fname[0:suffixindex+2]+'hdr' # include .s or .f or .1 or whatever
except ValueError:
hdrname = fname
hdr = outarray.shape
print hdrname
outfile = open(hdrname,'w')
outfile.write(pstat.list2string(hdr))
outfile.close()
return None
def array2afni(d,brikprefix,voltype=None,TR=2.0,sliceorder='seqplus',geomparent=None,view=None):
    """
    Converts an array 'd' to an AFNI BRIK/HEAD combo via putbin and to3d. Tries to
    guess the AFNI volume type
    voltype = {'-anat','-epan','-fim'}
    geomparent = filename of the afni BRIK file with the same geometry
    view = {'tlrc', 'acpc' or 'orig'}
    Usage: array2afni(d,brikprefix,voltype=None,TR=2.0,
    sliceorder='seqplus',geomparent=None,view=None)
    Returns: None
    """
    # converts Numeric typecode()s into appropriate strings for to3d command line
    typecodemapping = {'c':'b', # character
                       'b':'b', # UnsignedInt8
                       'f':'f', # Float0, Float8, Float16, Float32
                       'd':'f', # Float64
                       '1':'b', # Int0, Int8
                       's':'',  # Int16
                       'i':'i', # Int32
                       'l':'i'} # Int
    # Verify that the data is proper size (3- or 4-D)
    if len(d.shape) not in [3,4]:
        raise ValueError, "A 3D or 4D array is required for array2afni() ... %s" %d.shape
    # Save out the array to a binary file, homebrew style
    # (Float64 is downcast to Float32 because to3d has no double type)
    if d.typecode() == N.Float64:
        outcode = 'f'
    else:
        outcode = d.typecode()
    tmpoutname = 'afnitmp.%sbin' % outcode
    binput(d.astype(outcode),tmpoutname)
    if not voltype:
        if len(d.shape) == 3:  # either anatomy or functional
            if d.typecode() in ['s','i','l']:  # if floats, assume functional
                voltype = '-anat'
            else:
                voltype = '-fim'
        else:  # 4D dataset, must be anatomical timeseries (epan)
            voltype = '-anat'
    # derive slice/timepoint counts; 4D data also needs a -time:zt clause
    if len(d.shape) == 3:  # either anatomy or functional
        timepts = 1
        slices = d.shape[0]
        timestr = ''
    elif len(d.shape) == 4:
        timepts = d.shape[0]
        slices = d.shape[1]
        timestr = '-time:zt %d %d %0.3f %s ' % (slices,timepts,TR,sliceorder)
    # assemble and run the to3d command line, then clean up the temp files
    cmd = 'to3d %s -prefix %s -session . ' % (voltype, brikprefix)
    if view:
        cmd += '-view %s ' % view
    if geomparent:
        cmd += '-geomparent %s ' % geomparent
    cmd += timestr
    cmd += '3D%s:0:0:%d:%d:%d:%s' % (typecodemapping[d.typecode()],d.shape[-1],d.shape[-2],slices*timepts,tmpoutname)
    print cmd
    os.system(cmd)
    os.remove(tmpoutname)
    os.remove(tmpoutname[:-3]+'hdr')
| Python |
# Copyright (c) 1999-2000 Gary Strangman; All Rights Reserved.
#
# This software is distributable under the terms of the GNU
# General Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise
# using this module constitutes acceptance of the terms of this License.
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application.  In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
pstat.py module
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Jun 29, 2001 ###########
#################################################
This module provides some useful list and array manipulation routines
modeled after those found in the |Stat package by Gary Perlman, plus a
number of other useful list/file manipulation functions. The list-based
functions include:
abut (source,*args)
simpleabut (source, addon)
colex (listoflists,cnums)
collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
dm (listoflists,criterion)
flat (l)
linexand (listoflists,columnlist,valuelist)
linexor (listoflists,columnlist,valuelist)
linedelimited (inlist,delimiter)
lineincols (inlist,colsize)
lineincustcols (inlist,colsizes)
list2string (inlist)
makelol(inlist)
makestr(x)
printcc (lst,extra=2)
printincols (listoflists,colsize)
pl (listoflists)
printl(listoflists)
replace (lst,oldval,newval)
recode (inlist,listmap,cols='all')
remap (listoflists,criterion)
roundlist (inlist,num_digits_to_round_floats_to)
sortby(listoflists,sortcols)
unique (inlist)
duplicates(inlist)
writedelimited (listoflists, delimiter, file, writetype='w')
Some of these functions have alternate versions which are defined only if
Numeric (NumPy) can be imported. These functions are generally named as
above, with an 'a' prefix.
aabut (source, *args)
acolex (a,indices,axis=1)
acollapse (a,keepcols,collapsecols,sterr=0,ns=0)
adm (a,criterion)
alinexand (a,columnlist,valuelist)
alinexor (a,columnlist,valuelist)
areplace (a,oldval,newval)
arecode (a,listmap,col='all')
arowcompare (row1, row2)
arowsame (row1, row2)
asortrows(a,axis=0)
aunique(inarray)
aduplicates(inarray)
Currently, the code is all but completely un-optimized. In many cases, the
array versions of functions amount simply to aliases to built-in array
functions/methods. Their inclusion here is for function name consistency.
"""
## CHANGE LOG:
## ==========
## 01-11-15 ... changed list2string() to accept a delimiter
## 01-06-29 ... converted exec()'s to eval()'s to make compatible with Py2.1
## 01-05-31 ... added duplicates() and aduplicates() functions
## 00-12-28 ... license made GPL, docstring and import requirements
## 99-11-01 ... changed version to 0.3
## 99-08-30 ... removed get, getstrings, put, aget, aput (into io.py)
## 03/27/99 ... added areplace function, made replace fcn recursive
## 12/31/98 ... added writefc function for ouput to fixed column sizes
## 12/07/98 ... fixed import problem (failed on collapse() fcn)
## added __version__ variable (now 0.2)
## 12/05/98 ... updated doc-strings
## added features to collapse() function
## added flat() function for lists
## fixed a broken asortrows()
## 11/16/98 ... fixed minor bug in aput for 1D arrays
##
## 11/08/98 ... fixed aput to output large arrays correctly
import stats # required 3rd party module
import string, copy
from types import *
__version__ = 0.4
###=========================== LIST FUNCTIONS ==========================
###
### Here are the list functions, DEFINED FOR ALL SYSTEMS.
### Array functions (for NumPy-enabled computers) appear below.
###
def abut (source,*args):
    """
    Like the |Stat abut command.  It concatenates two lists side-by-side
    and returns the result.  '2D' lists are also accomodated for either argument
    (source or addon).  CAUTION:  If one list is shorter, it will be repeated
    until it is as long as the longest list.  If this behavior is not desired,
    use pstat.simpleabut().
    Usage:   abut(source, args)   where args=any # of lists
    Returns: a list of lists as long as the LONGEST list past, source on the
             'left', lists in <args> attached consecutively on the 'right'
    """
    if type(source) not in [ListType,TupleType]:
        source = [source]       # promote a scalar to a 1-element list
    for addon in args:
        if type(addon) not in [ListType,TupleType]:
            addon = [addon]
        # tile the shorter list up to the length of the longer; when the
        # lengths are not integer multiples, over-tile then truncate
        if len(addon) < len(source):                # is source list longer?
            if len(source) % len(addon) == 0:       # are they integer multiples?
                repeats = len(source)/len(addon)    # repeat addon n times
                origadd = copy.deepcopy(addon)
                for i in range(repeats-1):
                    addon = addon + origadd
            else:
                repeats = len(source)/len(addon)+1  # repeat addon x times,
                origadd = copy.deepcopy(addon)      # x is NOT an integer
                for i in range(repeats-1):
                    addon = addon + origadd
                addon = addon[0:len(source)]
        elif len(source) < len(addon):              # is addon list longer?
            if len(addon) % len(source) == 0:       # are they integer multiples?
                repeats = len(addon)/len(source)    # repeat source n times
                origsour = copy.deepcopy(source)
                for i in range(repeats-1):
                    source = source + origsour
            else:
                repeats = len(addon)/len(source)+1  # repeat source x times,
                origsour = copy.deepcopy(source)    # x is NOT an integer
                for i in range(repeats-1):
                    source = source + origsour
                source = source[0:len(addon)]
        source = simpleabut(source,addon)   # column-wise concatenation
    return source
def simpleabut (source, addon):
    """
    Concatenates two lists as columns and returns the result.  '2D' lists
    are also accomodated for either argument (source or addon).  This DOES NOT
    repeat either list to make the 2 lists of equal length.  Beware of list pairs
    with different lengths ... the resulting list will be the length of the
    FIRST list passed.
    Usage:   simpleabut(source,addon)  where source, addon=list (or list-of-lists)
    Returns: a list of lists as long as source, with source on the 'left' and
             addon on the 'right'
    """
    if type(source) not in [ListType,TupleType]:
        source = [source]
    if type(addon) not in [ListType,TupleType]:
        addon = [addon]
    minlen = min(len(source),len(addon))
    combined = copy.deepcopy(source)    # rows beyond minlen keep source values
    # decide once whether each input is 2D (rows are lists/tuples) or 1D
    source_is_2d = type(source[0]) in [ListType,TupleType]
    addon_is_2d = type(addon[0]) in [ListType,TupleType]
    for i in range(minlen):
        if source_is_2d:
            left = source[i]
        else:
            left = [source[i]]      # wrap a scalar so it forms a column
        if addon_is_2d:
            right = addon[i]
        else:
            right = [addon[i]]
        combined[i] = left + right
    return combined
def colex (listoflists,cnums):
    """
    Extracts from listoflists the columns specified in the list 'cnums'
    (cnums can be an integer, a sequence of integers, or a string-expression that
    corresponds to a slice operation on the variable x ... e.g., 'x[3:]' will colex
    columns 3 onward from the listoflists).
    Usage:   colex (listoflists,cnums)
    Returns: a list-of-lists corresponding to the columns from listoflists
             specified by cnums, in the order the column numbers appear in cnums
    """
    # 'index' must be global: the lambdas below look it up at CALL time, so
    # a plain local would not be visible inside them (pre-nested-scopes idiom)
    global index
    column = 0
    if type(cnums) in [ListType,TupleType]:   # if multiple columns to get
        index = cnums[0]
        column = map(lambda x: x[index], listoflists)
        for col in cnums[1:]:
            index = col
            column = abut(column,map(lambda x: x[index], listoflists))
    elif type(cnums) == StringType:      # if an 'x[3:]' type expr.
        # NOTE(review): eval on a caller-supplied string -- only pass
        # trusted slice expressions
        evalstring = 'map(lambda x: x'+cnums+', listoflists)'
        column = eval(evalstring)
    else:                                # else it's just 1 col to get
        index = cnums
        column = map(lambda x: x[index], listoflists)
    return column
def collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
    """
    Averages data in collapsecol, keeping all unique items in keepcols
    (using unique, which keeps unique LISTS of column numbers), retaining the
    unique sets of values in keepcols, the mean for each.  Setting fcn1
    and/or fcn2 to point to a function rather than None (e.g., stats.sterr, len)
    will append those results (e.g., the sterr, N) after each calculated mean.
    cfcn is the collapse function to apply (defaults to mean, defined here in the
    pstat module to avoid circular imports with stats.py, but harmonicmean or
    others could be passed).
    Usage:    collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
    Returns: a list of lists with all unique permutations of entries appearing in
             columns ("conditions") specified by keepcols, abutted with the result of
             cfcn (if cfcn=None, defaults to the mean) of each column specified by
             collapsecols.
    """
    # default collapse function: arithmetic mean (local to avoid importing stats)
    def collmean (inlist):
        s = 0
        for item in inlist:
            s = s + item
        return s/float(len(inlist))
    if type(keepcols) not in [ListType,TupleType]:
        keepcols = [keepcols]
    if type(collapsecols) not in [ListType,TupleType]:
        collapsecols = [collapsecols]
    if cfcn == None:
        cfcn = collmean
    if keepcols == []:
        # no grouping columns: collapse each requested column over ALL rows
        means = [0]*len(collapsecols)
        for i in range(len(collapsecols)):
            avgcol = colex(listoflists,collapsecols[i])
            means[i] = cfcn(avgcol)
            if fcn1:
                try:
                    test = fcn1(avgcol)
                except:
                    test = 'N/A'        # fcn1 failed on this column
                means[i] = [means[i], test]
            if fcn2:
                try:
                    test = fcn2(avgcol)
                except:
                    test = 'N/A'
                # append the N; means[i] may or may not already be a list
                try:
                    means[i] = means[i] + [len(avgcol)]
                except TypeError:
                    means[i] = [means[i],len(avgcol)]
        return means
    else:
        # group rows by the unique value-combinations found in keepcols,
        # then collapse each group's collapsecols
        values = colex(listoflists,keepcols)
        uniques = unique(values)
        uniques.sort()
        newlist = []
        if type(keepcols) not in [ListType,TupleType]:  keepcols = [keepcols]
        for item in uniques:
            if type(item) not in [ListType,TupleType]:  item =[item]
            tmprows = linexand(listoflists,keepcols,item)
            for col in collapsecols:
                avgcol = colex(tmprows,col)
                item.append(cfcn(avgcol))
                if fcn1 <> None:
                    try:
                        test = fcn1(avgcol)
                    except:
                        test = 'N/A'
                    item.append(test)
                if fcn2 <> None:
                    try:
                        test = fcn2(avgcol)
                    except:
                        test = 'N/A'
                    item.append(test)
                newlist.append(item)
        return newlist
def dm (listoflists,criterion):
    """
    Returns rows from the passed list of lists that meet the criteria in
    the passed criterion expression (a string as a function of x; e.g., 'x[3]>=9'
    will return all rows where the 4th column>=9 and "x[2]=='N'" will return rows
    with column 2 equal to the string 'N').
    Usage:   dm (listoflists, criterion)
    Returns: rows from listoflists that meet the specified criterion.
    """
    # NOTE(review): the criterion string is eval'd verbatim -- arbitrary
    # code execution if it comes from untrusted input
    function = 'filter(lambda x: '+criterion+',listoflists)'
    lines = eval(function)
    return lines
def flat(l):
    """
    Returns the flattened version of a '2D' list.  List-correlate to the a.flat()
    method of NumPy arrays.
    Usage:    flat(l)
    """
    flattened = []
    # walk each row and pull its elements into one flat list
    for row in l:
        for element in row:
            flattened.append(element)
    return flattened
def linexand (listoflists,columnlist,valuelist):
    """
    Returns the rows of a list of lists where col (from columnlist) = val
    (from valuelist) for EVERY pair of values (columnlist[i],valuelists[i]).
    len(columnlist) must equal len(valuelist).
    Usage:   linexand (listoflists,columnlist,valuelist)
    Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ALL i
    """
    if type(columnlist) not in [ListType,TupleType]:
        columnlist = [columnlist]
    if type(valuelist) not in [ListType,TupleType]:
        valuelist = [valuelist]
    # build an 'x[c1]==v1 and x[c2]==v2 and ...' expression string, then
    # eval it as a filter predicate.  NOTE(review): string values are
    # interpolated unescaped -- only pass trusted values.
    criterion = ''
    for i in range(len(columnlist)):
        if type(valuelist[i])==StringType:
            critval = '\'' + valuelist[i] + '\''    # quote string literals
        else:
            critval = str(valuelist[i])
        criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
    criterion = criterion[0:-3]         # remove the "and" after the last crit
    function = 'filter(lambda x: '+criterion+',listoflists)'
    lines = eval(function)
    return lines
def linexor (listoflists,columnlist,valuelist):
    """
    Returns the rows of a list of lists where col (from columnlist) = val
    (from valuelist) for ANY pair of values (colunmlist[i],valuelist[i[).
    One value is required for each column in columnlist.  If only one value
    exists for columnlist but multiple values appear in valuelist, the
    valuelist values are all assumed to pertain to the same column.
    Usage:   linexor (listoflists,columnlist,valuelist)
    Returns: the rows of listoflists where columnlist[i]=valuelist[i] for ANY i
    """
    if type(columnlist) not in [ListType,TupleType]:
        columnlist = [columnlist]
    if type(valuelist) not in [ListType,TupleType]:
        valuelist = [valuelist]
    criterion = ''
    # a single column with several values means "this column, any of these"
    if len(columnlist) == 1 and len(valuelist) > 1:
        columnlist = columnlist*len(valuelist)
    # build an 'x[c1]==v1 or x[c2]==v2 or ...' predicate string for eval.
    # NOTE(review): string values are interpolated unescaped -- only pass
    # trusted values.
    for i in range(len(columnlist)):          # build an exec string
        if type(valuelist[i])==StringType:
            critval = '\'' + valuelist[i] + '\''
        else:
            critval = str(valuelist[i])
        criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
    criterion = criterion[0:-2]         # remove the "or" after the last crit
    function = 'filter(lambda x: '+criterion+',listoflists)'
    lines = eval(function)
    return lines
def linedelimited (inlist,delimiter):
    """
    Returns a string composed of elements in inlist, with each element
    separated by 'delimiter.'  Used by function writedelimited.  Use '\t'
    for tab-delimiting.
    Usage:   linedelimited (inlist,delimiter)
    """
    outstr = ''
    for item in inlist:
        if type(item) != StringType:
            item = str(item)
        outstr = outstr + item + delimiter
    # BUG FIX: strip the whole trailing delimiter -- the original sliced
    # off only ONE character, leaving junk behind for multi-character
    # delimiters (and eating a data character when delimiter was '')
    if len(delimiter) > 0:
        outstr = outstr[0:-len(delimiter)]
    return outstr
def lineincols (inlist,colsize):
    """
    Returns a string composed of elements in inlist, with each element
    right-aligned in columns of (fixed) colsize.
    Usage:   lineincols (inlist,colsize)   where colsize is an integer
    """
    outstr = ''
    for item in inlist:
        if type(item) <> StringType:
            item = str(item)
        size = len(item)
        if size <= colsize:
            # right-align: pad on the left up to colsize
            for i in range(colsize-size):
                outstr = outstr + ' '
            outstr = outstr + item
        else:
            # NOTE(review): keeps colsize+1 characters, overflowing the
            # column by one -- looks like an off-by-one (lineincustcols has
            # the same pattern); confirm intent before changing
            outstr = outstr + item[0:colsize+1]
    return outstr
def lineincustcols (inlist,colsizes):
    """
    Returns a string composed of elements in inlist, with each element
    right-aligned in a column of width specified by a sequence colsizes.  The
    length of colsizes must be greater than or equal to the number of columns
    in inlist.
    Usage:   lineincustcols (inlist,colsizes)
    Returns: formatted string created from inlist
    """
    outstr = ''
    for i in range(len(inlist)):
        if type(inlist[i]) <> StringType:
            item = str(inlist[i])
        else:
            item = inlist[i]
        size = len(item)
        if size <= colsizes[i]:
            # right-align: pad on the left up to this column's width
            for j in range(colsizes[i]-size):
                outstr = outstr + ' '
            outstr = outstr + item
        else:
            # NOTE(review): keeps colsizes[i]+1 characters, overflowing the
            # column by one -- same suspected off-by-one as lineincols
            outstr = outstr + item[0:colsizes[i]+1]
    return outstr
def list2string (inlist,delimit=' '):
    """
    Converts a 1D list to a single long string for file output, using
    the string.join function.
    Usage:   list2string (inlist,delimit=' ')
    Returns: the string created from inlist
    """
    # stringify every element, then glue the pieces with the delimiter
    return string.join(map(makestr,inlist),delimit)
def makelol(inlist):
    """
    Converts a 1D list to a 2D list (i.e., a list-of-lists).  Useful when you
    want to use put() to write a 1D list one item per line in the file.
    Usage:    makelol(inlist)
    Returns:  if l = [1,2,'hi'] then returns [[1],[2],['hi']] etc.
    """
    # wrap every element in its own single-item row
    return [[item] for item in inlist]
def makestr (x):
    """Return x unchanged if it is already a string, otherwise str(x)."""
    if type(x) == StringType:
        return x
    return str(x)
def printcc (lst,extra=2):
    """
    Prints a list of lists in columns, customized by the max size of items
    within the columns (max size of items in col, plus 'extra' number of spaces).
    Use 'dashes' or '\\n' in the list-of-lists to print dashes or blank lines,
    respectively.
    Usage:   printcc (lst,extra=2)
    Returns: None
    """
    if type(lst[0]) not in [ListType,TupleType]:
        lst = [lst]                     # promote a 1D list to a single row
    rowstokill = []
    list2print = copy.deepcopy(lst)
    # remove the special marker rows before computing column widths
    for i in range(len(lst)):
        if lst[i] == ['\n'] or lst[i]=='\n' or lst[i]=='dashes' or lst[i]=='' or lst[i]==['']:
            rowstokill = rowstokill + [i]
    rowstokill.reverse()   # delete blank rows from the end
    for row in rowstokill:
        del list2print[row]
    # per-column width: longest stringified item plus 'extra' padding
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = colex(list2print,col)
        items = map(makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    for row in lst:
        if row == ['\n'] or row == '\n' or row == '' or row == ['']:
            print
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            print lineincustcols(dashes,maxsize)
        else:
            print lineincustcols(row,maxsize)
    return None
def printincols (listoflists,colsize):
    """
    Prints a list of lists in columns of (fixed) colsize width, where
    colsize is an integer.
    Usage:   printincols (listoflists,colsize)
    Returns: None
    """
    # one formatted line per row; formatting delegated to lineincols
    for row in listoflists:
        print lineincols(row,colsize)
    return None
def pl (listoflists):
    """
    Prints a list of lists, 1 list (row) at a time.
    Usage:   pl(listoflists)
    Returns: None
    """
    for row in listoflists:
        if row[-1] == '\n':
            # row already ends in a newline: the trailing comma suppresses
            # the extra newline 'print' would otherwise add
            print row,
        else:
            print row
    return None
def printl(listoflists):
    """Alias for pl: prints a list of lists, one row at a time."""
    pl(listoflists)
    return
def replace (inlst,oldval,newval):
    """
    Replaces all occurrences of 'oldval' with 'newval', recursively.
    Usage:   replace (inlst,oldval,newval)
    """
    outlst = inlst*1    # shallow copy of this level (same trick as before)
    for idx in range(len(outlst)):
        if type(outlst[idx]) in [ListType,TupleType]:
            # nested sequence: recurse into it
            outlst[idx] = replace(outlst[idx],oldval,newval)
        elif outlst[idx] == oldval:
            outlst[idx] = newval
    return outlst
def recode (inlist,listmap,cols=None):
    """
    Changes the values in a list to a new set of values (useful when
    you need to recode data from (e.g.) strings to numbers.  cols defaults
    to None (meaning all columns are recoded).
    Usage:   recode (inlist,listmap,cols=None)  cols=recode cols, listmap=2D list
    Returns: inlist with the appropriate values replaced with new ones
    """
    lst = copy.deepcopy(inlist)
    if cols != None:
        if type(cols) not in [ListType,TupleType]:
            cols = [cols]
        for col in cols:
            for row in range(len(lst)):
                try:
                    idx = colex(listmap,0).index(lst[row][col])
                    lst[row][col] = listmap[idx][1]
                except ValueError:
                    pass    # value not in the map: leave it unchanged
    else:
        for row in range(len(lst)):
            # BUG FIX: iterate the columns of THIS row -- the original used
            # range(len(lst)) (the number of ROWS), which skipped columns
            # or raised IndexError for non-square data
            for col in range(len(lst[row])):
                try:
                    idx = colex(listmap,0).index(lst[row][col])
                    lst[row][col] = listmap[idx][1]
                except ValueError:
                    pass
    return lst
def remap (listoflists,criterion):
    """
    Remaps values in a given column of a 2D list (listoflists).  This requires
    a criterion as a function of 'x' so that the result of the following is
    returned ... map(lambda x: 'criterion',listoflists).
    Usage:   remap(listoflists,criterion)    criterion=string
    Returns: remapped version of listoflists
    """
    # NOTE(review): the criterion string is eval'd verbatim -- arbitrary
    # code execution if it comes from untrusted input
    function = 'map(lambda x: '+criterion+',listoflists)'
    lines = eval(function)
    return lines
def roundlist (inlist,digits):
    """
    Goes through each element in a 1D or 2D inlist, and applies the following
    function to all elements of FloatType ... round(element,digits).
    Usage:   roundlist(inlist,digits)
    Returns: list with rounded floats
    """
    # a 1D list of numbers becomes a single-row 2D list
    if type(inlist[0]) in [IntType, FloatType]:
        inlist = [inlist]
    rounded = inlist*1      # shallow copy: rows are shared, as in the original
    for r in range(len(rounded)):
        for c in range(len(rounded[r])):
            if type(rounded[r][c])==FloatType:
                rounded[r][c] = round(rounded[r][c],digits)
    return rounded
def sortby(listoflists,sortcols):
    """
    Sorts a list of lists on the column(s) specified in the sequence
    sortcols.
    Usage:   sortby(listoflists,sortcols)
    Returns: sorted list, unchanged column ordering
    """
    # prepend the sort-key columns, sort lexicographically on them ...
    newlist = abut(colex(listoflists,sortcols),listoflists)
    newlist.sort()
    try:
        numcols = len(sortcols)
    except TypeError:
        numcols = 1     # sortcols was a single integer
    # ... then strip the prepended key columns back off via a slice expr
    crit = '[' + str(numcols) + ':]'
    newlist = colex(newlist,crit)
    return newlist
def unique (inlist):
    """
    Returns all unique items in the passed list.  If a list-of-lists
    is passed, unique LISTS are found (i.e., items in the first dimension
    are compared).

    Usage:   unique (inlist)
    Returns: the unique elements (or rows) in inlist, in first-seen order
    """
    seen = []
    for element in inlist:
        if element in seen:
            continue
        seen.append(element)
    return seen
def duplicates(inlist):
    """
    Returns duplicate items in the FIRST dimension of the passed list.
    An item occurring k>1 times appears k-1 times in the result.

    Usage:   duplicates (inlist)
    """
    found = []
    pos = 0
    while pos < len(inlist):
        # an item is a duplicate if it re-appears later in the list
        if inlist[pos] in inlist[pos+1:]:
            found.append(inlist[pos])
        pos = pos + 1
    return found
def nonrepeats(inlist):
    """
    Returns items that are NOT duplicated in the first dim of the passed list.

    Usage:   nonrepeats (inlist)
    """
    # keep every item whose total count in the list is exactly one
    return [item for item in inlist if inlist.count(item) == 1]
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
#=================== PSTAT ARRAY FUNCTIONS =====================
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import Numeric
N = Numeric
    def aabut (source, *args):
        """
        Like the |Stat abut command.  It concatenates two arrays column-wise
        and returns the result.  CAUTION: If one array is shorter, it will be
        repeated until it is as long as the other.

        Usage:   aabut (source, args)    where args=any # of arrays
        Returns: an array as long as the LONGEST array passed, source appearing
                 on the 'left', arrays in <args> attached on the 'right'.
        """
        # promote a 1D source to a single-column 2D array
        if len(source.shape)==1:
            width = 1
            source = N.resize(source,[source.shape[0],width])
        else:
            width = source.shape[1]
        for addon in args:
            # promote each 1D addon to a column matching source's row count
            if len(addon.shape)==1:
                width = 1
                addon = N.resize(addon,[source.shape[0],width])
            else:
                width = source.shape[1]
            # N.resize recycles values, so the shorter array is repeated
            # until the row counts match
            if len(addon) < len(source):
                addon = N.resize(addon,[source.shape[0],addon.shape[1]])
            elif len(source) < len(addon):
                source = N.resize(source,[addon.shape[0],source.shape[1]])
            source = N.concatenate((source,addon),1)
        return source
def acolex (a,indices,axis=1):
"""
Extracts specified indices (a list) from passed array, along passed
axis (column extraction is default). BEWARE: A 1D array is presumed to be a
column-array (and that the whole array will be returned as a column).
Usage: acolex (a,indices,axis=1)
Returns: the columns of a specified by indices
"""
if type(indices) not in [ListType,TupleType,N.ArrayType]:
indices = [indices]
if len(N.shape(a)) == 1:
cols = N.resize(a,[a.shape[0],1])
else:
cols = N.take(a,indices,axis)
return cols
    def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None):
        """
        Averages data in collapsecol, keeping all unique items in keepcols
        (using unique, which keeps unique LISTS of column numbers), retaining
        the unique sets of values in keepcols, the mean for each.  If stderror
        or N of the mean are desired, set either or both parameters to 1.

        Usage:   acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None)
        Returns: unique 'conditions' specified by the contents of columns specified
                 by keepcols, abutted with the mean(s) of column(s) specified by
                 collapsecols
        """
        # default collapse function when cfcn is not supplied
        # NOTE(review): this sums rather than averages, despite the docstring's
        # talk of means -- confirm against callers
        def acollmean (inarray):
            return N.sum(N.ravel(inarray))
        if cfcn == None:
            cfcn = acollmean
        if keepcols == []:
            # no grouping columns: collapse the requested columns in one go
            avgcol = acolex(a,collapsecols)
            means = N.sum(avgcol)/float(len(avgcol))
            if fcn1<>None:
                try:
                    test = fcn1(avgcol)
                except:
                    # fcn1 failed (e.g. non-numeric data): mark as unavailable
                    test = N.array(['N/A']*len(means))
                means = aabut(means,test)
            if fcn2<>None:
                try:
                    test = fcn2(avgcol)
                except:
                    test = N.array(['N/A']*len(means))
                means = aabut(means,test)
            return means
        else:
            if type(keepcols) not in [ListType,TupleType,N.ArrayType]:
                keepcols = [keepcols]
            values = colex(a,keepcols)   # so that "item" can be appended (below)
            uniques = unique(values)  # get a LIST, so .sort keeps rows intact
            uniques.sort()
            newlist = []
            for item in uniques:
                if type(item) not in [ListType,TupleType,N.ArrayType]:
                    item =[item]
                # rows of a whose keepcols values match this unique combination
                tmprows = alinexand(a,keepcols,item)
                for col in collapsecols:
                    avgcol = acolex(tmprows,col)
                    item.append(acollmean(avgcol))
                    if fcn1<>None:
                        try:
                            test = fcn1(avgcol)
                        except:
                            test = 'N/A'
                        item.append(test)
                    if fcn2<>None:
                        try:
                            test = fcn2(avgcol)
                        except:
                            test = 'N/A'
                        item.append(test)
                newlist.append(item)
            # fall back to an Object array if rows mix types ('N/A' strings)
            try:
                new_a = N.array(newlist)
            except TypeError:
                new_a = N.array(newlist,'O')
            return new_a
def adm (a,criterion):
"""
Returns rows from the passed list of lists that meet the criteria in
the passed criterion expression (a string as a function of x).
Usage: adm (a,criterion) where criterion is like 'x[2]==37'
"""
function = 'filter(lambda x: '+criterion+',a)'
lines = eval(function)
try:
lines = N.array(lines)
except:
lines = N.array(lines,'O')
return lines
def isstring(x):
if type(x)==StringType:
return 1
else:
return 0
def alinexand (a,columnlist,valuelist):
"""
Returns the rows of an array where col (from columnlist) = val
(from valuelist). One value is required for each column in columnlist.
Usage: alinexand (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i
"""
if type(columnlist) not in [ListType,TupleType,N.ArrayType]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType,N.ArrayType]:
valuelist = [valuelist]
criterion = ''
for i in range(len(columnlist)):
if type(valuelist[i])==StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' and'
criterion = criterion[0:-3] # remove the "and" after the last crit
return adm(a,criterion)
def alinexor (a,columnlist,valuelist):
"""
Returns the rows of an array where col (from columnlist) = val (from
valuelist). One value is required for each column in columnlist.
The exception is if either columnlist or valuelist has only 1 value,
in which case that item will be expanded to match the length of the
other list.
Usage: alinexor (a,columnlist,valuelist)
Returns: the rows of a where columnlist[i]=valuelist[i] for ANY i
"""
if type(columnlist) not in [ListType,TupleType,N.ArrayType]:
columnlist = [columnlist]
if type(valuelist) not in [ListType,TupleType,N.ArrayType]:
valuelist = [valuelist]
criterion = ''
if len(columnlist) == 1 and len(valuelist) > 1:
columnlist = columnlist*len(valuelist)
elif len(valuelist) == 1 and len(columnlist) > 1:
valuelist = valuelist*len(columnlist)
for i in range(len(columnlist)):
if type(valuelist[i])==StringType:
critval = '\'' + valuelist[i] + '\''
else:
critval = str(valuelist[i])
criterion = criterion + ' x['+str(columnlist[i])+']=='+critval+' or'
criterion = criterion[0:-2] # remove the "or" after the last crit
return adm(a,criterion)
def areplace (a,oldval,newval):
"""
Replaces all occurrences of oldval with newval in array a.
Usage: areplace(a,oldval,newval)
"""
newa = N.not_equal(a,oldval)*a
return newa+N.equal(a,oldval)*newval
def arecode (a,listmap,col='all'):
"""
Remaps the values in an array to a new set of values (useful when
you need to recode data from (e.g.) strings to numbers as most stats
packages require. Can work on SINGLE columns, or 'all' columns at once.
Usage: arecode (a,listmap,col='all')
Returns: a version of array a where listmap[i][0] = (instead) listmap[i][1]
"""
ashape = a.shape
if col == 'all':
work = a.flat
else:
work = acolex(a,col)
work = work.flat
for pair in listmap:
if type(pair[1]) == StringType or work.typecode()=='O' or a.typecode()=='O':
work = N.array(work,'O')
a = N.array(a,'O')
for i in range(len(work)):
if work[i]==pair[0]:
work[i] = pair[1]
if col == 'all':
return N.reshape(work,ashape)
else:
return N.concatenate([a[:,0:col],work[:,N.NewAxis],a[:,col+1:]],1)
else: # must be a non-Object type array and replacement
work = N.where(N.equal(work,pair[0]),pair[1],work)
return N.concatenate([a[:,0:col],work[:,N.NewAxis],a[:,col+1:]],1)
def arowcompare(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
Usage: arowcompare(row1,row2)
Returns: an array of equal length containing 1s where the two rows had
identical elements and 0 otherwise
"""
if row1.typecode()=='O' or row2.typecode=='O':
cmpvect = N.logical_not(abs(N.array(map(cmp,row1,row2)))) # cmp fcn gives -1,0,1
else:
cmpvect = N.equal(row1,row2)
return cmpvect
def arowsame(row1, row2):
"""
Compares two rows from an array, regardless of whether it is an
array of numbers or of python objects (which requires the cmp function).
Usage: arowsame(row1,row2)
Returns: 1 if the two rows are identical, 0 otherwise.
"""
cmpval = N.alltrue(arowcompare(row1,row2))
return cmpval
def asortrows(a,axis=0):
"""
Sorts an array "by rows". This differs from the Numeric.sort() function,
which sorts elements WITHIN the given axis. Instead, this function keeps
the elements along the given axis intact, but shifts them 'up or down'
relative to one another.
Usage: asortrows(a,axis=0)
Returns: sorted version of a
"""
if axis != 0:
a = N.swapaxes(a, axis, 0)
l = a.tolist()
l.sort() # or l.sort(_sort)
y = N.array(l)
if axis != 0:
y = N.swapaxes(y, axis, 0)
return y
def aunique(inarray):
"""
Returns unique items in the FIRST dimension of the passed array. Only
works on arrays NOT including string items.
Usage: aunique (inarray)
"""
uniques = N.array([inarray[0]])
if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY
for item in inarray[1:]:
if N.add.reduce(N.equal(uniques,item).flat) == 0:
try:
uniques = N.concatenate([uniques,N.array[N.NewAxis,:]])
except TypeError:
uniques = N.concatenate([uniques,N.array([item])])
else: # IT MUST BE A 2+D ARRAY
if inarray.typecode() != 'O': # not an Object array
for item in inarray[1:]:
if not N.sum(N.alltrue(N.equal(uniques,item),1)):
try:
uniques = N.concatenate( [uniques,item[N.NewAxis,:]] )
except TypeError: # the item to add isn't a list
uniques = N.concatenate([uniques,N.array([item])])
else:
pass # this item is already in the uniques array
else: # must be an Object array, alltrue/equal functions don't work
for item in inarray[1:]:
newflag = 1
for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=>
test = N.sum(abs(N.array(map(cmp,item,unq))))
if test == 0: # if item identical to any 1 row in uniques
newflag = 0 # then not a novel item to add
break
if newflag == 1:
try:
uniques = N.concatenate( [uniques,item[N.NewAxis,:]] )
except TypeError: # the item to add isn't a list
uniques = N.concatenate([uniques,N.array([item])])
return uniques
    def aduplicates(inarray):
        """
        Returns duplicate items in the FIRST dimension of the passed array.
        Only works on arrays NOT including string items.

        Usage:   aduplicates (inarray)
        """
        inarray = N.array(inarray)
        if len(inarray.shape) == 1:            # IF IT'S A 1D ARRAY
            dups = []
            inarray = inarray.tolist()
            # an item is a duplicate if it re-appears later in the list
            for i in range(len(inarray)):
                if inarray[i] in inarray[i+1:]:
                    dups.append(inarray[i])
            # collapse repeats of the same duplicate down to one entry
            dups = aunique(dups)
        else:                                  # IT MUST BE A 2+D ARRAY
            dups = []
            aslist = inarray.tolist()
            for i in range(len(aslist)):
                if aslist[i] in aslist[i+1:]:
                    dups.append(aslist[i])
            # rows may be nested lists, so use the list-based unique here
            dups = unique(dups)
            dups = N.array(dups)
        return dups
except ImportError: # IF NUMERIC ISN'T AVAILABLE, SKIP ALL arrayfuncs
pass
| Python |
# $Id: __init__.py 1945 2006-03-05 01:06:37Z cpbotha $
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""stats_kit package driver file.
Inserts the following modules in sys.modules: stats.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import sys
# you have to define this
VERSION = 'Strangman - May 10, 2002'
def init(theModuleManager, pre_import=True):
    """Kit init hook: import stats and register it as a top-level module."""

    global stats
    import stats

    # Because this package directory is not on sys.path, the module lands in
    # sys.modules as 'module_kits.stats_kit.stats'.  Alias it under the plain
    # name 'stats' so client code can simply "import stats".
    sys.modules['stats'] = stats

    theModuleManager.setProgress(100, 'Initialising stats_kit: complete.')
def refresh():
    # Re-execute stats' source in place (Python 2 builtin reload); the
    # sys.modules['stats'] alias set up in init() keeps pointing at the same
    # module object, so clients see the refreshed code automatically.
    reload(stats)
| Python |
# Copyright (c) 1999-2002 Gary Strangman; All Rights Reserved.
#
# This software is distributable under the terms of the GNU
# General Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise
# using this module constitutes acceptance of the terms of this License.
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: May 10, 2002 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
## CHANGE LOG:
## ===========
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
class Dispatch:
"""
The Dispatch class, care of David Ascher, allows different functions to
be called depending on the argument types. This way, there can be one
function name regardless of the argument type. To access function doc
in stats.py module, prefix the function with an 'l' or 'a' for list or
array arguments, respectively. That is, print stats.lmean.__doc__ or
print stats.amean.__doc__ or whatever.
"""
def __init__(self, *tuples):
self._dispatch = {}
for func, types in tuples:
for t in types:
if t in self._dispatch.keys():
raise ValueError, "can't have two dispatches on "+str(t)
self._dispatch[t] = func
self._types = self._dispatch.keys()
def __call__(self, arg1, *args, **kw):
if type(arg1) not in self._types:
raise TypeError, "don't know how to dispatch %s arguments" % type(arg1)
return apply(self._dispatch[type(arg1)], (arg1,) + args, kw)
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean (inlist):
    """
    Calculates the geometric mean of the values in the passed list.
    That is:  n-th root of (x1 * x2 * ... * xn).  Assumes a '1D' list.

    Usage:   lgeometricmean(inlist)
    """
    # multiply the n-th roots together (rather than taking the n-th root
    # of the full product) to reduce the chance of overflow
    root = 1.0/len(inlist)
    product = 1.0
    for value in inlist:
        product = product * pow(value,root)
    return product
def lharmonicmean (inlist):
    """
    Calculates the harmonic mean of the values in the passed list.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Assumes a '1D' list.

    Usage:   lharmonicmean(inlist)
    """
    # accumulate the sum of reciprocals, then divide into n
    recip_total = 0
    for value in inlist:
        recip_total = recip_total + 1.0/value
    return len(inlist) / recip_total
def lmean (inlist):
    """
    Returns the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).

    Usage:   lmean(inlist)
    """
    # accumulate manually (the module-level 'sum' name is a Dispatch object)
    total = 0
    for value in inlist:
        total = total + value
    return total/float(len(inlist))
def lmedian (inlist,numbins=1000):
    """
    Returns the computed median value of a list of numbers, given the
    number of bins to use for the histogram (more bins brings the computed
    value closer to the median score, default number of bins = 1000).  See
    G.W. Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.

    Usage:   lmedian (inlist, numbins=1000)
    """
    (hist, smallest, binsize, extras) = histogram(inlist,numbins) # make histog
    cumhist = cumsum(hist)              # make cumulative histogram
    for i in range(len(cumhist)):        # get 1st(!) index holding 50%ile score
        if cumhist[i]>=len(inlist)/2.0:
            cfbin = i
            break
    LRL = smallest + binsize*cfbin        # get lower read limit of that bin
    # NOTE(review): when cfbin is 0, cumhist[cfbin-1] wraps to the LAST bin;
    # this assumes the 50th percentile never falls in the very first bin --
    # confirm for strongly skewed data
    cfbelow = cumhist[cfbin-1]
    freq = float(hist[cfbin])                # frequency IN the 50%ile bin
    median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize  # median formula
    return median
def lmedianscore (inlist):
    """
    Returns the 'middle' score of the passed list.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.

    Usage:   lmedianscore(inlist)
    """
    newlist = copy.deepcopy(inlist)
    newlist.sort()
    if len(newlist) % 2 == 0:   # if even number of scores, average middle 2
        # use explicit floor division so the index stays an integer on
        # Python 3 as well ('/' on two ints yields a float there); behavior
        # on Python 2 is unchanged
        index = len(newlist)//2
        median = float(newlist[index] + newlist[index-1]) /2
    else:
        index = len(newlist)//2  # floor division gives mid value counting from 0
        median = newlist[index]
    return median
def lmode(inlist):
    """
    Returns a list of the modal (most common) score(s) in the passed
    list.  If there is more than one such score, all are returned.  The
    bin-count for the mode(s) is also returned.

    Usage:   lmode(inlist)
    Returns: bin-count for mode(s), a list of modal value(s)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    counts = []
    for item in scores:
        counts.append(inlist.count(item))
    maxfreq = max(counts)
    # collect every (sorted) score whose count equals the maximum
    mode = []
    for i in range(len(scores)):
        if counts[i] == maxfreq:
            mode.append(scores[i])
    return maxfreq, mode
####################################
############ MOMENTS #############
####################################
def lmoment(inlist,moment=1):
    """
    Calculates the nth moment about the mean for a sample (defaults to
    the 1st moment).  Used to calculate coefficients of skewness and
    kurtosis.

    Usage:   lmoment(inlist,moment=1)
    Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
    """
    # the first moment about the mean is zero by definition
    if moment == 1:
        return 0.0
    mn = mean(inlist)
    devsum = 0
    for score in inlist:
        devsum = devsum + (score-mn)**moment
    return devsum/float(len(inlist))
def lvariation(inlist):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6.

    Usage:   lvariation(inlist)
    """
    # sample standard deviation expressed as a percentage of the mean
    sd = samplestdev(inlist)
    return 100.0*sd/float(mean(inlist))
def lskew(inlist):
    """
    Returns the skewness of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lskew(inlist)
    """
    # third central moment normalized by variance**1.5
    m2 = moment(inlist,2)
    return moment(inlist,3)/pow(m2,1.5)
def lkurtosis(inlist):
    """
    Returns the kurtosis of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lkurtosis(inlist)
    """
    # fourth central moment normalized by variance**2
    m2 = moment(inlist,2)
    return moment(inlist,4)/pow(m2,2.0)
def ldescribe(inlist):
    """
    Returns some descriptive statistics of the passed list (assumed to be
    1D).

    Usage:   ldescribe(inlist)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    return (len(inlist),
            (min(inlist),max(inlist)),
            mean(inlist),
            stdev(inlist),
            skew(inlist),
            kurtosis(inlist))
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
    """
    Returns a list of pairs.  Each pair consists of one of the scores in
    inlist and its frequency count.  Assumes a 1D list is passed.

    Usage:   litemfreq(inlist)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    counts = [inlist.count(item) for item in scores]
    return pstat.abut(scores, counts)
def lscoreatpercentile (inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.

    Usage:   lscoreatpercentile(inlist,percent)
    """
    # accept percentages given as e.g. 95 as well as fractions like 0.95
    if percent > 1:
        print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
        percent = percent / 100.0
    targetcf = percent*len(inlist)      # cumulative count we need to reach
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    # find the first histogram bin whose cumulative count reaches the target
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # linearly interpolate within that bin
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
    """
    Returns the percentile value of a score relative to the distribution
    given by inlist.  Formula depends on the values used to histogram the
    data(!).

    Usage:   lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
    """
    h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
    cumhist = cumsum(copy.deepcopy(h))
    # locate the bin holding the score, then interpolate within it
    i = int((score - lrl)/float(binsize))
    inbin = ((score-(lrl+binsize*i))/float(binsize))*h[i]
    return (cumhist[i-1]+inbin)/float(len(inlist)) * 100
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
    """
    Returns (i) a list of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  If no sequence
    object is given for defaultreallimits, the routine picks (usually
    non-pretty) bins spanning all the numbers in the inlist.

    Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0)
    Returns: list of bin values, lowerreallimit, binsize, extrapoints
    """
    if (defaultreallimits <> None):
        if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
            lowerreallimit = defaultreallimits
            upperreallimit = 1.0001 * max(inlist)
        else: # assume both limits given
            lowerreallimit = defaultreallimits[0]
            upperreallimit = defaultreallimits[1]
        binsize = (upperreallimit-lowerreallimit)/float(numbins)
    else:     # no limits given for histogram, both must be calc'd
        estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
        binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
        lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
    bins = [0]*(numbins)
    extrapoints = 0
    for num in inlist:
        try:
            if (num-lowerreallimit) < 0:
                # score falls below the lowest bin: count as out-of-range
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num-lowerreallimit)/float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except:
            # IndexError (score above the top bin) or non-numeric value
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print '\nPoints outside given histogram range =',extrapoints
    return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    bins,lowerlimit,binsize,extras = histogram(inlist,numbins,defaultreallimits)
    # accumulate the per-bin counts into a running total
    return cumsum(copy.deepcopy(bins)),lowerlimit,binsize,extras
def lrelfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of relfreq bin values, lowerreallimit, binsize, extrapoints
    """
    bins,lowerlimit,binsize,extras = histogram(inlist,numbins,defaultreallimits)
    # convert raw counts into fractions of the total sample size
    total = float(len(inlist))
    bins = [count/total for count in bins]
    return bins,lowerlimit,binsize,extras
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  From
    Maxwell and Delaney, p.112.

    Usage:   lobrientransform(*args)
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)               # number of groups (one list per group)
    n = [0.0]*k                 # per-group sample sizes (as floats)
    v = [0.0]*k                 # per-group variances
    m = [0.0]*k                 # per-group means
    nargs = []                  # deep copies, transformed in place below
    for i in range(k):
        nargs.append(copy.deepcopy(args[i]))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # NOTE(review): n[j] is a float here, so range(n[j]) relies on
        # old-Python acceptance of float arguments -- confirm/intify
        for i in range(n[j]):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # sanity check: the mean of each transformed group should equal the
    # original group variance (to within TINY)
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check <> 1:
        raise ValueError, 'Problem in obrientransform.'
    else:
        return nargs
def lsamplevar (inlist):
    """
    Returns the variance of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample variance only).

    Usage:   lsamplevar(inlist)
    """
    mn = mean(inlist)
    deviations = [score - mn for score in inlist]
    return ss(deviations)/float(len(inlist))
def lsamplestdev (inlist):
    """
    Returns the standard deviation of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample stdev only).

    Usage:   lsamplestdev(inlist)
    """
    # square root of the N-denominator sample variance
    variance = samplevar(inlist)
    return math.sqrt(variance)
def lvar (inlist):
    """
    Returns the variance of the values in the passed list using N-1
    for the denominator (i.e., for estimating population variance).

    Usage:   lvar(inlist)
    """
    mn = mean(inlist)
    deviations = [score - mn for score in inlist]
    # N-1 denominator gives the unbiased population estimate
    return ss(deviations) / float(len(inlist) - 1)
def lstdev (inlist):
    """
    Returns the standard deviation of the values in the passed list
    using N-1 in the denominator (i.e., to estimate population stdev).

    Usage:   lstdev(inlist)
    """
    estimated_variance = var(inlist)
    return math.sqrt(estimated_variance)
def lsterr(inlist):
    """
    Returns the standard error of the values in the passed list using N-1
    in the denominator (i.e., to estimate population standard error).

    Usage:   lsterr(inlist)
    """
    n = len(inlist)
    return stdev(inlist) / float(math.sqrt(n))
def lsem (inlist):
    """
    Returns the estimated standard error of the mean (sx-bar) of the
    values in the passed list.  sem = stdev / sqrt(n)

    Usage:   lsem(inlist)
    """
    # population-estimate stdev over root-N
    return stdev(inlist) / math.sqrt(len(inlist))
def lz (inlist, score):
    """
    Returns the z-score for a given input score, given that score and the
    list from which that score came.  Not appropriate for population
    calculations.

    Usage:   lz(inlist, score)
    """
    deviation = score - mean(inlist)
    # scaled by the N-denominator (sample-descriptive) stdev
    return deviation / samplestdev(inlist)
def lzs (inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   lzs(inlist)
    """
    return [z(inlist, score) for score in inlist]
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth (l,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores.  Assumes list is sorted by magnitude.  Slices off LESS if
    proportion results in a non-integer slice index (i.e., conservatively
    slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    # int() truncates, so a fractional cut point trims conservatively
    cut = int(proportiontocut * len(l))
    return l[cut:len(l) - cut]
def ltrim1 (l,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   ltrim1 (l,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of list l
    Raises:  ValueError if tail is neither 'right' nor 'left'
    """
    if tail == 'right':
        lowercut = 0
        uppercut = len(l) - int(proportiontocut*len(l))
    elif tail == 'left':
        lowercut = int(proportiontocut*len(l))
        uppercut = len(l)
    else:
        # previously an unrecognized tail fell through to an
        # UnboundLocalError; fail with a clear message instead
        raise ValueError("tail must be 'right' or 'left'")
    return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
    """
    Interactively determines the type of data and then runs the
    appropriated statistic for paired group data.

    Interactive flow (reads choices from stdin, prints results to stdout):
      - asks whether the samples are independent, related, or correlational
      - for i/r samples, first tests homogeneity of variance (O'Brien), then
        picks a parametric or nonparametric comparison accordingly
      - for correlation, asks for the data scale (continuous/ranked/
        dichotomous) and runs the matching correlation statistic

    Usage:   lpaired(x,y)
    Returns: appropriate statistic name, value, and probability
    """
    # loop until a valid choice is typed
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print '\nIndependent or related samples, or correlation (i,r,c): ',
        samples = raw_input()
    if samples in ['i','I','r','R']:
        print '\nComparing variances ...',
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print vartype
        if samples in ['i','I']:
            if vartype[0]=='e':
                # equal variances --> parametric independent-samples t-test
                t,p = ttest_ind(x,y,0)
                print '\nIndependent samples t-test: ', round(t,4),round(p,4)
            else:
                # unequal variances --> nonparametric alternatives
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
                else:
                    u,p = mannwhitneyu(x,y)
                    print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print '\nRelated samples t-test: ', round(t,4),round(p,4)
            else:
                # NOTE(review): this calls ranksums but labels it Wilcoxon --
                # presumably wilcoxont was intended; verify before relying on it
                t,p = ranksums(x,y)
                print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
            corrtype = raw_input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print '\nLinear regression for continuous variables ...'
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print '\nCorrelation for ranked variables ...'
            print "Spearman's r: ",round(r,4),round(p,4)
        else:  # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print '\nAssuming x contains a dichotomous variable ...'
            print 'Point Biserial r: ',round(r,4),round(p,4)
    print '\n\n'
    return None
def lpearsonr(x,y):
    """
    Calculates a Pearson correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (2nd), p.195.

    Usage:   lpearsonr(x,y)   where x and y are equal-length lists
    Returns: Pearson's r value, two-tailed p-value
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1.0e-30          # guards the t denominator when |r| == 1
    if len(x) != len(y):
        raise ValueError('Input values not paired in pearsonr. Aborting.')
    n = len(x)
    x = map(float,x)
    y = map(float,y)
    # r from the raw-score computational formula
    r_num = n*(summult(x,y)) - sum(x)*sum(y)
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = (r_num / r_den)  # denominator already a float
    # two-tailed p via the t distribution with n-2 df
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/float(df+t*t))
    return r, prob
def lspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   lspearmanr(x,y)   where x and y are equal-length lists
    Returns: Spearman's r, two-tailed p-value
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1e-30
    if len(x) != len(y):
        raise ValueError('Input values not paired in spearmanr. Aborting.')
    n = len(x)
    # rs from the sum of squared rank differences
    dsq = sumdiffsquared(rankdata(x), rankdata(y))
    rs = 1 - 6*dsq / float(n*(n**2-1))
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipies, p.510.  They are close to tables, but not exact. (?)
    return rs, probrs
def lpointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   lpointbiserialr(x,y)   where x,y are equal-length lists;
             x holds the dichotomous variable
    Returns: Point-biserial r, two-tailed p-value
    Raises:  ValueError on unequal lengths or if x has other than 2 categories
    """
    TINY = 1e-30
    if len(x) != len(y):
        raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
    data = pstat.abut(x,y)
    categories = pstat.unique(x)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required for pointbiserialr().")
    # there are exactly 2 categories: split the paired data on x's value
    codemap = pstat.abut(categories,range(2))
    recoded = pstat.recode(data,codemap,0)
    group1 = pstat.linexand(data,0,categories[0])
    group2 = pstat.linexand(data,0,categories[1])
    mean1 = mean(pstat.colex(group1,1))
    mean2 = mean(pstat.colex(group2,1))
    n = len(data)
    adjust = math.sqrt((len(group1)/float(n))*(len(group2)/float(n)))
    rpb = (mean2 - mean1)/samplestdev(pstat.colex(data,1))*adjust
    # two-tailed p via the t distribution with n-2 df
    df = n-2
    t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    return rpb, prob
def lkendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-routine.@@@

    Usage:   lkendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0      # pairs not tied on x
    n2 = 0      # pairs not tied on y
    iss = 0     # concordant-minus-discordant count
    for j in range(len(x)-1):
        # BUGFIX: start at j+1 so each distinct pair is examined exactly
        # once; starting at j (as before) also compared every element with
        # itself, inflating the tie counts
        for k in range(j+1,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):  # neither list has a tie on this pair
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss - 1
            else:
                # BUGFIX: count x- and y-ties independently, as NR's kendl1
                # does; the previous if/else miscounted pairs tied on both
                if (a1):
                    n1 = n1 + 1
                if (a2):
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)
    return tau, prob
def llinregress(x,y):
    """
    Calculates a regression line on x,y pairs.

    Usage:   llinregress(x,y)      x,y are equal-length lists of x-y coordinates
    Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
    Raises:  ValueError if x and y differ in length
    """
    TINY = 1.0e-20          # guards the t denominator when |r| == 1
    if len(x) != len(y):
        raise ValueError('Input values not paired in linregress. Aborting.')
    n = len(x)
    x = map(float,x)
    y = map(float,y)
    xmean = mean(x)
    ymean = mean(y)
    # Pearson r from the raw-score computational formula
    r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = r_num / r_den
    # two-tailed p for r via the t distribution with n-2 df
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / float(n*ss(x) - square_of_sums(x))
    intercept = ymean - slope*xmean
    # standard error of the estimate
    sterrest = math.sqrt(1-r*r)*samplestdev(y)
    return slope, intercept, r, prob, sterrest
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    samplemean = mean(a)
    samplevar_est = var(a)
    n = len(a)
    df = n - 1
    svar = ((n-1)*samplevar_est) / float(df)   # == var(a); kept for parity with ttest_ind
    t = (samplemean-popmean) / math.sqrt(svar*(1.0/n))
    prob = betai(0.5*df, 0.5, float(df)/(df+t*t))
    if printit != 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,samplemean,samplevar_est,min(a),max(a),
                          statname,t,prob)
    return t, prob
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of
    scores a, and b.  From Numerical Recipies, p.483.  If printit=1, results
    are printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    mean1 = mean(a)
    mean2 = mean(b)
    var1 = stdev(a)**2
    var2 = stdev(b)**2
    n1 = len(a)
    n2 = len(b)
    df = n1 + n2 - 2
    # pooled variance estimate
    svar = ((n1-1)*var1 + (n2-1)*var2) / float(df)
    t = (mean1-mean2) / math.sqrt(svar*(1.0/n1 + 1.0/n2))
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,mean1,var1,min(a),max(a),
                          name2,n2,mean2,var2,min(b),max(b),
                          statname,t,prob)
    return t, prob
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores,
    a and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output to
    'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
    Returns: t-value, two-tailed prob
    Raises:  ValueError if a and b differ in length
    """
    if len(a) != len(b):
        raise ValueError('Unequal length lists in ttest_rel.')
    mean1 = mean(a)
    mean2 = mean(b)
    var1 = var(a)
    var2 = var(b)
    n = len(a)
    # covariance of the paired scores
    cov = 0
    for ai, bi in zip(a, b):
        cov = cov + (ai-mean1) * (bi-mean2)
    df = n - 1
    cov = cov / float(df)
    # paired-difference standard error uses the covariance term
    sd = math.sqrt((var1+var2 - 2.0*cov)/float(n))
    t = (mean1-mean2) / sd
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,mean1,var1,min(a),max(a),
                          name2,n,mean2,var2,min(b),max(b),
                          statname,t,prob)
    return t, prob
def lchisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for list of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.

    Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)                 # number of groups
    if f_exp is None:              # 'is None', not '==': avoids invoking __eq__
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)  # create k bins with = freq.
    chisq = 0
    for i in range(len(f_obs)):
        chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
    return chisq, chisqprob(chisq, k-1)
def lks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  From
    Numerical Recipies in C, page 493.

    NOTE: sorts BOTH input lists in place as a side effect.

    Usage:   lks_2samp(data1,data2)   data1&2 are lists of values for 2 conditions
    Returns: KS D-value, associated p-value
    """
    j1 = 0              # index into sorted data1
    j2 = 0              # index into sorted data2
    fn1 = 0.0           # empirical CDF of data1 at the current point
    fn2 = 0.0           # empirical CDF of data2 at the current point
    n1 = len(data1)
    n2 = len(data2)
    en1 = n1
    en2 = n2
    d = 0.0             # largest (signed) CDF difference seen so far
    data1.sort()        # in-place: the caller's lists are reordered
    data2.sort()
    # walk both sorted samples, advancing whichever has the smaller
    # current value (both advance on ties), tracking the max CDF gap
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if math.fabs(dt) > math.fabs(d):
            d = dt
    try:
        # asymptotic significance of D; any numeric failure (e.g. empty
        # input) falls back to p=1.0 rather than raising
        en = math.sqrt(en1*en2/float(en1+en2))
        prob = ksprob((en+0.12+0.11/en)*abs(d))
    except:
        prob = 1.0
    return d, prob
def lmannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  NOTE: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U found in the tables.  Equivalent to Kruskal-Wallis H with
    just 2 groups.

    Usage:   lmannwhitneyu(data)
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    Raises:  ValueError if all scores are identical (tie correction is zero)
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    rankx = ranked[0:n1]                        # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)   # calc U for x
    u2 = n1*n2 - u1                             # remainder is U for y
    bigu = max(u1,u2)
    smallu = min(u1,u2)
    T = math.sqrt(tiecorrect(ranked))           # correction factor for tied scores
    if T == 0:
        raise ValueError('All numbers are identical in lmannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)              # normal approximation for prob calc
    return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
    """
    Corrects for ties in Mann Whitney U and Kruskal Wallis H tests.  See
    Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
    New York: McGraw-Hill.  Code adapted from |Stat rankind.c code.

    Usage:   ltiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    sorted_vals, posn = shellsort(rankvals)
    n = len(sorted_vals)
    T = 0.0
    i = 0
    while i < n-1:
        if sorted_vals[i] == sorted_vals[i+1]:
            # measure the length of this run of tied values
            nties = 1
            while i < n-1 and sorted_vals[i] == sorted_vals[i+1]:
                nties = nties + 1
                i = i + 1
            T = T + nties**3 - nties
        i = i + 1
    T = T / float(n**3 - n)
    return 1.0 - T
def lranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and
    returns the result.  Use only when the n in each condition is > 20 and you
    have 2 independent samples of ranks.

    Usage:   lranksums(x,y)
    Returns: a z-statistic, two-tailed p-value
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    s = sum(ranked[:n1])                # observed rank sum for x
    expected = n1*(n1+n2+1) / 2.0       # expected rank sum under H0
    z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob
def lwilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   lwilcoxont(x,y)
    Returns: a t-statistic, two-tail probability estimate
    Raises:  ValueError if x and y differ in length
    """
    if len(x) != len(y):
        raise ValueError('Unequal N in wilcoxont. Aborting.')
    # keep only the nonzero pairwise differences
    d = []
    for xi, yi in zip(x, y):
        diff = xi - yi
        if diff != 0:
            d.append(diff)
    count = len(d)
    absranked = rankdata(map(abs,d))
    # split the rank total by the sign of each difference
    r_plus = 0.0
    r_minus = 0.0
    for i in range(len(d)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)
    # normal approximation for the significance of wt
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 - zprob(abs(z)))
    return wt, prob
def lkruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)   each arg is one group's list of scores
    Returns: H-statistic (corrected for ties), associated p-value
    Raises:  ValueError if all scores are identical (tie correction is zero)
    """
    args = list(args)
    n = map(len,args)          # group sizes (the old [0]*k init was dead code)
    all = []
    for i in range(len(args)):
        all = all + args[i]
    ranked = rankdata(all)     # rank the pooled scores
    T = tiecorrect(ranked)
    # peel each group's ranks back off the pooled ranking
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        raise ValueError('All numbers are identical in lkruskalwallish')
    h = h / float(T)           # apply tie correction
    return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for repeated
    measures and returns the result, along with the associated probability
    value.  It assumes 3 or more repeated measures.  Only 3 levels requires a
    minimum of 10 subjects in the study.  Four levels requires 5 subjects per
    level(??).

    Usage:   lfriedmanchisquare(*args)   each arg is one condition's scores
    Returns: chi-square statistic, associated p-value
    Raises:  ValueError for fewer than 3 conditions
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')
    n = len(args[0])
    # one row per subject, ranked within subject
    data = apply(pstat.abut,tuple(args))
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    # BUGFIX: the statistic needs the squared sum of RANKS for each
    # condition (columns of the ranked data); previously the squared sums
    # of the RAW scores in args were used instead.
    ssbn = 0
    for j in range(k):
        colsum = 0
        for i in range(n):
            colsum = colsum + data[i][j]
        ssbn = ssbn + colsum**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
    """
    Returns the (1-tailed) probability value associated with the provided
    chi-square value and df.  Adapted from chisq.c in Gary Perlman's |Stat.

    Usage:   lchisqprob(chisq,df)
    """
    BIG = 20.0
    def ex(x):
        # exp() clamped to 0.0 for very negative arguments (avoids underflow)
        BIG = 20.0
        if x < -BIG:
            return 0.0
        else:
            return math.exp(x)
    # degenerate inputs: probability is 1 by convention
    if chisq <=0 or df < 1:
        return 1.0
    a = 0.5 * chisq
    if df%2 == 0:
        even = 1
    else:
        even = 0
    if df > 1:
        y = ex(-a)
    # seed the series with the closed-form result for 1 or 2 df
    # (y is always defined when even, since even implies df >= 2)
    if even:
        s = y
    else:
        s = 2.0 * zprob(-math.sqrt(chisq))
    if (df > 2):
        # accumulate the remaining series terms up to (df-1)/2
        chisq = 0.5 * (df - 1.0)
        if even:
            z = 1.0
        else:
            z = 0.5
        if a > BIG:
            # large chisq: accumulate terms in log space to avoid overflow
            if even:
                e = 0.0
            else:
                e = math.log(math.sqrt(math.pi))
            c = math.log(a)
            while (z <= chisq):
                e = math.log(z) + e
                s = s + ex(c*z-a-e)
                z = z + 1.0
            return s
        else:
            # moderate chisq: accumulate terms directly
            if even:
                e = 1.0
            else:
                e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
            c = 0.0
            while (z <= chisq):
                e = e * (a/float(z))
                c = c + e
                z = z + 1.0
            return (c*y+s)
    else:
        return s
def lerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipies.

    Usage:   lerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    # rational Chebyshev-style approximation, evaluated by Horner's
    # scheme (coefficients listed lowest-order last in the loop seed)
    poly = 0.17087277
    for coeff in [-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                  -0.18628806, 0.09678418, 0.37409196, 1.00002368]:
        poly = coeff + t*poly
    ans = t * math.exp(-z*z-1.26551223 + t*poly)
    # symmetry: erfc(-x) = 2 - erfc(x)
    if x >= 0:
        return ans
    else:
        return 2.0 - ans
def lzprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0    # maximum meaningful z-value
    # x accumulates the two-tailed central area for |z|; it is folded
    # into a left-tail probability at the end
    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX*0.5):
            x = 1.0    # |z| at or beyond Z_MAX: central area is ~1
        elif (y < 1.0):
            # polynomial approximation for |z| < 2
            w = y*y
            x = ((((((((0.000124818987 * w
                        -0.001075204047) * w +0.005198775019) * w
                      -0.019198292004) * w +0.059054035642) * w
                    -0.151968751364) * w +0.319152932694) * w
                  -0.531923007300) * w +0.797884560593) * y * 2.0
        else:
            # polynomial approximation for 2 <= |z| < Z_MAX
            y = y - 2.0
            x = (((((((((((((-0.000045255659 * y
                             +0.000152529290) * y -0.000019538132) * y
                           -0.000676904986) * y +0.001390604284) * y
                         -0.000794620820) * y -0.002034254874) * y
                       +0.006549791214) * y -0.010557625006) * y
                     +0.011630447319) * y -0.009279453341) * y
                   +0.005353579108) * y -0.002141268741) * y
                 +0.000535310849) * y +0.999936657524
    # convert the central area to the left-tail probability
    if z > 0.0:
        prob = ((x+1.0)*0.5)
    else:
        prob = ((1.0-x)*0.5)
    return prob
def lksprob(alam):
    """
    Computes a Kolmolgorov-Smirnov t-test significance level.  Adapted from
    Numerical Recipies.

    Usage:   lksprob(alam)
    """
    sign = 2.0         # alternating term weight: +2, -2, +2, ...
    total = 0.0        # running value of the series
    prevterm = 0.0     # |previous term|, used by the convergence test
    a2 = -2.0*alam*alam
    for j in range(1,201):
        term = sign*math.exp(a2*j*j)
        total = total + term
        # stop once terms are negligible relative to the last term or the sum
        if math.fabs(term) <= (0.001*prevterm) or math.fabs(term) < (1.0e-8*total):
            return total
        sign = -sign
        prevterm = math.fabs(term)
    return 1.0  # Get here only if fails to converge; was 0.0!!
def lfprob (dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF).

    Usage:   lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    # expressed through the incomplete beta function
    return betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def lbetacf(a,b,x):
    """
    This function evaluates the continued fraction form of the incomplete
    Beta function, betai.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetacf(a,b,x)
    Returns: the continued-fraction value, or None (after printing a
             warning) if ITMAX iterations fail to converge
    """
    ITMAX = 200
    EPS = 3.0e-7
    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        em = float(i+1)
        tem = em + em
        # even step of the recurrence
        d = em*(b-em)*x/((qam+tem)*(a+tem))
        ap = az + d*am
        bp = bz+d*bm
        # odd step of the recurrence
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
        app = ap+d*az
        bpp = bp+d*bz
        # renormalize to keep terms from over/underflowing
        aold = az
        am = ap/bpp
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        if (abs(az-aold)<(EPS*abs(az))):
            return az
    # parenthesized single-argument print works identically in Python 2
    # (the old bare print statement was Python-2-only syntax)
    print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
    """
    Returns the natural LOG of the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lgammln(xx)
    """
    # Lanczos-style series coefficients from Numerical Recipes
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*math.log(tmp)
    ser = 1.0
    for c in coeff:
        x = x + 1
        ser = ser + c/x
    return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented here,
    using the betacf function.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetai(a,b,x)
    Raises:  ValueError if x is outside [0,1]
    """
    if x < 0.0 or x > 1.0:
        raise ValueError('Bad x in lbetai')
    if x == 0.0 or x == 1.0:
        bt = 0.0
    else:
        # common prefactor x^a (1-x)^b / B(a,b), computed in log space
        bt = math.exp(gammln(a+b) - gammln(a) - gammln(b) +
                      a*math.log(x) + b*math.log(1.0-x))
    # use the continued fraction where it converges fastest, otherwise
    # apply the symmetry relation I_x(a,b) = 1 - I_(1-x)(b,a)
    if x < (a+1.0)/(a+b+2.0):
        return bt*betacf(a,b,x)/float(a)
    else:
        return 1.0 - bt*betacf(b,a,1.0-x)/float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(*lists):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    NOTE(review): despite living in the list-function section, this
    implementation converts its inputs with N.array and calls the array
    helpers amean/avar/ass/asquare_of_sums, which are presumably defined
    elsewhere in this file -- confirm the array module N is available.

    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                  treatment group
    Returns: F value, one-tailed p-value
    """
    a = len(lists)           # ANOVA on 'a' groups, each in it's own list
    means = [0]*a            # placeholder inits; all overwritten just below
    vars = [0]*a
    ns = [0]*a
    alldata = []
    tmp = map(N.array,lists)
    means = map(amean,tmp)
    vars = map(avar,tmp)
    ns = map(len,lists)
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    alldata = N.array(alldata)
    bign = len(alldata)
    # total sum-of-squares about the grand mean
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    ssbn = 0
    # between-groups sum-of-squares
    for list in lists:
        ssbn = ssbn + asquare_of_sums(N.array(list))/float(len(list))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn        # within-groups SS by subtraction
    dfbn = a-1
    dfwn = bign - a
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def lF_value (ER,EF,dfnum,dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR-dfF = degrees of freedom of the numerator
        dfF = degrees of freedom associated with the denominator/Full model

    Usage:   lF_value(ER,EF,dfnum,dfden)
    """
    numerator = (ER - EF) / float(dfnum)    # error reduction per df gained
    denominator = EF / float(dfden)         # full-model error per df
    return numerator / denominator
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file.  File-overwrite is the default.

    Special rows: '\n' (or ['\n']) emits a blank line; 'dashes' (or
    ['dashes']) emits a row of dashes spanning each column.

    Usage:   writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    # allow a single (1D) row to be passed bare
    if type(listoflists[0]) not in [ListType,TupleType]:
        listoflists = [listoflists]
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    # column widths are measured on a copy with separator rows removed
    # NOTE(review): ['dashes'] (list form) is not excluded here, only the
    # bare string 'dashes' -- confirm whether that is intended
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    # delete back-to-front so earlier indices stay valid
    rowstokill.reverse()
    for row in rowstokill:
        del list2print[row]
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = map(pstat.makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            # a near-full-width run of dashes in each column
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def lincr(l,cap):  # to increment a list up to a max-list of 'cap'
    """
    Simulate a counting system from an n-dimensional list: treat l as an
    odometer whose digit i rolls over after exceeding cap[i].

    Usage:   lincr(l,cap)   l=list to increment, cap=max values for each list pos'n
    Returns: next set of values for list l, OR -1 (if overflow)
    """
    l[0] = l[0] + 1                  # bump the least-significant digit
    for i in range(len(l)):
        if l[i] > cap[i]:
            if i < len(l)-1:         # carry into the next digit
                l[i] = 0
                l[i+1] = l[i+1] + 1
            else:                    # carried past the last digit: overflow
                l = -1
    return l
def lsum (inlist):
    """
    Returns the sum of the items in the passed list.

    Usage:   lsum(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value
    return total
def lcumsum (inlist):
    """
    Returns a list consisting of the cumulative sum of the items in the
    passed list.

    Usage:   lcumsum(inlist)
    """
    running = copy.deepcopy(inlist)
    # fold each predecessor's running total into the current slot
    for i in range(1, len(running)):
        running[i] = running[i] + running[i-1]
    return running
def lss(inlist):
    """
    Squares each value in the passed list, adds up these squares and
    returns the result.

    Usage:   lss(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value*value
    return total
def lsummult (list1,list2):
    """
    Multiplies elements in list1 and list2, element by element, and
    returns the sum of all resulting multiplications.  Must provide equal
    length lists.

    Usage:   lsummult(list1,list2)
    Raises:  ValueError if the lists differ in length
    """
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    # zip replaces pstat.abut here; they pair identically for the
    # equal-length lists enforced by the guard above
    total = 0
    for item1, item2 in zip(list1, list2):
        total = total + item1*item2
    return total
def lsumdiffsquared(x,y):
    """
    Takes pairwise differences of the values in lists x and y, squares
    these differences, and returns the sum of these squares.

    Usage:   lsumdiffsquared(x,y)
    Returns: sum[(x[i]-y[i])**2]
    """
    total = 0
    # indexed loop (not zip) so a shorter y still raises IndexError,
    # matching the original behavior
    for i in range(len(x)):
        total = total + (x[i]-y[i])**2
    return total
def lsquare_of_sums(inlist):
    """
    Adds the values in the passed list, squares the sum, and returns
    the result.

    Usage:   lsquare_of_sums(inlist)
    Returns: sum(inlist[i])**2   (as a float)
    """
    total = sum(inlist)
    return float(total) * total
def lshellsort(inlist):
    """
    Shellsort algorithm.  Sorts a 1D-list.

    Usage:   lshellsort(inlist)
    Returns: sorted-inlist, sorting-index-vector (for original list)
    """
    n = len(inlist)
    svec = copy.deepcopy(inlist)    # the values, sorted in place below
    # list() and // keep the semantics identical under Python 2 while
    # also working under Python 3 (bare range()/single-slash did not)
    ivec = list(range(n))           # original index of each sorted value
    gap = n // 2                    # integer division needed
    while gap > 0:
        for i in range(gap, n):
            for j in range(i-gap, -1, -gap):
                # the while acts as an 'if': one swap restores order here
                while j >= 0 and svec[j] > svec[j+gap]:
                    # swap the values and their index tags together
                    svec[j], svec[j+gap] = svec[j+gap], svec[j]
                    ivec[j], ivec[j+gap] = ivec[j+gap], ivec[j]
        gap = gap // 2              # integer division needed
    # svec is now sorted inlist, and svec[i] = inlist[ivec[i]]
    return svec, ivec
def lrankdata(inlist):
    """
    Ranks the data in inlist, dealing with ties appropritely.  Assumes
    a 1D inlist.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   lrankdata(inlist)
    Returns: a list of length equal to inlist, containing rank scores
    """
    n = len(inlist)
    svec, ivec = shellsort(inlist)
    sumranks = 0    # running total of 0-based positions in the tie group
    dupcount = 0    # size of the current tie group
    newlist = [0]*n
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        # at the end of a run of equal values, assign every member the
        # average rank of the run (+1 converts to 1-based ranks)
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1, i+1):
                newlist[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newlist
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
    """
    Prints or write to a file stats for two groups, using the name, n,
    mean, sterr, min and max for each group, as well as the statistic name,
    its value, and the associated p-value.  Output goes to the screen when
    fname is not a non-empty string, otherwise to the named file.

    Usage:   outputpairedstats(fname,writemode,
                               name1,n1,mean1,stderr1,min1,max1,
                               name2,n2,mean2,stderr2,min2,max2,
                               statname,stat,prob)
    Returns: None
    """
    suffix = ''                 # for *s after the p-value
    # unwrap a length-1 array prob (presumably a Numeric array -- TODO confirm)
    try:
        x = prob.shape
        prob = prob[0]
    except:
        pass
    # significance stars
    if prob < 0.001:  suffix = ' ***'
    elif prob < 0.01:  suffix = ' **'
    elif prob < 0.05:  suffix = ' *'
    title = [['Name','N','Mean','SD','Min','Max']]
    # se1/se2 are variances here: sqrt() turns them into SDs for display
    lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                  [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
    if type(fname)<>StringType or len(fname)==0:
        # no usable filename: print the report to the screen
        print
        print statname
        print
        pstat.printcc(lofl)
        print
        # unwrap 0-d array stat/prob (presumably Numeric -- TODO confirm)
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
        print
    else:
        # write the same report to the named file (table appended via writecc)
        file = open(fname,writemode)
        file.write('\n'+statname+'\n\n')
        file.close()
        writecc(lofl,fname,'a')
        file = open(fname,'a')
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
        file.close()
    return None
def lfindwithin (data):
    """
    Returns an integer representing a binary vector, where 1=within-
    subject factor, 0=between.  Input equals the entire data 2D list (i.e.,
    column 0=random factor, column -1=measured values (those two are skipped).

    Note: input data is in |Stat format ... a list of lists ("2D list") with
    one row per measured value, first column=subject identifier, last column=
    score, one in-between column per factor (these columns contain level
    designations on each factor).  See also stats.anova.__doc__.

    Usage:   lfindwithin(data)   data in |Stat format
    """
    numfact = len(data[0]) - 1
    withinvec = 0
    for col in range(1, numfact):
        # pick any one level of this factor and see which subjects have it
        examplelevel = pstat.unique(pstat.colex(data,col))[0]
        rows = pstat.linexand(data,col,examplelevel)  # get 1 level of this factor
        factsubjs = pstat.unique(pstat.colex(rows,0))
        allsubjs = pstat.unique(pstat.colex(data,0))
        # if every subject appears within this single level, the factor
        # varies within subjects: set this column's bit
        if len(factsubjs) == len(allsubjs):
            withinvec = withinvec + (1 << col)
    return withinvec
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################

# Each public name below is a Dispatch instance (the Dispatch class and the
# ListType/TupleType/IntType/FloatType constants are defined earlier in this
# file).  A Dispatch routes a call to the list-version ('l'-prefixed)
# function when the type of the FIRST argument matches one of the listed
# types.  NOTE(review): the array ('a'-prefixed) versions defined further
# down presumably get merged into these dispatchers later in the file --
# confirm against the end-of-file dispatch section.

## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
mean = Dispatch ( (lmean, (ListType, TupleType)), )
median = Dispatch ( (lmedian, (ListType, TupleType)), )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
mode = Dispatch ( (lmode, (ListType, TupleType)), )

## MOMENTS:
moment = Dispatch ( (lmoment, (ListType, TupleType)), )
variation = Dispatch ( (lvariation, (ListType, TupleType)), )
skew = Dispatch ( (lskew, (ListType, TupleType)), )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), )

## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )

## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
var = Dispatch ( (lvar, (ListType, TupleType)), )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
sem = Dispatch ( (lsem, (ListType, TupleType)), )
z = Dispatch ( (lz, (ListType, TupleType)), )
zs = Dispatch ( (lzs, (ListType, TupleType)), )

## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )

## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), )

## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )

## PROBABILITY CALCS:
# These accept scalars, hence the Int/Float type lists.
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
betai = Dispatch ( (lbetai, (IntType, FloatType)), )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), )

## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), )

## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType)), )
sum = Dispatch ( (lsum, (ListType, TupleType)), )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
ss = Dispatch ( (lss, (ListType, TupleType)), )
summult = Dispatch ( (lsummult, (ListType, TupleType)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import Numeric
N = Numeric
import LinearAlgebra
LA = LinearAlgebra
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
    """
    Calculates the geometric mean of the values in the passed array.
    That is:  n-th root of (x1 * x2 * ... * xn).  Defaults to ALL values in
    the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   ageometricmean(inarray,dimension=None,keepdims=0)
    Returns: geometric mean computed over dim(s) listed in dimension
    """
    # N is the Numeric module bound in the enclosing try block.
    inarray = N.array(inarray,N.Float)
    if dimension == None:
        inarray = N.ravel(inarray)
        size = len(inarray)
        # n-th root first, then product: avoids overflow of the raw product
        mult = N.power(inarray,1.0/size)
        mult = N.multiply.reduce(mult)
    elif type(dimension) in [IntType,FloatType]:
        size = inarray.shape[dimension]
        mult = N.power(inarray,1.0/size)
        mult = N.multiply.reduce(mult,dimension)
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            # BUG FIX: the original did 'sum = N.reshape(sum,shp)', reshaping
            # the wrong (here-undefined) name and discarding the result, so
            # keepdims=1 never worked on this path.
            mult = N.reshape(mult,shp)
    else: # must be a SEQUENCE of dims to average over
        dims = list(dimension)
        dims.sort()
        dims.reverse()           # reduce highest dims first so indices stay valid
        size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.Float)
        mult = N.power(inarray,1.0/size)
        for dim in dims:
            mult = N.multiply.reduce(mult,dim)
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            mult = N.reshape(mult,shp)
    return mult
def aharmonicmean (inarray,dimension=None,keepdims=0):
    """
    Calculates the harmonic mean of the values in the passed array.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Defaults to ALL values in
    the passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   aharmonicmean(inarray,dimension=None,keepdims=0)
    Returns: harmonic mean computed over dim(s) in dimension
    """
    # N is the Numeric module bound in the enclosing try block.
    inarray = inarray.astype(N.Float)
    if dimension == None:
        inarray = N.ravel(inarray)
        size = len(inarray)
        s = N.add.reduce(1.0 / inarray)      # sum of reciprocals
    elif type(dimension) in [IntType,FloatType]:
        size = float(inarray.shape[dimension])
        s = N.add.reduce(1.0/inarray, dimension)
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            s = N.reshape(s,shp)
    else: # must be a SEQUENCE of dims to average over
        dims = list(dimension)
        dims.sort()
        nondims = []
        for i in range(len(inarray.shape)):
            if i not in dims:
                nondims.append(i)
        tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
        idx = [0] *len(nondims)
        if idx == []:
            # ALL dims are collapsed over -- scalar result
            size = len(N.ravel(inarray))
            s = asum(1.0 / inarray)
            if keepdims == 1:
                s = N.reshape([s],N.ones(len(inarray.shape)))
        else:
            # walk every index combination of the kept dims via incr()
            # (a helper defined elsewhere in this file); idx starts at -1
            # so the first incr() yields the all-zeros index
            idx[0] = -1
            loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
            s = N.zeros(loopcap+1,N.Float)
            while incr(idx,loopcap) <> -1:
                s[idx] = asum(1.0/tinarray[idx])
            size = N.multiply.reduce(N.take(inarray.shape,dims))
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            s = N.reshape(s,shp)
    return size / s
def amean (inarray,dimension=None,keepdims=0):
    """
    Calculates the arithmetic mean of the values in the passed array.
    That is:  1/n * (x1 + x2 + ... + xn).  Defaults to ALL values in the
    passed array.  Use dimension=None to flatten array first.  REMEMBER: if
    dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
    if dimension is a sequence, it collapses over all specified dimensions.  If
    keepdims is set to 1, the resulting array will have as many dimensions as
    inarray, with only 1 'level' per dim that was collapsed over.

    Usage:   amean(inarray,dimension=None,keepdims=0)
    Returns: arithmetic mean calculated over dim(s) in dimension
    """
    # promote integer typecodes to float so the division below is exact
    if inarray.typecode() in ['l','s','b']:
        inarray = inarray.astype(N.Float)
    if dimension == None:
        inarray = N.ravel(inarray)
        sum = N.add.reduce(inarray)
        denom = float(len(inarray))
    elif type(dimension) in [IntType,FloatType]:
        sum = asum(inarray,dimension)
        denom = float(inarray.shape[dimension])
        if keepdims == 1:
            shp = list(inarray.shape)
            shp[dimension] = 1
            sum = N.reshape(sum,shp)
    else: # must be a TUPLE of dims to average over
        dims = list(dimension)
        dims.sort()
        dims.reverse()           # reduce highest dims first so indices stay valid
        sum = inarray *1.0       # force a float copy before reducing
        for dim in dims:
            sum = N.add.reduce(sum,dim)
        denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.Float)
        if keepdims == 1:
            shp = list(inarray.shape)
            for dim in dims:
                shp[dim] = 1
            sum = N.reshape(sum,shp)
    return sum/denom
def amedian (inarray,numbins=1000):
    """
    Calculates the COMPUTED median value of an array of numbers, given the
    number of bins to use for the histogram (more bins approaches finding the
    precise median value of the array; default number of bins = 1000).  From
    G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
    NOTE:  THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).

    Usage:   amedian(inarray,numbins=1000)
    Returns: median calculated over ALL values in inarray
    """
    inarray = N.ravel(inarray)
    (hist, smallest, binsize, extras) = ahistogram(inarray,numbins)
    cumhist = N.cumsum(hist)            # make cumulative histogram
    # mark bins at/after the 50th percentile; the first marked bin holds it
    otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
    otherbins = list(otherbins)         # list of 0/1s, 1s start at median bin
    cfbin = otherbins.index(1)          # get 1st(!) index holding 50%ile score
    LRL = smallest + binsize*cfbin      # get lower read limit of that bin
    cfbelow = N.add.reduce(hist[0:cfbin])        # cum. freq. below bin
    freq = hist[cfbin]                  # frequency IN the 50%ile bin
    # linear interpolation within the median bin
    median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
    return median
def amedianscore (inarray,dimension=None):
    """
    Returns the 'middle' score of the passed array.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.  Can function
    with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
    be None, to pre-flatten the array, or else dimension must equal 0).

    Usage:   amedianscore(inarray,dimension=None)
    Returns: 'middle' score of the array, or the mean of the 2 middle scores
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    inarray = N.sort(inarray,dimension)
    if inarray.shape[dimension] % 2 == 0:   # if even number of elements
        indx = inarray.shape[dimension]/2   # py2 integer division is correct here
        median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
    else:
        indx = inarray.shape[dimension] / 2 # py2 integer division is correct here
        median = N.take(inarray,[indx],dimension)
        if median.shape == (1,):
            median = median[0]              # unwrap single-element result
    return median
def amode(a, dimension=None):
    """
    Returns an array of the modal (most common) score in the passed array.
    If there is more than one such score, ONLY THE FIRST is returned.
    The bin-count for the modal values is also returned.  Operates on whole
    array (dimension=None), or on a given dimension.

    Usage:   amode(a, dimension=None)
    Returns: array of bin-counts for mode(s), array of corresponding modal values
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    scores = pstat.aunique(N.ravel(a))      # get ALL unique values
    testshape = list(a.shape)
    testshape[dimension] = 1
    oldmostfreq = N.zeros(testshape)
    oldcounts = N.zeros(testshape)
    # one pass per unique value, keeping a running "most frequent so far";
    # ties keep the EARLIER (smaller) score because N.where only replaces
    # on a strictly greater count
    for score in scores:
        template = N.equal(a,score)
        counts = asum(template,dimension,1)
        mostfrequent = N.where(N.greater(counts,oldcounts),score,oldmostfreq)
        oldcounts = N.where(N.greater(counts,oldcounts),counts,oldcounts)
        oldmostfreq = mostfrequent
    return oldcounts, mostfrequent
def atmean(a,limits=None,inclusive=(1,1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.typecode() in ['l','s','b']:
a = a.astype(N.Float)
if limits == None:
return mean(a)
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atmean"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atmean)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a,limits=None,inclusive=(1,1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.Float)
if limits == None or limits == [None,None]:
term1 = N.add.reduce(N.ravel(a*a))
n = float(len(N.ravel(a))) - 1
term2 = N.add.reduce(N.ravel(a))**2 / n
print term1, term2, n
return (term1 - term2) / n
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atvar"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atvar)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask))) - 1
term2 = N.add.reduce(N.ravel(a*mask))**2 / n
print term1, term2, n
return (term1 - term2) / n
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
    """
    Returns the minimum value of a, along dimension, including only values less
    than (or equal to, if inclusive=1) lowerlimit.  If the limit is set to None,
    all values in the array are used.

    Usage:   atmin(a,lowerlimit=None,dimension=None,inclusive=1)
    """
    if inclusive:       lowerfcn = N.greater
    else:               lowerfcn = N.greater_equal
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    if lowerlimit == None:
        # any value strictly below the global min works; -11 is arbitrary
        lowerlimit = N.minimum.reduce(N.ravel(a))-11
    biggest = N.maximum.reduce(N.ravel(a))
    # out-of-limits entries are replaced by the global max so they can
    # never win the minimum.reduce below
    ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
    return N.minimum.reduce(ta,dimension)
def atmax(a,upperlimit=None,dimension=None,inclusive=1):
    """
    Returns the maximum value of a, along dimension, including only values greater
    than (or equal to, if inclusive=1) upperlimit.  If the limit is set to None,
    a limit larger than the max value in the array is used.

    Usage:   atmax(a,upperlimit=None,dimension=None,inclusive=1)
    """
    # FIX: upperlimit now defaults to None, matching atmin's signature and
    # the documented "If the limit is set to None" behavior (backward
    # compatible -- positional callers are unaffected).
    if inclusive:       upperfcn = N.less
    else:               upperfcn = N.less_equal
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    if upperlimit == None:
        upperlimit = N.maximum.reduce(N.ravel(a))+1
    smallest = N.minimum.reduce(N.ravel(a))
    # out-of-limits entries are replaced by the global min so they can
    # never win the maximum.reduce below
    ta = N.where(upperfcn(a,upperlimit),a,smallest)
    return N.maximum.reduce(ta,dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
    """
    Returns the standard deviation of all values in an array, ignoring values
    strictly outside the sequence passed to 'limits'.   Note: either limit
    in the sequence, or the value of limits itself, can be set to None.  The
    inclusive list/tuple determines whether the lower and upper limiting bounds
    (respectively) are open/exclusive (0) or closed/inclusive (1).

    Usage:   atstdev(a,limits=None,inclusive=(1,1))
    """
    # NOTE(review): 'tvar' is not defined in this section -- presumably a
    # Dispatch wrapping atvar is bound later in the file; confirm, else this
    # raises NameError at call time.
    return N.sqrt(tvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a,limits,inclusive)
if limits == None or limits == [None,None]:
n = float(len(N.ravel(a)))
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atsem"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atsem)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
    """
    Calculates the nth moment about the mean for a sample (defaults to the
    1st moment).  Generally used to calculate coefficients of skewness and
    kurtosis.  Dimension can equal None (ravel array first), an integer
    (the dimension over which to operate), or a sequence (operate over
    multiple dimensions).

    Usage:   amoment(a,moment=1,dimension=None)
    Returns: appropriate moment along given dimension
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    # the first moment about the mean is zero by definition
    if moment == 1:
        return 0.0
    mn = amean(a,dimension,1)       # keepdims=1 so the subtraction broadcasts
    devs = N.power((a-mn),moment)   # per-element deviations raised to 'moment'
    return amean(devs,dimension)
def avariation(a,dimension=None):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   avariation(a,dimension=None)
    """
    # coefficient of variation = 100 * stdev / mean
    sdev = asamplestdev(a,dimension)
    return 100.0 * sdev / amean(a,dimension)
def askew(a,dimension=None):
    """
    Returns the skewness of a distribution (normal ==> 0.0; >0 means the
    distribution has a heavier RIGHT tail).  Use askewtest() to see if it's
    close enough.  Dimension can equal None (ravel array first), an integer
    (the dimension over which to operate), or a sequence (operate over
    multiple dimensions).

    Usage:   askew(a, dimension=None)
    Returns: skew of vals in a along dimension, returning ZERO where all vals equal
    """
    # skew = m3 / m2^1.5
    denom = N.power(amoment(a,2,dimension),1.5)
    zero = N.equal(denom,0)
    if type(denom) == N.ArrayType and asum(zero) <> 0:
        print "Number of zeros in askew: ",asum(zero)
    denom = denom + zero            # prevent divide-by-zero
    # report 0 (not the bumped-denominator quotient) where all values equal
    return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
    """
    Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
    heavier in the tails, and usually more peaked).  Use akurtosistest()
    to see if it's close enough.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   akurtosis(a,dimension=None)
    Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
    """
    # kurtosis = m4 / m2^2 (non-excess: normal distribution gives 3.0)
    denom = N.power(amoment(a,2,dimension),2)
    zero = N.equal(denom,0)
    if type(denom) == N.ArrayType and asum(zero) <> 0:
        print "Number of zeros in akurtosis: ",asum(zero)
    denom = denom + zero            # prevent divide-by-zero
    # report 0 where all values equal
    return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray,dimension=None):
    """
    Returns several descriptive statistics of the passed array.  Dimension
    can equal None (ravel array first), an integer (the dimension over
    which to operate), or a sequence (operate over multiple dimensions).

    Usage:   adescribe(inarray,dimension=None)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    n = inarray.shape[dimension]
    mm = (N.minimum.reduce(inarray),N.maximum.reduce(inarray))
    m = amean(inarray,dimension)
    sd = astdev(inarray,dimension)      # N-1 standard deviation
    skew = askew(inarray,dimension)
    kurt = akurtosis(inarray,dimension)
    return n, mm, m, sd, skew, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
    """
    Tests whether the skew is significantly different from a normal
    distribution.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).

    Usage:   askewtest(a,dimension=None)
    Returns: z-score and 2-tail z-probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    b2 = askew(a,dimension)
    n = float(a.shape[dimension])
    # transform the sample skew b2 to an approximately standard-normal Z
    y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
    beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
    W2 = -1 + N.sqrt(2*(beta2-1))
    delta = 1/N.sqrt(N.log(N.sqrt(W2)))
    alpha = N.sqrt(2/(W2-1))
    y = N.where(N.equal(y,0),1,y)       # avoid log(0) below
    Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
    return Z, (1.0-zprob(Z))*2          # two-tailed probability
def akurtosistest(a,dimension=None):
    """
    Tests whether a dataset has normal kurtosis (i.e.,
    kurtosis=3(n-1)/(n+1)) Valid only for n>20.  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).

    Usage:   akurtosistest(a,dimension=None)
    Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    n = float(a.shape[dimension])
    if n<20:
        print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
    b2 = akurtosis(a,dimension)
    E = 3.0*(n-1) /(n+1)                # expected kurtosis under normality
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
    x = (b2-E)/N.sqrt(varb2)            # standardized kurtosis
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
                                                       (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 -2/(9.0*A)
    denom = 1 +x*N.sqrt(2/(A-4.0))
    # 99 is a sentinel for invalid (negative) denominators; such entries
    # are forced to Z=0 ("bad pixels") at the end
    denom = N.where(N.less(denom,0), 99, denom)
    term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
    Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
    Z = N.where(N.equal(denom,99), 0, Z)
    return Z, (1.0-zprob(Z))*2          # two-tailed probability
def anormaltest(a,dimension=None):
    """
    Tests whether skew and/OR kurtosis of dataset differs from normal
    curve.  Can operate over multiple dimensions.  Dimension can equal
    None (ravel array first), an integer (the dimension over which to
    operate), or a sequence (operate over multiple dimensions).

    Usage:   anormaltest(a,dimension=None)
    Returns: z-score and 2-tail probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    s,p = askewtest(a,dimension)
    k,p = akurtosistest(a,dimension)
    # omnibus statistic: sum of squared z-scores is chi-square with 2 df
    k2 = N.power(s,2) + N.power(k,2)
    return k2, achisqprob(k2,2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
    """
    Builds a 2D table of item frequencies for a 1D array: the first column
    holds the sorted unique values, the second their respective counts.

    Usage:   aitemfreq(a)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = N.sort(pstat.aunique(a))       # sorted unique values
    counts = N.zeros(len(scores))
    idx = 0
    for val in scores:
        counts[idx] = N.add.reduce(N.equal(a,val))
        idx = idx + 1
    return N.array(pstat.aabut(scores, counts))
def ascoreatpercentile (inarray, percent):
    """
    Usage:   ascoreatpercentile(inarray,percent)   0<percent<100
    Returns: score at given percentile, relative to inarray distribution
    """
    percent = percent / 100.0
    targetcf = percent*len(inarray)     # target cumulative frequency
    # 'histogram'/'cumsum' are Dispatch wrappers; presumably re-bound later
    # in the file to also accept arrays -- confirm
    h, lrl, binsize, extras = histogram(inarray)
    cumhist = cumsum(h*1)
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # linear interpolation within the bin that crosses the target frequency
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
    """
    Note: result of this function depends on the values used to histogram
    the data(!).

    Usage:   apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
    Returns: percentile-position of score (0-100) relative to inarray
    """
    h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
    cumhist = cumsum(h*1)
    i = int((score - lrl)/float(binsize))   # bin index containing 'score'
    # cumulative count below the bin plus the interpolated fraction within it
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
    return pct
def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
    """
    Returns (i) an array of histogram bin counts, (ii) the smallest value
    of the histogram binning, (iii) the bin width (the last 2 are not
    necessarily integers), and (iv) the number of points falling outside
    the binned range.  Default number of bins is 10.  Defaultlimits
    can be None (the routine picks bins spanning all the numbers in the
    inarray) or a 2-sequence (lowerlimit, upperlimit).

    Usage:   ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
    Returns: (array of bin counts, bin-minimum, bin-width, #-points-outside-range)
    """
    inarray = N.ravel(inarray)          # flatten any >1D arrays
    if (defaultlimits <> None):
        lowerreallimit = defaultlimits[0]
        upperreallimit = defaultlimits[1]
        binsize = (upperreallimit-lowerreallimit) / float(numbins)
    else:
        Min = N.minimum.reduce(inarray)
        Max = N.maximum.reduce(inarray)
        # widen the range slightly so Min and Max both fall inside bins
        estbinwidth = float(Max - Min)/float(numbins) + 1
        binsize = (Max-Min+estbinwidth)/float(numbins)
        lowerreallimit = Min - binsize/2.0  #lower real limit,1st bin
    bins = N.zeros(numbins)
    extrapoints = 0
    for num in inarray:
        try:
            if (num-lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num-lowerreallimit) / float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except:                         # point outside lower/upper limits
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print '\nPoints outside given histogram range =',extrapoints
    return (bins, lowerreallimit, binsize, extrapoints)
def acumfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   acumfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    # 'histogram'/'cumsum' are Dispatch wrappers; presumably re-bound later
    # in the file to also accept arrays -- confirm
    h,l,b,e = histogram(a,numbins,defaultreallimits)
    cumhist = cumsum(h*1)
    return cumhist,l,b,e
def arelfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   arelfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of relative-frequency bin values, lowerreallimit, binsize, extrapoints
    """
    h,l,b,e = histogram(a,numbins,defaultreallimits)
    # normalize counts by the length of a's first dimension
    h = N.array(h/float(a.shape[0]))
    return h,l,b,e
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  Each
    array in *args is one level of a factor.  If an F_oneway() run on the
    transformed data and found significant, variances are unequal.   From
    Maxwell and Delaney, p.112.

    Usage:   aobrientransform(*args)    *args = 1D arrays, one per level of factor
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)
    n = N.zeros(k,N.Float)
    v = N.zeros(k,N.Float)
    m = N.zeros(k,N.Float)
    nargs = []
    for i in range(k):
        nargs.append(args[i].astype(N.Float))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])            # dispatched variance
        m[i] = mean(nargs[i])           # dispatched mean
    for j in range(k):
        # NOTE(review): n[j] is a float element -- py2 range() accepted
        # floats historically; confirm on the targeted interpreter
        for i in range(n[j]):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # sanity check: mean of transformed scores must equal original variance
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check <> 1:
        raise ValueError, 'Lack of convergence in obrientransform.'
    else:
        return N.array(nargs)
def asamplevar (inarray,dimension=None,keepdims=0):
    """
    Returns the sample variance of the values in the passed
    array (i.e., using N as the denominator).  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).  Set keepdims=1 to
    return an array with the same number of dimensions as inarray.

    Usage:   asamplevar(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    if dimension == 1:
        # keep the collapsed axis as a length-1 column so subtraction broadcasts
        mn = amean(inarray,dimension)[:,N.NewAxis]
    else:
        mn = amean(inarray,dimension,keepdims=1)
    deviations = inarray - mn
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    svar = ass(deviations,dimension,keepdims) / float(n)
    return svar
def asamplestdev (inarray, dimension=None, keepdims=0):
    """
    Returns the sample standard deviation of the values in the passed
    array (i.e., using N as the denominator).  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).  Set keepdims=1 to
    return an array with the same number of dimensions as inarray.

    Usage:   asamplestdev(inarray,dimension=None,keepdims=0)
    """
    return N.sqrt(asamplevar(inarray,dimension,keepdims))
def asignaltonoise(instack,dimension=0):
    """
    Calculates signal-to-noise.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   asignaltonoise(instack,dimension=0):
    Returns: array containing the value of (mean/stdev) along dimension,
             or 0 when stdev=0
    """
    m = mean(instack,dimension)
    sd = stdev(instack,dimension)
    # guard against division by zero where the stdev is 0
    return N.where(N.equal(sd,0),0,m/sd)
def avar (inarray, dimension=None,keepdims=0):
    """
    Returns the estimated population variance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   avar(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    mn = amean(inarray,dimension,1)     # keepdims=1 so subtraction broadcasts
    deviations = inarray - mn
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # ass() = sum of squares (defined elsewhere in this file); N-1 denominator
    var = ass(deviations,dimension,keepdims)/float(n-1)
    return var
def astdev (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard deviation of the values in
    the passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   astdev(inarray,dimension=None,keepdims=0)
    """
    return N.sqrt(avar(inarray,dimension,keepdims))
def asterr (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard error of the values in the
    passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   asterr(inarray,dimension=None,keepdims=0)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    # standard error = stdev / sqrt(n) along the chosen dimension
    return astdev(inarray,dimension,keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem (inarray, dimension=None, keepdims=0):
    """
    Standard error of the mean (i.e., using N) of the values in the passed
    array.  Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  Set keepdims=1 to return an array with the same number of
    dimensions as inarray.

    Usage:   asem(inarray,dimension=None, keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    # number of scores collapsed over
    if type(dimension) == ListType:
        count = 1
        for d in dimension:
            count = count * inarray.shape[d]
    else:
        count = inarray.shape[dimension]
    return asamplestdev(inarray, dimension, keepdims) / N.sqrt(count - 1)
def az (a, score):
    """
    Returns the z-score of a given input score, given the array from which
    that score came.  Not appropriate for population calculations, nor for
    arrays > 1D.

    Usage:   az(a, score)
    """
    return (score - amean(a)) / asamplestdev(a)
def azs (a):
    """
    Returns a 1D array of z-scores, one for each score in the passed array,
    computed relative to the passed array.

    Usage:   azs(a)
    """
    # NOTE(review): relies on z(), presumably the module-level alias of az()
    # defined elsewhere in this file -- confirm the alias exists
    return N.array([z(a, item) for item in a])
def azmap (scores, compare, dimension=0):
    """
    Returns an array of z-scores the shape of scores (e.g., [x,y]),
    standardized against the array passed to compare (e.g., [time,x,y]).
    Assumes collapsing over dim 0 of the compare array.

    Usage:   azmap(scores, compare, dimension=0)
    """
    means = amean(compare, dimension)
    # NOTE(review): the stdev is always taken over dim 0, regardless of the
    # dimension argument -- matches the "collapse over dim 0" assumption
    stds = asamplestdev(compare, 0)
    return (scores - means) / stds
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
def around(a,digits=1):
    """
    Rounds all values in array a to 'digits' decimal places.

    Usage:   around(a,digits)
    Returns: a, where each value is rounded to 'digits' decimals
    """
    def ar(x,d=digits):
        # round one value; digits captured as a default argument
        return round(x,d)
    # coerce non-array input; fall back to an Object array if the element
    # types cannot form a homogeneous numeric array
    if type(a) <> N.ArrayType:
        try:
            a = N.array(a)
        except:
            a = N.array(a,'O')
    shp = a.shape
    if a.typecode() in ['f','F','d','D']:
        # float/complex array: round every element
        b = N.ravel(a)
        b = N.array(map(ar,b))
        b.shape = shp
    elif a.typecode() in ['o','O']:
        # Object array: round only the float elements, leave others as-is
        b = N.ravel(a)*1
        for i in range(len(b)):
            if type(b[i]) == FloatType:
                b[i] = round(b[i],digits)
        b.shape = shp
    else: # not a float, double or Object array
        # integer etc. need no rounding; return a copy
        b = a*1
    return b
def athreshold(a,threshmin=None,threshmax=None,newval=0):
    """
    Like Numeric.clip() except that values <threshmin or >threshmax are
    replaced by newval instead of by threshmin/threshmax (respectively).

    Usage:   athreshold(a,threshmin=None,threshmax=None,newval=0)
    Returns: a, with values <threshmin or >threshmax replaced with newval
    """
    # build a 0/1 mask flagging every out-of-range element
    mask = N.zeros(a.shape)
    if threshmin <> None:
        mask = mask + N.where(N.less(a,threshmin),1,0)
    if threshmax <> None:
        mask = mask + N.where(N.greater(a,threshmax),1,0)
    # keep the mask binary even if an element violated both bounds
    mask = N.clip(mask,0,1)
    return N.where(mask,newval,a)
def atrimboth (a,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores).  You must pre-sort the array if you want
    "proper" trimming.  Slices off LESS if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    proportiontocut).

    Usage:   atrimboth (a,proportiontocut)
    Returns: trimmed version of array a
    """
    cut = int(proportiontocut * len(a))
    return a[cut:len(a) - cut]
def atrim1 (a,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   atrim1(a,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of array a
    Raises:  ValueError if tail is neither 'right' nor 'left' (the original
             fell through with lowercut/uppercut undefined -> NameError)
    """
    side = tail.lower()   # same normalization as string.lower(tail)
    if side == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut*len(a))
    elif side == 'left':
        lowercut = int(proportiontocut*len(a))
        uppercut = len(a)
    else:
        raise ValueError("tail must be 'right' or 'left', got %r" % (tail,))
    return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) <> 2:
raise TypeError, "acovariance requires 2D matrices"
n = X.shape[0]
mX = amean(X,0)
return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)
def acorrelation(X):
    """
    Computes the correlation matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acorrelation(X)
    Returns: correlation matrix of X
    """
    covmat = acovariance(X)
    variances = N.diagonal(covmat)
    # normalize each covariance by the product of the two stdevs
    return covmat / N.sqrt(N.multiply.outer(variances,variances))
def apaired(x,y):
    """
    Interactively determines the type of data in x and y, and then runs the
    appropriate statistic for paired group data.  Prompts on stdin/stdout;
    results are printed, not returned.

    Usage:   apaired(x,y)     x,y = the two arrays of values to be compared
    Returns: None (appropriate statistic name, value, and probability are
             printed to the screen)
    """
    # ask which design applies until a valid answer is given
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print '\nIndependent or related samples, or correlation (i,r,c): ',
        samples = raw_input()

    if samples in ['i','I','r','R']:
        print '\nComparing variances ...',
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print vartype
        if samples in ['i','I']:
            # independent samples: parametric t if variances equal,
            # otherwise a nonparametric test chosen by sample size
            if vartype[0]=='e':
                t,p = ttest_ind(x,y,None,0)
                print '\nIndependent samples t-test: ', round(t,4),round(p,4)
            else:
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
                else:
                    u,p = mannwhitneyu(x,y)
                    print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
        else: # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print '\nRelated samples t-test: ', round(t,4),round(p,4)
            else:
                t,p = ranksums(x,y)
                print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
    else: # CORRELATION ANALYSIS
        # ask for the measurement level until a valid answer is given
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
            corrtype = raw_input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print '\nLinear regression for continuous variables ...'
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print '\nCorrelation for ranked variables ...'
            print "Spearman's r: ",round(r,4),round(p,4)
        else: # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print '\nAssuming x contains a dichotomous variable ...'
            print 'Point Biserial r: ',round(r,4),round(p,4)
    print '\n\n'
    return None
def apearsonr(x,y,verbose=1):
    """
    Calculates a Pearson correlation coefficient and returns p.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.

    Usage:   apearsonr(x,y,verbose=1)      where x,y are equal length arrays
    Returns: Pearson's r, two-tailed p-value
    """
    TINY = 1.0e-20   # guards the t computation against r == +/-1
    n = len(x)
    # NOTE: the original also computed amean(x)/amean(y) into unused
    # locals; that dead work is removed here
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = (r_num / r_den)
    df = n-2
    # convert r to a t statistic, then to a two-tailed p via incomplete beta
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
    return r,prob
def aspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   aspearmanr(x,y)      where x,y are equal-length arrays
    Returns: Spearman's r, two-tailed p-value
    """
    TINY = 1e-30
    npts = len(x)
    xranks = rankdata(x)
    yranks = rankdata(y)
    sumdsq = N.add.reduce((xranks-yranks)**2)
    rho = 1 - 6*sumdsq / float(npts*(npts**2-1))
    tstat = rho * math.sqrt((npts-2) / ((rho+1.0)*(1.0-rho)))
    df = npts-2
    # probability values for rho are from part 2 of the spearman function in
    # Numerical Recipes, p.510.  They are close to tables, but not exact.
    prob = abetai(0.5*df,0.5,df/(df+tstat*tstat))
    return rho, prob
def apointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.  x must hold exactly two distinct category values.

    Usage:   apointbiserialr(x,y)      where x,y are equal length arrays
    Returns: Point-biserial r, two-tailed p-value
    """
    TINY = 1e-30   # guards the t computation against rpb == +/-1
    categories = pstat.aunique(x)
    data = pstat.aabut(x,y)
    if len(categories) <> 2:
        raise ValueError, "Exactly 2 categories required (in x) for pointbiserialr()."
    else: # there are 2 categories, continue
        # recode the two category values to 0/1, then split rows by category
        codemap = pstat.aabut(categories,N.arange(2))
        recoded = pstat.arecode(data,codemap,0)
        x = pstat.alinexand(data,0,categories[0])
        y = pstat.alinexand(data,0,categories[1])
        xmean = amean(pstat.acolex(x,1))
        ymean = amean(pstat.acolex(y,1))
        n = len(data)
        # sqrt(p*q): proportion-based adjustment for group sizes
        adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
        rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
        df = n-2
        t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
        prob = abetai(0.5*df,0.5,df/(df+t*t))
        return rpb, prob
def akendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-cases.@@@

    Usage:   akendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0    # pairs not tied in x
    n2 = 0    # pairs not tied in y
    iss = 0   # concordant minus discordant pairs
    for j in range(len(x)-1):
        # NOTE(review): Numerical Recipes' kendl1 iterates k from j+1; this
        # starts at j, so each element is also paired with itself (aa==0,
        # a1==0 branch), incrementing n2 once per j -- confirm intended
        for k in range(j,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa): # neither array has a tie
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss -1
            else:
                if (a1):
                    n1 = n1 + 1
                else:
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)   # 1.4142136 = sqrt(2)
    return tau, prob
def alinregress(*args):
    """
    Calculates a regression line on two arrays, x and y, corresponding to x,y
    pairs.  If a single 2D array is passed, alinregress finds dim with 2 levels
    and splits data into x,y pairs along that dim.

    Usage:   alinregress(*args)    args=2 equal-length arrays, or one 2D array
    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate
    """
    TINY = 1.0e-20   # guards the t computation against r == +/-1
    if len(args) == 1: # more than 1D array?
        args = args[0]
        if len(args) == 2:
            # 2 rows: first row is x, second is y
            x = args[0]
            y = args[1]
        else:
            # otherwise assume 2 columns: split column-wise
            x = args[:,0]
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    n = len(x)
    xmean = amean(x)
    ymean = amean(y)
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = r_num / r_den
    # NOTE(review): z (Fisher's z-transform of r) is computed but never used
    z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*asamplestdev(y)   # standard error of the estimate
    return slope, intercept, r, prob, sterrest
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   attest_1samp(a,popmean,name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    if type(a) != N.ArrayType:
        a = N.array(a)
    x = amean(a)
    v = avar(a)
    n = len(a)
    df = n-1
    # NOTE(review): with df == n-1 this is simply v; kept for parity with
    # the two-sample pooled-variance formula
    svar = ((n-1)*v) / float(df)
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    if printit <> 0:
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,x,v,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          statname,t,prob)
    return t,prob
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
    a, and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_ind (a,b,dimension=None,printit=0,
                         name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value (or None when printit is used;
             the printit branch returns before the final return)
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n1 = a.shape[dimension]
    n2 = b.shape[dimension]
    df = n1+n2-2
    # pooled variance estimate
    svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
    zerodivproblem = N.equal(svar,0)
    svar = N.where(zerodivproblem,1,svar)  # avoid zero-division in 1st place
    t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2))  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)      # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))

    if type(t) == N.ArrayType:
        probs = N.reshape(probs,t.shape)
    if len(probs) == 1:
        probs = probs[0]

    if printit <> 0:
        # printing path collapses arrays to their first element
        if type(t) == N.ArrayType:
            t = t[0]
        if type(probs) == N.ArrayType:
            probs = probs[0]
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): returns None here, so printit callers never get
        # the (t, probs) tuple below
        return
    return t, probs
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores, a
    and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_rel(a,b,dimension=None,printit=0,
                        name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value (or None when printit is used;
             the printit branch returns before the final return)
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    if len(a)<>len(b):
        raise ValueError, 'Unequal length arrays.'
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n = a.shape[dimension]
    df = float(n-1)
    # paired differences (as double to avoid integer overflow/truncation)
    d = (a-b).astype('d')

    denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
    zerodivproblem = N.equal(denom,0)
    denom = N.where(zerodivproblem,1,denom)  # avoid zero-division in 1st place
    t = N.add.reduce(d,dimension) / denom    # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)        # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
    if type(t) == N.ArrayType:
        probs = N.reshape(probs,t.shape)
    if len(probs) == 1:
        probs = probs[0]

    if printit <> 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): returns None here, so printit callers never get
        # the (t, probs) tuple below
        return
    return t, probs
def achisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for array of observed frequencies and
    returns the result.  If no expected frequencies are given, the total N
    is assumed to be equally distributed across all groups.

    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    ncells = len(f_obs)
    if f_exp is None:
        # default expectation: the total spread evenly over all cells
        evenshare = sum(f_obs) / float(ncells)
        f_exp = N.array([evenshare] * ncells, N.Float)
    f_exp = f_exp.astype(N.Float)
    chisq = N.add.reduce((f_obs - f_exp)**2 / f_exp)
    return chisq, chisqprob(chisq, ncells - 1)
def aks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  Modified from
    Numerical Recipies in C, page 493.  Returns KS D-value, prob.  Not ufunc-
    like.

    Usage:   aks_2samp(data1,data2)   where data1 and data2 are 1D arrays
    Returns: KS D-value, p-value
    """
    # merge-walk both sorted samples, tracking the two empirical CDFs
    j1 = 0    # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
    j2 = 0    # N.zeros(data2.shape[1:])
    fn1 = 0.0 # N.zeros(data1.shape[1:],N.Float)
    fn2 = 0.0 # N.zeros(data2.shape[1:],N.Float)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    en1 = n1*1
    en2 = n2*1
    d = N.zeros(data1.shape[1:],N.Float)   # running max CDF difference
    data1 = N.sort(data1,0)
    data2 = N.sort(data2,0)
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if abs(dt) > abs(d):
            d = dt
    try:
        # asymptotic KS distribution with small-sample correction terms
        en = math.sqrt(en1*en2/float(en1+en2))
        prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
    except:
        prob = 1.0
    return d, prob
def amannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x,y)))
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError, 'All numbers are identical in amannwhitneyu'
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
def atiecorrect(rankvals):
    """
    Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
    See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
    Sciences.  New York: McGraw-Hill.  Code adapted from |Stat rankind.c
    code.

    Usage:   atiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    sorted,posn = ashellsort(N.array(rankvals))
    n = len(sorted)
    T = 0.0
    i = 0
    # scan runs of equal sorted values; each run of length nties
    # contributes nties^3 - nties to the correction sum
    while (i<n-1):
        if sorted[i] == sorted[i+1]:
            nties = 1
            while (i<n-1) and (sorted[i] == sorted[i+1]):
                nties = nties +1
                i = i +1
            T = T + nties**3 - nties
        i = i+1
    T = T / float(n**3-n)
    return 1.0 - T
def aranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and returns
    the result.

    Usage:   aranksums(x,y)     where x,y are arrays of values for 2 conditions
    Returns: z-statistic, two-tailed p-value
    """
    n1 = len(x)
    n2 = len(y)
    combined = arankdata(N.concatenate((x,y)))
    ranksum = sum(combined[:n1])           # rank sum of the first sample
    expected = n1*(n1+n2+1) / 2.0          # its expectation under H0
    z = (ranksum - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob
def awilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) <> len(y):
raise ValueError, 'Unequal N in awilcoxont. Aborting.'
d = x-y
d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
z = math.fabs(wt-mn) / se
prob = 2*(1.0 -zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
assert len(args) == 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
n = [0]*len(args)
n = map(len,args)
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError, 'All numbers are identical in akruskalwallish'
h = h / float(T)
return h, chisqprob(h,df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value. It assumes 3 or more repeated measures. Only 3
levels requires a minimum of 10 subjects in the study. Four levels
requires 5 subjects per level(??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError, '\nLess than 3 levels. Friedman test not appropriate.\n'
n = len(args[0])
data = apply(pstat.aabut,args)
data = data.astype(N.Float)
for i in range(len(data)):
data[i] = arankdata(data[i])
ssbn = asum(asum(args,1)**2)
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x,-BIG),-BIG,x)
return N.exp(exponents)
if type(chisq) == N.ArrayType:
arrayflag = 1
else:
arrayflag = 0
chisq = N.array([chisq])
if df < 1:
return N.ones(chisq.shape,N.float)
probs = N.zeros(chisq.shape,N.Float)
probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df%2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape,N.Float)
else:
z = 0.5 *N.ones(probs.shape,N.Float)
if even:
e = N.zeros(probs.shape,N.Float)
else:
e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.Float)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a,BIG)
a_big_frozen = -1 *N.ones(probs.shape,N.Float)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask)<>totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z,chisq)
a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask,0,1)
if even:
z = N.ones(probs.shape,N.Float)
e = N.ones(probs.shape,N.Float)
else:
z = 0.5 *N.ones(probs.shape,N.Float)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.Float)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 *N.ones(probs.shape,N.Float)
while asum(mask)<>totalelements:
e = e * (a/z.astype(N.Float))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z,chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask,0,1)
probs = N.where(N.equal(probs,1),1,
N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional error
    everywhere less than 1.2e-7.  Adapted from Numerical Recipies.  Can
    handle multiple dimensions.

    Usage:   aerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    # Horner evaluation of the rational approximation's polynomial;
    # identical arithmetic to the original nested expression
    poly = 0.17087277
    for coef in [-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                 -0.18628806, 0.09678418, 0.37409196, 1.00002368]:
        poly = coef + t*poly
    ans = t * N.exp(-z*z - 1.26551223 + t*poly)
    # erfc(-x) = 2 - erfc(x)
    return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.  Can handle multiple dimensions.

    Usage:   azprob(z)    where z is a z-value
    """
    # polynomial approximation used for |z| >= 2 (argument y-2.0)
    def yfunc(y):
        x = (((((((((((((-0.000045255659 * y
                         +0.000152529290) * y -0.000019538132) * y
                       -0.000676904986) * y +0.001390604284) * y
                     -0.000794620820) * y -0.002034254874) * y
                   +0.006549791214) * y -0.010557625006) * y
                 +0.011630447319) * y -0.009279453341) * y
               +0.005353579108) * y -0.002141268741) * y
             +0.000535310849) * y +0.999936657524
        return x

    # polynomial approximation used for |z| < 2 (argument y*y)
    def wfunc(w):
        x = ((((((((0.000124818987 * w
                    -0.001075204047) * w +0.005198775019) * w
                  -0.019198292004) * w +0.059054035642) * w
                -0.151968751364) * w +0.319152932694) * w
              -0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
        return x

    Z_MAX = 6.0    # maximum meaningful z-value
    # NOTE(review): z.shape is read directly, so z is expected to be an
    # array (a plain Python scalar would fail here) -- confirm callers
    x = N.zeros(z.shape,N.Float) # initialize
    y = 0.5 * N.fabs(z)
    x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's
    x = N.where(N.greater(y,Z_MAX*0.5),1.0,x)          # kill those with big Z
    prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)
    return prob
def aksprob(alam):
    """
    Returns the probability value for a K-S statistic computed via ks_2samp.
    Adapted from Numerical Recipies.  Can handle multiple dimensions.

    Usage:   aksprob(alam)
    """
    if type(alam) == N.ArrayType:
        frozen = -1 *N.ones(alam.shape,N.Float64)
        alam = alam.astype(N.Float64)
        arrayflag = 1
    else:
        frozen = N.array(-1.)
        alam = N.array(alam,N.Float64)
        arrayflag = 0   # BUGFIX: was never set here, causing a NameError below
    mask = N.zeros(alam.shape)
    fac = 2.0 *N.ones(alam.shape,N.Float)
    sum = N.zeros(alam.shape,N.Float)
    termbf = N.zeros(alam.shape,N.Float)
    a2 = N.array(-2.0*alam*alam,N.Float64)
    totalelements = N.multiply.reduce(N.array(mask.shape))
    # alternating series, frozen element-wise once converged
    for j in range(1,201):
        if asum(mask) == totalelements:
            break
        exponents = (a2*j*j)
        overflowmask = N.less(exponents,-746)   # exp() would underflow
        frozen = N.where(overflowmask,0,frozen)
        mask = mask+overflowmask
        term = fac*N.exp(exponents)
        sum = sum + term
        newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
                          N.less(abs(term),1.0e-8*sum), 1, 0)
        frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
        mask = N.clip(mask+newmask,0,1)
        fac = -fac
        termbf = abs(term)
    if arrayflag:
        return N.where(N.equal(frozen,-1), 1.0, frozen)      # 1.0 if doesn't converge
    else:
        return N.where(N.equal(frozen,-1), 1.0, frozen)[0]   # 1.0 if doesn't converge
def afprob (dfnum, dfden, F):
    """
    Returns the 1-tailed significance level (p-value) of an F statistic
    given the degrees of freedom for the numerator (dfR-dfF) and the degrees
    of freedom for the denominator (dfF).  Can handle multiple dims for F.

    Usage:   afprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    # transform F into the incomplete-beta argument x = dfden/(dfden+dfnum*F)
    if type(F) == N.ArrayType:
        x = dfden/(1.0*dfden+dfnum*F)
    else:
        x = dfden/float(dfden+dfnum*F)
    return abetai(0.5*dfden, 0.5*dfnum, x)
def abetacf(a,b,x,verbose=1):
    """
    Evaluates the continued fraction form of the incomplete Beta function,
    betai.  (Adapted from: Numerical Recipies in C.)  Can handle multiple
    dimensions for x.

    Usage:   abetacf(a,b,x,verbose=1)
    """
    ITMAX = 200     # maximum continued-fraction iterations
    EPS = 3.0e-7    # relative convergence tolerance
    arrayflag = 1
    if type(x) == N.ArrayType:
        frozen = N.ones(x.shape,N.Float) *-1  #start out w/ -1s, should replace all
    else:
        arrayflag = 0
        frozen = N.array([-1])
        x = N.array([x])
    mask = N.zeros(x.shape)
    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    # continued-fraction recurrence; each element is frozen into 'frozen'
    # once its successive estimates agree within EPS
    for i in range(ITMAX+1):
        if N.sum(N.ravel(N.equal(frozen,-1)))==0:
            break
        em = float(i+1)
        tem = em + em
        d = em*(b-em)*x/((qam+tem)*(a+tem))       # even step of the recurrence
        ap = az + d*am
        bp = bz+d*bm
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))  # odd step of the recurrence
        app = ap+d*az
        bpp = bp+d*bz
        aold = az*1
        am = ap/bpp          # renormalize to keep the terms bounded
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        newmask = N.less(abs(az-aold),EPS*abs(az))
        frozen = N.where(newmask*N.equal(mask,0), az, frozen)
        mask = N.clip(mask+newmask,0,1)
    noconverge = asum(N.equal(frozen,-1))
    if noconverge <> 0 and verbose:
        print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
    if arrayflag:
        return frozen
    else:
        return frozen[0]
def agammln(xx):
    """
    Returns the natural log of the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    Adapted from: Numerical Recipies in C.  Can handle multiple dims ... but
    probably doesn't normally have to.

    Usage:   agammln(xx)
    """
    # series coefficients from Numerical Recipes' gammln
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*N.log(tmp)
    ser = 1.0
    for c in coeff:
        x = x + 1
        ser = ser + c/x
    return -tmp + N.log(2.50662827465*ser)
def abetai(a,b,x,verbose=1):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented
    here, using the betacf function.  (Adapted from: Numerical Recipies in
    C.)  Can handle multiple dimensions.

    Usage:   abetai(a,b,x,verbose=1)
    """
    TINY = 1e-15
    if type(a) == N.ArrayType:
        if asum(N.less(x,0)+N.greater(x,1)) <> 0:
            raise ValueError, 'Bad x in abetai'
    # nudge x away from the exact endpoints 0 and 1
    x = N.where(N.equal(x,0),TINY,x)
    x = N.where(N.equal(x,1.0),1-TINY,x)

    # NOTE(review): this bt is immediately overwritten by the exp() below;
    # after the nudging above x never equals 0 or 1 exactly anyway
    bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
    exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
                  N.log(1.0-x) )
    # 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
    exponents = N.where(N.less(exponents,-740),-740,exponents)
    bt = N.exp(exponents)
    if type(x) == N.ArrayType:
        # use the symmetry I_x(a,b) = 1 - I_(1-x)(b,a) for faster convergence
        ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
                      bt*abetacf(a,b,x,verbose)/float(a),
                      1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
    else:
        if x<(a+1)/(a+b+2.0):
            ans = bt*abetacf(a,b,x,verbose)/float(a)
        else:
            ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
    return ans
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import LinearAlgebra, operator
LA = LinearAlgebra
def aglm(data,para):
    """
    Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
    from:
        Peterson et al. Statistical limitations in functional neuroimaging
        I. Non-inferential methods and statistical models.  Phil Trans Royal Soc
        Lond B 354: 1239-1260.

    Usage:   aglm(data,para)
    Returns: (t, prob) for the 2-group case; NOTE(review): for any other
             number of groups the function falls off the end and returns
             None (matches the '???' in the original docstring)
    """
    if len(para) <> len(data):
        print "data and para must be same length in aglm"
        return
    n = len(para)
    p = pstat.aunique(para)
    x = N.zeros((n,len(p)))  # design matrix
    for l in range(len(p)):
        # one indicator column per unique level of para
        x[:,l] = N.equal(para,p[l])
    b = N.dot(N.dot(LA.inverse(N.dot(N.transpose(x),x)),  # i.e., b=inv(X'X)X'Y
                    N.transpose(x)),
              data)
    diffs = (data - N.dot(x,b))   # residuals
    s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)

    if len(p) == 2:  # ttest_ind
        c = N.array([1,-1])   # contrast: group1 - group2
        df = n-2
        fact = asum(1.0/asum(x,0))  # i.e., 1/n1 + 1/n2 + 1/n3 ...
        t = N.dot(c,b) / N.sqrt(s_sq*fact)
        probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
        return t, probs
def aF_oneway(*args):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   aF_oneway (*args)    where *args is 2 or more arrays, one per
                                  treatment group
    Returns: f-value, probability
    """
    na = len(args)  # ANOVA on 'na' groups, each in its own array
    # NOTE: the original also filled means/vars/ns locals that were never
    # used afterwards; that dead work is removed here
    alldata = N.concatenate(args)
    bign = len(alldata)
    # total sum-of-squares about the grand mean
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    # between-groups sum-of-squares
    ssbn = 0
    for a in args:
        ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn   # within-groups (error) sum-of-squares
    dfbn = na-1
    dfwn = bign - na
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def aF_value (ER,EF,dfR,dfF):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR = degrees of freedom the Restricted model
        dfF = degrees of freedom associated with the Restricted model
    """
    numerator = (ER - EF) / float(dfR - dfF)
    denominator = EF / float(dfF)
    return numerator / denominator
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
    """Pretty-print an F-test summary table via pstat.printcc.

    Enum, Eden   = numerator / denominator error terms
    dfnum, dfden = numerator / denominator degrees of freedom
    f, prob      = F-value and its associated probability

    Significance markers appended after the p-value:
    *** p<0.001,  ** p<0.01,  * p<0.05.
    """
    Enum = round(Enum, 3)
    Eden = round(Eden, 3)
    # BUGFIX: original read 'dfnum = round(Enum,3)', which clobbered the
    # numerator degrees of freedom with the (already rounded) Enum value.
    dfnum = round(dfnum, 3)
    dfden = round(dfden, 3)
    f = round(f, 3)
    prob = round(prob, 3)
    suffix = ''                 # for *s after the p-value
    if prob < 0.001:
        suffix = ' ***'
    elif prob < 0.01:
        suffix = ' **'
    elif prob < 0.05:
        suffix = ' *'
    title = [['EF/ER','DF','Mean Square','F-value','prob','']]
    lofl = title + [[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
                    [Eden, dfden, round(Eden/float(dfden),3), '', '', '']]
    pstat.printcc(lofl)
    return
def F_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns an F-statistic given the following:
        ER    = error associated with the null hypothesis (the Restricted model)
        EF    = error associated with the alternate hypothesis (the Full model)
        dfnum = degrees of freedom for the numerator
        dfden = degrees of freedom for the denominator
    where ER and EF are matrices from a multivariate F calculation.
    (Docstring fixed: the parameters are dfnum/dfden, not dfR/dfF.)
    """
    # promote bare scalars to 1x1 matrices so LA.determinant applies
    if type(ER) in [IntType, FloatType]:
        ER = N.array([[ER]])
    if type(EF) in [IntType, FloatType]:
        EF = N.array([[EF]])
    # F = [(|ER| - |EF|) / dfnum] / [|EF| / dfden]
    n_um = (LA.determinant(ER) - LA.determinant(EF)) / float(dfnum)
    d_en = LA.determinant(EF) / float(dfden)
    return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
    """Return the elementwise sign of a: -1 where a<0, +1 where a>0,
    0 where a==0.

    Usage:   asign(a)
    Returns: array of a's shape
    """
    a = N.asarray(a)
    # scalar path: a-a yields a zero of matching numeric type
    is_python_scalar = (type(a) == type(1.4)) or (type(a) == type(1))
    if is_python_scalar:
        return a - a - N.less(a, 0) + N.greater(a, 0)
    return N.zeros(N.shape(a)) - N.less(a, 0) + N.greater(a, 0)
def asum (a, dimension=None,keepdims=0):
    """Sum an array over one, several, or all dimensions.

    An alternative to Numeric.add.reduce that can (1) collapse several
    dimensions at once and/or (2) retain all original dimensions (the
    collapsed ones squashed down to size 1).

    dimension: None (ravel array first), an integer (the axis to sum
               over), or a sequence of axes (sum over several at once).
    keepdims:  if 1, the result has as many dimensions as the input.

    Usage:   asum(a, dimension=None, keepdims=0)
    Returns: array summed along 'dimension'(s), same _number_ of dims
             if keepdims=1
    """
    # promote small integer typecodes to float so the sum cannot overflow
    if type(a) == N.ArrayType and a.typecode() in ['l','s','b']:
        a = a.astype(N.Float)
    if dimension == None:
        return N.sum(N.ravel(a))
    if type(dimension) in [IntType,FloatType]:
        s = N.add.reduce(a, dimension)
        if keepdims == 1:
            shp = list(a.shape)
            shp[dimension] = 1
            s = N.reshape(s, shp)
        return s
    # otherwise: a SEQUENCE of axes -- collapse the highest axis first so
    # the remaining axis numbers stay valid
    axes = list(dimension)
    axes.sort()
    axes.reverse()
    s = a * 1.0
    for axis in axes:
        s = N.add.reduce(s, axis)
    if keepdims == 1:
        shp = list(a.shape)
        for axis in axes:
            shp[axis] = 1
        s = N.reshape(s, shp)
    return s
def acumsum (a,dimension=None):
    """Return the cumulative sum of the items in the passed array.

    dimension can equal None (ravel array first), an integer (the axis
    to operate over), or a sequence of axes (accumulation applied over
    each in turn, highest axis first -- this last one just barely makes
    sense).

    Usage:   acumsum(a, dimension=None)
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    if type(dimension) in [ListType, TupleType, N.ArrayType]:
        axes = list(dimension)
        axes.sort()
        axes.reverse()
        for axis in axes:
            a = N.add.accumulate(a, axis)
        return a
    return N.add.accumulate(a, dimension)
def ass(inarray, dimension=None, keepdims=0):
    """Return the sum of squared values in the passed array.

    (Unfortunate function name. :-)  dimension can equal None (ravel
    array first), an integer (the axis to operate over), or a sequence
    of axes.  Set keepdims=1 to maintain the original number of
    dimensions.

    Usage:   ass(inarray, dimension=None, keepdims=0)
    Returns: sum-along-'dimension' for (inarray*inarray)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    squared = inarray * inarray
    return asum(squared, dimension, keepdims)
def asummult (array1,array2,dimension=None,keepdims=0):
    """Return the sum (along 'dimension') of elementwise products of
    array1 and array2.

    A trivial function, included for completeness.  dimension can equal
    None (ravel arrays first), an integer axis, or a sequence of axes.

    Usage:   asummult(array1, array2, dimension=None, keepdims=0)
    """
    if dimension == None:
        array1 = N.ravel(array1)
        array2 = N.ravel(array2)
        dimension = 0
    products = array1 * array2
    return asum(products, dimension, keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
    """Sum the array along 'dimension', then square that sum.

    dimension can equal None (ravel array first), an integer axis, or a
    sequence of axes.  If keepdims=1, the returned array has the same
    NUMBER of dimensions as the original.

    Usage:   asquare_of_sums(inarray, dimension=None, keepdims=0)
    Returns: the square of the sum over dim(s) in dimension
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    total = asum(inarray, dimension, keepdims)
    if type(total) == N.ArrayType:
        # promote to float before squaring (avoids integer overflow)
        return total.astype(N.Float) * total
    return float(total) * total
def asumdiffsquared(a,b, dimension=None, keepdims=0):
    """Sum the squared pairwise differences of the values in a and b.

    dimension can equal None (ravel first), an integer axis, or a
    sequence of axes; keepdims=1 means the return keeps the original
    number of dimensions.

    Usage:   asumdiffsquared(a, b, dimension=None, keepdims=0)
    Returns: sum[ravel(a-b)**2] when dimension is None
    """
    diffs = (a - b) ** 2
    if dimension == None:
        # BUGFIX: the original did 'inarray = N.ravel(a)' and discarded
        # the result, so multi-dimensional input was summed only over
        # axis 0 instead of over all elements as documented.
        diffs = N.ravel(diffs)
        dimension = 0
    return asum(diffs, dimension, keepdims)
def ashellsort(inarray):
    """Shellsort algorithm.  Sorts a 1D-array.

    Usage:   ashellsort(inarray)
    Returns: sorted-inarray, sorting-index-vector (for original array)
    """
    n = len(inarray)
    svec = inarray * 1.0            # float working copy of the data
    # FIX: list() so the index vector is assignable under Python 3
    # (same object as range(n) produced under Python 2)
    ivec = list(range(n))
    # FIX: '//' keeps gap an integer under Python 3 true division
    # (the original comments already said "integer division needed")
    gap = n // 2
    while gap > 0:
        for i in range(gap, n):
            for j in range(i-gap, -1, -gap):
                while j >= 0 and svec[j] > svec[j+gap]:
                    # swap out-of-order elements and mirror the swap in
                    # the index vector
                    temp = svec[j]
                    svec[j] = svec[j+gap]
                    svec[j+gap] = temp
                    itemp = ivec[j]
                    ivec[j] = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap // 2
    # svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
    return svec, ivec
def arankdata(inarray):
    """Rank the data in inarray, dealing with ties appropriately.

    Assumes a 1D inarray.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   arankdata(inarray)
    Returns: array of length equal to inarray, containing rank scores
    """
    n = len(inarray)
    svec, ivec = ashellsort(inarray)
    sumranks = 0
    dupcount = 0
    newarray = N.zeros(n, N.Float)
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        # at the end of a run of tied values (or of the data), assign the
        # average rank to every member of the run.
        # FIX: '<>' replaced by '!=' -- identical semantics, but '<>' is a
        # syntax error under Python 3.
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1, i+1):
                newarray[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newarray
def afindwithin(data):
    """Return a binary vector: 1 = within-subject factor, 0 = between.

    Input equals the entire data array in |Stat format (i.e., column
    0 = random factor, last column = measured values).

    Usage:   afindwithin(data)
    """
    numfact = len(data[0]) - 2
    withinvec = [0] * numfact
    for col in range(1, numfact + 1):
        # grab the rows belonging to one level of this factor
        one_level = pstat.unique(pstat.colex(data, 1))[0]
        rows = pstat.linexand(data, col, one_level)
        # fewer unique subjects than scores on this factor ==> the factor
        # is measured within subjects
        if len(pstat.unique(pstat.colex(rows, 0))) < len(rows):
            withinvec[col - 1] = 1
    return withinvec
#########################################################
#########################################################
######  RE-DEFINE DISPATCHES TO INCLUDE ARRAYS  #########
#########################################################
#########################################################
# Each public name below is rebound to a Dispatch object that routes the
# call on the type of its first argument: lists/tuples go to the l*
# (list) implementation, Numeric arrays to the a* (array) one.  A few
# functions exist only in array form and list only N.ArrayType.

## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), (ageometricmean, (N.ArrayType,)) )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), (aharmonicmean, (N.ArrayType,)) )
mean = Dispatch ( (lmean, (ListType, TupleType)), (amean, (N.ArrayType,)) )
median = Dispatch ( (lmedian, (ListType, TupleType)), (amedian, (N.ArrayType,)) )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), (amedianscore, (N.ArrayType,)) )
mode = Dispatch ( (lmode, (ListType, TupleType)), (amode, (N.ArrayType,)) )
# trimmed statistics: array-only implementations
tmean = Dispatch ( (atmean, (N.ArrayType,)) )
tvar = Dispatch ( (atvar, (N.ArrayType,)) )
tstdev = Dispatch ( (atstdev, (N.ArrayType,)) )
tsem = Dispatch ( (atsem, (N.ArrayType,)) )

## VARIATION:
moment = Dispatch ( (lmoment, (ListType, TupleType)), (amoment, (N.ArrayType,)) )
variation = Dispatch ( (lvariation, (ListType, TupleType)), (avariation, (N.ArrayType,)) )
skew = Dispatch ( (lskew, (ListType, TupleType)), (askew, (N.ArrayType,)) )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), (akurtosis, (N.ArrayType,)) )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), (adescribe, (N.ArrayType,)) )

## DISTRIBUTION TESTS
# NOTE(review): list/tuple input is also routed to the a* implementations
# here -- no l* versions of these tests exist.
skewtest = Dispatch ( (askewtest, (ListType, TupleType)), (askewtest, (N.ArrayType,)) )
kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)), (akurtosistest, (N.ArrayType,)) )
normaltest = Dispatch ( (anormaltest, (ListType, TupleType)), (anormaltest, (N.ArrayType,)) )

## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), (aitemfreq, (N.ArrayType,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), (ascoreatpercentile, (N.ArrayType,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), (apercentileofscore, (N.ArrayType,)) )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), (ahistogram, (N.ArrayType,)) )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), (acumfreq, (N.ArrayType,)) )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), (arelfreq, (N.ArrayType,)) )

## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), (aobrientransform, (N.ArrayType,)) )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), (asamplevar, (N.ArrayType,)) )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), (asamplestdev, (N.ArrayType,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ArrayType,)),)    # array-only
var = Dispatch ( (lvar, (ListType, TupleType)), (avar, (N.ArrayType,)) )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), (astdev, (N.ArrayType,)) )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), (asterr, (N.ArrayType,)) )
sem = Dispatch ( (lsem, (ListType, TupleType)), (asem, (N.ArrayType,)) )
z = Dispatch ( (lz, (ListType, TupleType)), (az, (N.ArrayType,)) )
zs = Dispatch ( (lzs, (ListType, TupleType)), (azs, (N.ArrayType,)) )

## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ArrayType,)),)    # array-only
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), (atrimboth, (N.ArrayType,)) )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), (atrim1, (N.ArrayType,)) )

## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), (apaired, (N.ArrayType,)) )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), (apearsonr, (N.ArrayType,)) )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), (aspearmanr, (N.ArrayType,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), (apointbiserialr, (N.ArrayType,)) )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), (akendalltau, (N.ArrayType,)) )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), (alinregress, (N.ArrayType,)) )

## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), (attest_1samp, (N.ArrayType,)) )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), (attest_ind, (N.ArrayType,)) )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), (attest_rel, (N.ArrayType,)) )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), (achisquare, (N.ArrayType,)) )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), (aks_2samp, (N.ArrayType,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), (amannwhitneyu, (N.ArrayType,)) )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), (atiecorrect, (N.ArrayType,)) )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), (aranksums, (N.ArrayType,)) )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), (awilcoxont, (N.ArrayType,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), (akruskalwallish, (N.ArrayType,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), (afriedmanchisquare, (N.ArrayType,)) )

## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), (achisqprob, (N.ArrayType,)) )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), (azprob, (N.ArrayType,)) )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), (aksprob, (N.ArrayType,)) )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), (afprob, (N.ArrayType,)) )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), (abetacf, (N.ArrayType,)) )
betai = Dispatch ( (lbetai, (IntType, FloatType)), (abetai, (N.ArrayType,)) )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), (aerfcc, (N.ArrayType,)) )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), (agammln, (N.ArrayType,)) )

## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), (aF_oneway, (N.ArrayType,)) )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), (aF_value, (N.ArrayType,)) )

## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType, N.ArrayType)), )
# NOTE: this rebinds the builtin name 'sum' within this module
sum = Dispatch ( (lsum, (ListType, TupleType)), (asum, (N.ArrayType,)) )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), (acumsum, (N.ArrayType,)) )
ss = Dispatch ( (lss, (ListType, TupleType)), (ass, (N.ArrayType,)) )
summult = Dispatch ( (lsummult, (ListType, TupleType)), (asummult, (N.ArrayType,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), (asquare_of_sums, (N.ArrayType,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), (asumdiffsquared, (N.ArrayType,)) )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), (ashellsort, (N.ArrayType,)) )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), (arankdata, (N.ArrayType,)) )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), (afindwithin, (N.ArrayType,)) )

######################  END OF NUMERIC FUNCTION BLOCK  #####################
######################  END OF STATISTICAL FUNCTIONS  ######################
except ImportError:
pass
| Python |
# $Id: module_index.py 2790 2008-02-29 08:33:14Z cpbotha $
class emp_test:
    """Module-index metadata entry for the emp_test module."""
    # module_kits this module depends on
    kits = ['vtk_kit']
    # module-palette categories this module appears under
    cats = ['Tests']
    # search keywords for the module search box
    keywords = ['test', 'tests', 'testing']
    help = \
"""Module to test DeVIDE extra-module-paths functionality.
"""
| Python |
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class emp_test(SimpleVTKClassModuleBase):
    """This is the minimum you need to wrap a single VTK object.  This
    __doc__ string will be replaced by the __doc__ string of the encapsulated
    VTK object, i.e. vtkStripper in this case.

    With these few lines, we have error handling, progress reporting, module
    help and also: the complete state of the underlying VTK object is also
    pickled, i.e. when you save and restore a network, any changes you've
    made to the vtkObject will be restored.
    """
    def __init__(self, module_manager):
        # args: wrapped VTK object, progress text,
        # input port types, output port names
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkStripper(), 'Stripping polydata.',
            ('vtkPolyData',), ('Stripped vtkPolyData',))
| Python |
"""Module to test basic matplotlib functionality.
"""
import os
import unittest
import tempfile
class MPLTest(unittest.TestCase):
    """Test matplotlib integration via the DeVIDE python shell.

    NOTE(review): the test runner injects the _devide_app and
    _devide_testing attributes before running (see get_suite below).
    """
    def test_figure_output(self):
        """Test if a matplotlib figure can be generated and written to disc.
        """
        # make sure the pythonshell is running
        self._devide_app.get_interface()._handler_menu_python_shell(None)
        # create new figure
        python_shell = self._devide_app.get_interface()._python_shell
        f = python_shell.mpl_new_figure()
        import pylab
        # unfortunately, it's almost impossible to get pixel-identical
        # rendering on all platforms, so we can only check that the plot
        # itself is correct (all font-rendering is disabled)
        # make sure we hardcode the font! (previous experiment)
        #pylab.rcParams['font.sans-serif'] = ['Bitstream Vera Sans']
        #pylab.rc('font', family='sans-serif')
        from pylab import arange, plot, sin, cos, legend, grid, xlabel, ylabel
        a = arange(-30, 30, 0.01)
        plot(a, sin(a) / a, label='sinc(x)')
        plot(a, cos(a), label='cos(x)')
        #legend()
        grid()
        #xlabel('x')
        #ylabel('f(x)')
        # disable x and y ticks (no fonts allowed, remember)
        pylab.xticks([])
        pylab.yticks([])
        # width and height in inches
        f.set_figwidth(7.9)
        f.set_figheight(5.28)
        # and save it to disc
        filename1 = tempfile.mktemp(suffix='.png', prefix='tmp', dir=None)
        f.savefig(filename1, dpi=100)
        # get rid of the figure
        python_shell.mpl_close_figure(f)
        # now compare the rendered PNG against the stored reference image
        test_fn = os.path.join(self._devide_testing.get_images_dir(),
                               'mpl_test_figure_output.png')
        err = self._devide_testing.compare_png_images(test_fn, filename1)
        self.failUnless(err == 0, '%s differs from %s, err = %.2f' %
                        (filename1, test_fn, err))
def get_suite(devide_testing):
    """Build the matplotlib test suite.

    The figure-output test is only added when the matplotlib_kit was
    actually loaded (it also requires wx).
    """
    devide_app = devide_testing.devide_app
    module_manager = devide_app.get_module_manager()
    suite = unittest.TestSuite()
    if 'matplotlib_kit' in module_manager.module_kits.module_kit_list:
        test = MPLTest('test_figure_output')
        test._devide_app = devide_app
        test._devide_testing = devide_testing
        suite.addTest(test)
    return suite
| Python |
"""Module to test graph_editor functionality.
"""
import os
import time
import unittest
import wx
class GraphEditorTestBase(unittest.TestCase):
    """Base class for graph-editor tests.

    setUp ensures the graph editor is running with an empty canvas;
    tearDown clears the canvas again and drops the references.
    NOTE(review): the test runner injects _devide_app before running.
    """
    def setUp(self):
        # short-hands for the interface and the graph editor
        self._iface = self._devide_app.get_interface()
        self._ge = self._iface._graph_editor
        # the graph editor frame is now the main frame of the interface
        self._ge_frame = self._iface._main_frame
        # make sure the graphEditor is running
        self._iface._handlerMenuGraphEditor(None)
        # make sure we begin with a clean slate, so we can do
        # some module counting
        self._ge.clear_all_glyphs_from_canvas()

    def tearDown(self):
        # leave the canvas empty and release our references so nothing
        # is kept alive between tests
        self._ge.clear_all_glyphs_from_canvas()
        del self._ge
        del self._iface
        del self._ge_frame
class GraphEditorVolumeTestBase(GraphEditorTestBase):
    """Uses superQuadric, implicitToVolume and doubleThreshold to create
    a volume that we can run some tests on.

    After setUp, self.dtmod/self.dtglyph hold the doubleThreshold
    (volume output) and self.sqmod/self.sqglyph the superQuadric source.
    """
    def setUp(self):
        # call parent setUp method
        GraphEditorTestBase.setUp(self)
        # now let's build a volume we can play with
        # first the three modules
        (sqmod, sqglyph) = self._ge.create_module_and_glyph(
            10, 10, 'modules.misc.superQuadric')
        self.failUnless(sqmod and sqglyph)
        (ivmod, ivglyph) = self._ge.create_module_and_glyph(
            10, 70, 'modules.misc.implicitToVolume')
        self.failUnless(ivmod and ivglyph)
        (dtmod, dtglyph) = self._ge.create_module_and_glyph(
            10, 130, 'modules.filters.doubleThreshold')
        self.failUnless(dtmod and dtglyph)
        # configure the implicitToVolume to have somewhat tighter bounds
        cfg = ivmod.get_config()
        cfg.modelBounds = (-1.0, 1.0, -0.25, 0.25, 0.0, 0.75)
        ivmod.set_config(cfg)
        # then configure the doubleThreshold with the correct thresholds
        cfg = dtmod.get_config()
        cfg.lowerThreshold = -99999.00
        cfg.upperThreshold = 0.0
        dtmod.set_config(cfg)
        # now connect them all: superQuadric -> implicitToVolume -> doubleThreshold
        ret = self._ge._connect(sqglyph, 0, ivglyph, 0)
        ret = self._ge._connect(ivglyph, 0, dtglyph, 0)
        # redraw
        self._ge.canvas.redraw()
        # run the network
        self._ge._handler_execute_network(None)
        # keep references around for the derived test classes
        self.dtglyph = dtglyph
        self.dtmod = dtmod
        self.sqglyph = sqglyph
        self.sqmod = sqmod
# ----------------------------------------------------------------------------
class GraphEditorBasic(GraphEditorTestBase):
    """Basic graph-editor smoke tests: startup, module creation/deletion,
    module help, module search, a simple network, and the VTK object
    configuration window.
    """
    def test_startup(self):
        """graphEditor startup.
        """
        self.failUnless(
            self._ge_frame.IsShown())

    def test_module_creation_deletion(self):
        """Creation of simple module and glyph.
        """
        (mod, glyph) = self._ge.create_module_and_glyph(
            10, 10, 'modules.misc.superQuadric')
        self.failUnless(mod and glyph)
        ret = self._ge._delete_module(glyph)
        self.failUnless(ret)

    def test_module_help(self):
        """See if module specific help can be called up for a module.
        """
        module_name = 'modules.writers.vtiWRT'
        (mod, glyph) = self._ge.create_module_and_glyph(
            10, 10, module_name)
        self.failUnless(mod and glyph)
        self._ge.show_module_help_from_glyph(glyph)
        # DURNIT! We can't read back the help HTML from the HtmlWindow!
        # make sure that the help is actually displayed in the doc window
        #mm = self._devide_app.get_module_manager()
        #ht = mm._available_modules[module_name].help
        #p = self._ge_frame.doc_window.GetPage()
        # fail if it's not there
        #self.failUnless(p == self._ge._module_doc_to_html(module_name, ht))
        # take it away
        ret = self._ge._delete_module(glyph)
        self.failUnless(ret)

    def test_module_search(self):
        """Type in the search box, press RETURN, check that the expected
        module is placed on the canvas.
        """
        import wx

        class DummyKeyEvent:
            # minimal stand-in for a wx keyboard event
            def __init__(self, key_code):
                self._key_code = key_code

            def GetKeyCode(self):
                return self._key_code

        # type some text in the module search box
        self._ge_frame.search.SetValue('fillholes')
        # now place the module by pressing RETURN (simulated)
        evt = DummyKeyEvent(wx.WXK_RETURN)
        self._ge._handler_search_char(evt)
        # check that the imageFillHoles module has been placed
        ag = self._ge._get_all_glyphs()
        module_name = str(ag[0].module_instance.__class__.__name__)
        expected_name = 'imageFillHoles'
        self.failUnless(module_name == expected_name, '%s != %s' %
                        (module_name, expected_name))

    def test_simple_network(self):
        """Creation, connection and execution of superQuadric source and
        slice3dVWR.
        """
        (sqmod, sqglyph) = self._ge.create_module_and_glyph(
            10, 10, 'modules.misc.superQuadric')
        (svmod, svglyph) = self._ge.create_module_and_glyph(
            10, 90, 'modules.viewers.slice3dVWR')
        ret = self._ge._connect(sqglyph, 1, svglyph, 0)
        self._ge.canvas.redraw()
        self.failUnless(ret)
        # now run the network
        self._ge._handler_execute_network(None)
        # the slice viewer should now have an extra object
        self.failUnless(svmod._tdObjects.findObjectByName('obj0'))

    def test_config_vtk_obj(self):
        """See if the ConfigVtkObj is available and working.
        """
        # first create superQuadric
        (sqmod, sqglyph) = self._ge.create_module_and_glyph(
            10, 10, 'modules.misc.superQuadric')
        self.failUnless(sqmod and sqglyph)
        self._ge._view_conf_module(sqmod)
        # superQuadric is a standard ScriptedConfigModuleMixin, so it has
        # a _viewFrame ivar
        self.failUnless(sqmod._view_frame.IsShown())
        # start up the vtkObjectConfigure window for that object
        sqmod.vtkObjectConfigure(sqmod._view_frame, None, sqmod._superquadric)
        # check that it's visible
        # sqmod._vtk_obj_cfs[sqmod._superquadric] is the ConfigVtkObj instance
        self.failUnless(
            sqmod._vtk_obj_cfs[sqmod._superquadric]._frame.IsShown())
        # end by closing them all (so now all we're left with is the
        # module view itself)
        sqmod.closeVtkObjectConfigure()
        # remove the module as well
        ret = self._ge._delete_module(sqglyph)
        self.failUnless(ret)
# ----------------------------------------------------------------------------
class TestReadersWriters(GraphEditorVolumeTestBase):
    """Reader/writer tests running on the volume network built by
    GraphEditorVolumeTestBase.setUp.
    """
    def test_vti(self):
        """Testing basic readers/writers.
        """
        # NOTE(review): placeholder assertion only -- the actual exercise
        # is the volume network constructed (and executed) in setUp.
        self.failUnless(1 == 1)
class TestModulesMisc(GraphEditorTestBase):
    """Create/destroy smoke tests over all core DeVIDE modules."""

    def get_sorted_core_module_list(self):
        """Utility function to get a sorted list of all core module names.
        """
        mm = self._devide_app.get_module_manager()
        # we tested all the vtk_basic modules once with VTK5.0
        # but this causes trouble on Weendows.
        ml = mm.get_available_modules().keys()
        # skip the vtk_basic wrappers and user modules
        ml = [i for i in ml
              if not i.startswith('modules.vtk_basic') and
              not i.startswith('modules.user')]
        ml.sort()
        return ml

    def test_create_destroy(self):
        """See if we can create and destroy all core modules, without invoking
        up the view window..
        """
        ml = self.get_sorted_core_module_list()
        for module_name in ml:
            print 'About to create %s.' % (module_name,)
            (cmod, cglyph) = self._ge.create_module_and_glyph(
                10, 10, module_name)
            print 'Created %s.' % (module_name,)
            self.failUnless(cmod and cglyph,
                            'Error creating %s' % (module_name,))
            # destroy
            ret = self._ge._delete_module(cglyph)
            print 'Destroyed %s.' % (module_name,)
            self.failUnless(ret,
                            'Error destroying %s' % (module_name,))
            # so wx can take a breath and catch up
            wx.Yield()

    def test_create_view_destroy(self):
        """Create and destroy all core modules, also invoke view window.
        """
        ml = self.get_sorted_core_module_list()
        for module_name in ml:
            print 'About to create %s.' % (module_name,)
            (cmod, cglyph) = self._ge.create_module_and_glyph(
                10, 10, module_name)
            print 'Created %s.' % (module_name,)
            self.failUnless(cmod and cglyph,
                            'Error creating %s' % (module_name,))
            # call up view window
            print 'About to bring up view-conf window'
            try:
                self._ge._view_conf_module(cmod)
            except Exception, e:
                self.fail(
                    'Error invoking view of %s (%s)' % (module_name,str(e)))
            # destroy
            ret = self._ge._delete_module(cglyph)
            print 'Destroyed %s.' % (module_name,)
            self.failUnless(ret,
                            'Error destroying %s' % (module_name,))
            # so wx can take a breath and catch up
            wx.Yield()
# ----------------------------------------------------------------------------
class TestVTKBasic(GraphEditorTestBase):
    """Network load + region-growing regression test."""

    def test_seedconnect(self):
        """Test whether we can load and run a full network, select a point and
        do a region growing. This broke with the introduction of vtk 5.6.1 due
        to more strict casting.
        """
        # load our little test network #####
        self._ge._load_and_realise_network(
            os.path.join(self._devide_testing.get_networks_dir(),
                         'seedconnect.dvn'))
        # run the network once
        self._ge._handler_execute_network(None)
        self._ge.canvas.redraw()
        # now find the slice3dVWR #####
        mm = self._devide_app.get_module_manager()
        svmod = mm.get_instance("svmod")
        # let's show the control frame
        svmod._handlerShowControls(None)
        # NOTE(review): the if/else grouping below was reconstructed from a
        # whitespace-mangled source; the else branch appears to hold the
        # dead alternative path plus the explicit network execution.
        if True:
            # we're doing this the long way to test more code paths
            svmod.sliceDirections.setCurrentCursor([20.0, 20.0, 20.0, 1.0])
            # this handler should result in the whole network being auto-executed
            # but somehow it blocks execution (the vktImageSeedConnect sticks at 0.0)
            svmod.selectedPoints._handlerStoreCursorAsPoint(None)
        else:
            # it seems to block here as well: the whole network is linked up,
            # so it tries to execute when the storeCursor is called, and that
            # blocks everything. WHY?!
            #svmod.selectedPoints._storeCursor((20.0,20.0,20.0,1.0))
            #self.failUnless(len(svmod.selectedPoints._pointsList) == 1)
            # execute the network
            self._ge._handler_execute_network(None)
        # now count the number of voxels in the segmented result
        import vtk
        via = vtk.vtkImageAccumulate()
        scmod = mm.get_instance("scmod")
        via.SetInput(scmod.get_output(0))
        via.Update()
        # get second bin of output histogram: that should be the
        # number of voxels
        s = via.GetOutput().GetPointData().GetScalars()
        print s.GetTuple1(1)
        self.failUnless(s.GetTuple1(1) == 26728)
        via.SetInput(None)
        del via
# ----------------------------------------------------------------------------
class TestITKBasic(GraphEditorVolumeTestBase):
def test_vtktoitk_types(self):
"""Do quick test on vtk -> itk -> vtk + type conversion.
"""
# create VTKtoITK, set it to cast to float (we're going to
# test signed short and unsigned long as well)
(v2imod, v2iglyph) = self._ge.create_module_and_glyph(
200, 10, 'modules.insight.VTKtoITK')
self.failUnless(v2imod, v2iglyph)
(i2vmod, i2vglyph) = self._ge.create_module_and_glyph(
200, 130, 'modules.insight.ITKtoVTK')
self.failUnless(i2vmod and i2vglyph)
ret = self._ge._connect(self.dtglyph, 0, v2iglyph, 0)
self.failUnless(ret)
ret = self._ge._connect(v2iglyph, 0, i2vglyph, 0)
self.failUnless(ret)
# redraw the canvas
self._ge.canvas.redraw()
for t in (('float', 'float'), ('signed short', 'short'),
('unsigned long', 'unsigned long')):
c = v2imod.get_config()
c.autotype = False
c.type = t[0]
v2imod.set_config(c) # this will modify the module
# execute the network
self._ge._handler_execute_network(None)
# each time make sure that the effective data type at the
# output of the ITKtoVTK is what we expect.
id = i2vmod.get_output(0)
self.failUnless(id.GetScalarTypeAsString() == t[1])
# this is quite nasty: if the next loop is entered too
# quickly and the VTKtoITK module is modified before the
# ticker has reached the next decisecond, the network
# thinks that it has not been modified, and so it won't be
# executed.
time.sleep(0.01)
def test_confidence_seed_connect(self):
"""Test confidenceSeedConnect and VTK<->ITK interconnect.
"""
# this will be the last big created thingy... from now on we'll
# do DVNs. This simulates the user's actions creating the network
# though.
# create a slice3dVWR
(svmod, svglyph) = self._ge.create_module_and_glyph(
200, 190, 'modules.viewers.slice3dVWR')
self.failUnless(svmod and svglyph)
# connect up the created volume and redraw
ret = self._ge._connect(self.dtglyph, 0, svglyph, 0)
# make sure it can connect
self.failUnless(ret)
# we need to execute before storeCursor can work
self._ge._handler_execute_network(None)
# storeCursor wants a 4-tuple and value - we know what these should be
svmod.selectedPoints._storeCursor((20,20,0,1))
self.failUnless(len(svmod.selectedPoints._pointsList) == 1)
# connect up the insight bits
(v2imod, v2iglyph) = self._ge.create_module_and_glyph(
200, 10, 'modules.insight.VTKtoITK')
self.failUnless(v2imod and v2iglyph)
# make sure VTKtoITK will cast to float (because it's getting
# double at the input!)
c = v2imod.get_config()
c.autotype = False
c.type = 'float'
v2imod.set_config(c)
(cscmod, cscglyph) = self._ge.create_module_and_glyph(
200, 70, 'modules.insight.confidenceSeedConnect')
self.failUnless(cscmod and cscglyph)
(i2vmod, i2vglyph) = self._ge.create_module_and_glyph(
200, 130, 'modules.insight.ITKtoVTK')
self.failUnless(i2vmod and i2vglyph)
ret = self._ge._connect(self.dtglyph, 0, v2iglyph, 0)
self.failUnless(ret)
ret = self._ge._connect(v2iglyph, 0, cscglyph, 0)
self.failUnless(ret)
ret = self._ge._connect(cscglyph, 0, i2vglyph, 0)
self.failUnless(ret)
# there's already something on the 0'th input of the slice3dVWR
ret = self._ge._connect(i2vglyph, 0, svglyph, 1)
self.failUnless(ret)
# connect up the selected points
ret = self._ge._connect(svglyph, 0, cscglyph, 1)
self.failUnless(ret)
# redraw the canvas
self._ge.canvas.redraw()
# execute the network
self._ge._handler_execute_network(None)
# now count the number of voxels in the segmented result
import vtk
via = vtk.vtkImageAccumulate()
via.SetInput(i2vmod.get_output(0))
via.Update()
# get second bin of output histogram: that should be the
# number of voxels
s = via.GetOutput().GetPointData().GetScalars()
print s.GetTuple1(1)
self.failUnless(s.GetTuple1(1) == 26728)
via.SetInput(None)
del via
def create_geb_test(name, devide_app):
    """Instantiate a GraphEditorBasic test case with the app injected.

    Utility so that get_suite() does not have to repeat the attribute
    setup for every test name.
    """
    test_case = GraphEditorBasic(name)
    test_case._devide_app = devide_app
    return test_case
def get_some_suite(devide_testing):
    """Return a small suite holding only the confidence-seed-connect ITK
    test -- handy for quick runs.
    """
    devide_app = devide_testing.devide_app
    suite = unittest.TestSuite()
    test = TestITKBasic('test_confidence_seed_connect')
    test._devide_app = devide_app
    test._devide_testing = devide_testing    # needed for the networks path
    suite.addTest(test)
    return suite
def get_suite(devide_testing):
    """Assemble the graph-editor test suite.

    Returns an empty suite when the wx_kit is unavailable; the ITK tests
    are only added when the itk_kit was actually imported.
    """
    devide_app = devide_testing.devide_app
    mm = devide_app.get_module_manager()
    suite = unittest.TestSuite()
    # all of these tests require the wx_kit
    if 'wx_kit' not in mm.module_kits.module_kit_list:
        return suite

    # basic graph-editor tests, in the same order as before
    for name in ('test_startup',
                 'test_module_creation_deletion',
                 'test_module_help',
                 'test_module_search',
                 'test_simple_network',
                 'test_config_vtk_obj'):
        suite.addTest(create_geb_test(name, devide_app))

    for name in ('test_create_destroy', 'test_create_view_destroy'):
        t = TestModulesMisc(name)
        t._devide_app = devide_app
        suite.addTest(t)

    t = TestVTKBasic('test_seedconnect')
    t._devide_app = devide_app
    t._devide_testing = devide_testing    # needed for the networks path
    suite.addTest(t)

    # module_kit_list is up to date with the actual module_kits that
    # were imported
    if 'itk_kit' in mm.module_kits.module_kit_list:
        for name in ('test_confidence_seed_connect', 'test_vtktoitk_types'):
            t = TestITKBasic(name)
            t._devide_app = devide_app
            suite.addTest(t)

    return suite
| Python |
import sys
import unittest
class NumPyTest(unittest.TestCase):
    # Regression test for the packaged-vs-installed numpy conflict.
    def test_import_mixing(self):
        """Test for bug where packaged numpy and installed numpy would
        conflict, causing errors.
        """
        import numpy
        try:
            # a trivial array construction is enough to trigger the
            # conflict when two numpy installations are mixed
            na = numpy.array([0,0,0])
            print na
        except Exception, e:
            self.fail('numpy.array() cast raises exception: %s' %
                      (str(e),))
        else:
            pass
def get_suite(devide_testing):
    """Return the numpy test suite; empty unless the numpy_kit was
    loaded by the module manager.
    """
    app = devide_testing.devide_app
    kit_list = app.get_module_manager().module_kits.module_kit_list
    numpy_suite = unittest.TestSuite()
    if 'numpy_kit' in kit_list:
        test_case = NumPyTest('test_import_mixing')
        test_case._devide_app = app
        test_case._devide_testing = devide_testing
        numpy_suite.addTest(test_case)
    return numpy_suite
| Python |
"""Module to test basic DeVIDE functionality.
"""
import unittest
class BasicVTKTest(unittest.TestCase):
    # Tests for the patched VTK shipped with DeVIDE: VTK errors must be
    # converted to Python exceptions and the GIL must be released during
    # VTK calls so multi-threaded filters cannot deadlock.
    def test_vtk_exceptions(self):
        """Test if VTK has been patched with our VTK error to Python exception
        patch.
        """
        import vtk
        # deliberately point the reader at a non-existent file so that
        # updating the pipeline generates a VTK error
        a = vtk.vtkXMLImageDataReader()
        a.SetFileName('blata22 hello')
        b = vtk.vtkMarchingCubes()
        b.SetInput(a.GetOutput())
        try:
            b.Update()
        except RuntimeError, e:
            # the patch turns vtkErrorMacro output into RuntimeError
            # whose message starts with 'ERROR'
            self.failUnless(str(e).startswith('ERROR'))
        else:
            self.fail('VTK object did not raise Python exception.')
    def test_vtk_progress_exception_masking(self):
        """Ensure progress events are not masking exceptions.
        """
        import vtk
        import vtkdevide
        def observer_progress(o, e):
            print "DICOM progress %s." % (str(o.GetProgress() * 100.0),)
        # reader with no filenames configured: Update() must error out,
        # and the registered progress observer must not swallow that
        r = vtkdevide.vtkDICOMVolumeReader()
        r.AddObserver("ProgressEvent", observer_progress)
        try:
            r.Update()
        except RuntimeError, e:
            pass
        else:
            self.fail('ProgressEvent handler masked RuntimeError.')
    def test_vtk_pyexception_deadlock(self):
        """Test if VTK has been patched to release the GIL during all
        VTK method calls.
        """
        import vtk
        # this gives floats by default
        s = vtk.vtkImageGridSource()
        c1 = vtk.vtkImageCast()
        c1.SetOutputScalarTypeToShort()
        c1.SetInput(s.GetOutput())
        c2 = vtk.vtkImageCast()
        c2.SetOutputScalarTypeToFloat()
        c2.SetInput(s.GetOutput())
        # feeding short and float inputs into vtkImageMathematics is a
        # type mismatch that generates an error in the threaded execute
        m = vtk.vtkImageMathematics()
        # make sure we are multi-threaded
        if m.GetNumberOfThreads() < 2:
            m.SetNumberOfThreads(2)
        m.SetInput1(c1.GetOutput())
        m.SetInput2(c2.GetOutput())
        # without the patch, this call will deadlock forever
        try:
            # with the patch this should generate a RuntimeError
            m.Update()
        except RuntimeError:
            pass
        else:
            self.fail(
                'Multi-threaded error vtkImageMathematics did not raise '
                'exception.')
def get_suite(devide_testing):
    """Return the basic VTK test suite; empty when the vtk_kit is not
    loaded.
    """
    mm = devide_testing.devide_app.get_module_manager()
    suite = unittest.TestSuite()
    if 'vtk_kit' not in mm.module_kits.module_kit_list:
        # no VTK available: hand back an empty suite
        return suite
    for test_name in ('test_vtk_exceptions',
                      'test_vtk_progress_exception_masking',
                      'test_vtk_pyexception_deadlock'):
        suite.addTest(BasicVTKTest(test_name))
    return suite
| Python |
"""Module to test basic DeVIDE functionality.
"""
import unittest
class BasicMiscTest(unittest.TestCase):
    # Miscellaneous environment checks that need no DeVIDE kits.
    def test_sqlite3(self):
        """Test if sqlite3 is available.

        Round-trips the sqlite3 version string through an in-memory
        database and checks it comes back intact.
        """
        import sqlite3
        v = sqlite3.version
        conn = sqlite3.connect(':memory:')
        try:
            cur = conn.cursor()
            cur.execute('create table stuff (some text)')
            cur.execute('insert into stuff values (?)', (v,))
            cur.close()
            cur = conn.cursor()
            cur.execute('select some from stuff')
            # cur.fetchall() returns a list of tuples: we get the first
            # item in the list, then the first element in that tuple, this
            # should be the version we inserted.
            # (assertEqual replaces the deprecated failUnless and gives a
            # useful failure message)
            self.assertEqual(cur.fetchall()[0][0], v)
        finally:
            # BUGFIX: the connection was previously never closed
            conn.close()
def get_suite(devide_testing):
    """Return the miscellaneous test suite (runs unconditionally; no
    module kit required).
    """
    app = devide_testing.devide_app
    # keep the module manager lookup: mirrors the other get_suite
    # implementations
    mm = app.get_module_manager()
    suite = unittest.TestSuite()
    suite.addTest(BasicMiscTest('test_sqlite3'))
    return suite
| Python |
# testing.__init__.py copyright 2006 by Charl P. Botha http://cpbotha.net/
# $Id$
# this drives the devide unit testing. neat huh?
import os
import time
import unittest
from testing import misc
from testing import basic_vtk
from testing import basic_wx
from testing import graph_editor
from testing import numpy_tests
from testing import matplotlib_tests
# every sub-module here contributes a test suite via its get_suite()
module_list = [misc, basic_vtk, basic_wx, graph_editor,
               numpy_tests, matplotlib_tests]
# reload so that repeated in-app test runs pick up source edits without
# restarting DeVIDE
for m in module_list:
    reload(m)
# ----------------------------------------------------------------------------
class DeVIDETesting:
    """Driver for the DeVIDE unit tests.

    Collects the suites contributed by all testing sub-modules into one
    master suite and offers helpers (test data directories, PNG image
    comparison) that the individual tests use.
    """
    def __init__(self, devide_app):
        # tests reach the running application through this reference
        self.devide_app = devide_app
        suite_list = [misc.get_suite(self),
                      basic_vtk.get_suite(self),
                      basic_wx.get_suite(self),
                      graph_editor.get_suite(self),
                      numpy_tests.get_suite(self),
                      matplotlib_tests.get_suite(self)
                      ]
        self.main_suite = unittest.TestSuite(tuple(suite_list))
    def runAllTests(self):
        """Run the complete master suite with verbose console output."""
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(self.main_suite)
        # NOTE(review): this count is hard-coded; verify it still matches
        # the actual number of tests whenever suites change
        print "Complete suite consists of 19 (multi-part) tests on "
        print "lin32, lin64, win32, win64."
    def runSomeTest(self):
        """Run a single hand-picked suite; uncomment the line you need
        during development.
        """
        #some_suite = misc.get_suite(self)
        #some_suite = basic_vtk.get_suite(self)
        #some_suite = basic_wx.get_suite(self)
        some_suite = graph_editor.get_some_suite(self)
        #some_suite = numpy_tests.get_suite(self)
        #some_suite = matplotlib_tests.get_suite(self)
        runner = unittest.TextTestRunner()
        runner.run(some_suite)
    def get_images_dir(self):
        """Return full path of directory with test images.
        """
        return os.path.join(os.path.dirname(__file__), 'images')
    def get_networks_dir(self):
        """Return full path of directory with test networks.
        """
        return os.path.join(os.path.dirname(__file__), 'networks')
    def compare_png_images(self, image1_filename, image2_filename,
                           threshold=16, allow_shift=False):
        """Compare two PNG images on disc. No two pixels may differ with more
        than the default threshold.

        @raise RuntimeError: if the two images differ in size.
        @return: thresholded error reported by vtkImageDifference.
        """
        import vtk
        r1 = vtk.vtkPNGReader()
        r1.SetFileName(image1_filename)
        r1.Update()
        r2 = vtk.vtkPNGReader()
        r2.SetFileName(image2_filename)
        r2.Update()
        # there's a bug in VTK 5.0.1 where input images of unequal size
        # (depending on which input is larger) will cause a segfault
        # see http://www.vtk.org/Bug/bug.php?op=show&bugid=3586
        # se we check for that situation and bail if it's the case
        if r1.GetOutput().GetDimensions() != r2.GetOutput().GetDimensions():
            em = 'Input images %s and %s are not of equal size.' % \
                 (image1_filename, image2_filename)
            raise RuntimeError, em
        # sometimes PNG files have an ALPHA component we have to chuck away
        # do this for both images
        ec1 = vtk.vtkImageExtractComponents()
        ec1.SetComponents(0,1,2)
        ec1.SetInput(r1.GetOutput())
        ec2 = vtk.vtkImageExtractComponents()
        ec2.SetComponents(0,1,2)
        ec2.SetInput(r2.GetOutput())
        idiff = vtk.vtkImageDifference()
        idiff.SetThreshold(threshold)
        if allow_shift:
            idiff.AllowShiftOn()
        else:
            idiff.AllowShiftOff()
        idiff.SetImage(ec1.GetOutput())
        idiff.SetInputConnection(ec2.GetOutputPort())
        idiff.Update()
        return idiff.GetThresholdedError()
| Python |
"""Module to test basic DeVIDE functionality.
"""
import unittest
class PythonShellTest(unittest.TestCase):
    # checks that the wx Python shell window can be opened
    def test_python_shell(self):
        """Open the Python shell via its menu handler, verify that the
        frame is visible, then hide it again.
        """
        interface = self._devide_app.get_interface()
        interface._handler_menu_python_shell(None)
        shell_frame = interface._python_shell._frame
        self.failUnless(shell_frame.IsShown())
        shell_frame.Show(False)
class HelpContentsTest(unittest.TestCase):
    # checks that Help Contents can be launched in a browser
    def test_help_contents(self):
        """Invoke the Help Contents handler; a webbrowser.Error means no
        browser could be started and fails the test.
        """
        import webbrowser
        try:
            self._devide_app.get_interface()._handlerHelpContents(None)
        except webbrowser.Error:
            self.fail()
def get_suite(devide_testing):
    """Return the basic wx test suite; empty when the wx_kit is not
    loaded.
    """
    app = devide_testing.devide_app
    mm = app.get_module_manager()
    suite = unittest.TestSuite()
    # both of these tests need a running wx interface
    if 'wx_kit' in mm.module_kits.module_kit_list:
        for case_class, method_name in (
                (PythonShellTest, 'test_python_shell'),
                (HelpContentsTest, 'test_help_contents')):
            test_case = case_class(method_name)
            test_case._devide_app = app
            suite.addTest(test_case)
    return suite
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
###################################################################
# the following programmes should either be on your path, or you
# should specify the full paths here.
# Microsoft utility to rebase files.
REBASE = "rebase"
# NSIS installer compiler (Windows packaging stage)
MAKE_NSIS = "makensis"
# binutils strip: removes debug info from shared objects (posix)
STRIP = "strip"
# chrpath: removes rpath info from shared objects (posix)
CHRPATH = "chrpath"
# end of programmes ###############################################
# NOTHING TO CONFIGURE BELOW THIS LINE ############################
import getopt
import os
import re
import shutil
import sys
import tarfile
# prefix for all console output of this script
PPF = "[*** DeVIDE make_dist ***]"
S_PPF = "%s =====>>>" % (PPF,) # used for stage headers
# names of the individual build stages, see the --stages switch
S_CLEAN_PYI = 'clean_pyi'
S_RUN_PYI = 'run_pyi'
S_WRAPITK_TREE = 'wrapitk_tree'
S_REBASE_DLLS = 'rebase_dlls'
S_POSTPROC_SOS = 'postproc_sos'
S_PACKAGE_DIST = 'package_dist'
# default: run every stage, in this canonical order
DEFAULT_STAGES = '%s, %s, %s, %s, %s, %s' % \
    (S_CLEAN_PYI, S_RUN_PYI, S_WRAPITK_TREE,
     S_REBASE_DLLS, S_POSTPROC_SOS, S_PACKAGE_DIST)
HELP_MESSAGE = """
make_dist.py - build DeVIDE distributables.
Invoke as follows:
python make_dist.py -s specfile -i installer_script
where specfile is the pyinstaller spec file and installer_script
refers to the full path of the pyinstaller Build.py
The specfile should be in the directory devide/installer, where devide
is the directory containing the devide source that you are using to
build the distributables.
Other switches:
--stages : by default all stages are run. With this parameter, a
subset of the stages can be specified. The full list is:
%s
""" % (DEFAULT_STAGES,)
####################################################################
class MDPaths:
    """Derive and store every directory needed to build the DeVIDE
    distributables.

    Attributes: specfile and pyinstaller_script (normalized paths),
    specfile_dir (devide/installer), pyi_dist_dir (distdevide),
    pyi_build_dir (builddevide), devide_dir (the devide source tree).
    """
    def __init__(self, specfile, pyinstaller_script):
        self.specfile = os.path.normpath(specfile)
        self.pyinstaller_script = os.path.normpath(pyinstaller_script)
        # devide/installer: the directory containing the spec file
        spec_dir = os.path.abspath(os.path.dirname(self.specfile))
        self.specfile_dir = os.path.normpath(spec_dir)
        # pyinstaller output directories live next to the spec file
        self.pyi_dist_dir = os.path.join(self.specfile_dir, 'distdevide')
        self.pyi_build_dir = os.path.join(self.specfile_dir, 'builddevide')
        # the devide source tree is one level up from the installer dir
        self.devide_dir = os.path.normpath(
            os.path.join(self.specfile_dir, '..'))
####################################################################
# UTILITY METHODS
####################################################################
def get_status_output(command):
    """Run command through os.popen and capture its output.

    @return: (status, output) tuple; status is None when the command
        exited successfully and in general 1 when the command was not
        found. (Method taken from johannes.utils.)
    """
    pipe = os.popen(command)
    captured = pipe.read()
    exit_status = pipe.close()
    return (exit_status, captured)
def find_command_with_ver(name, command, ver_re):
    """Try to run command, use ver_re regular expression to parse for
    the version string. This will print for example:
    CVS: version 2.11 found.
    @return: True if command found, False if not or if version could
    not be parsed.
    Method taken from johannes.utils.
    """
    retval = False
    s,o = get_status_output(command)
    if s:
        # non-None status: the command failed or does not exist
        msg2 = 'NOT FOUND!'
    else:
        mo = re.search(ver_re, o, re.MULTILINE)
        if mo:
            # the first capture group of ver_re is the version number
            msg2 = 'version %s found.' % (mo.groups()[0],)
            retval = True
        else:
            msg2 = 'could not extract version.'
    print PPF, "%s: %s" % (name, msg2)
    return retval
def find_files(start_dir, re_pattern='.*\.(pyd|dll)', exclude_pats=[]):
    """Recursively collect files under start_dir whose names match the
    given regular expression. Case is ignored.

    @param start_dir: search starts in this directory
    @param re_pattern: regular expression matched against each filename,
        e.g. '.*\.(pyd|dll)' matches all names ending in pyd or dll
    @param exclude_pats: filenames (without directory) matching any of
        these patterns are diverted to the excluded list
    @return: (found_files, excluded_files), both lists of fully
        qualified filenames
    """
    matcher = re.compile(re_pattern, re.IGNORECASE)
    included = []
    excluded = []
    for dirpath, dirnames, filenames in os.walk(start_dir):
        abs_dir = os.path.normpath(os.path.abspath(dirpath))
        for filename in filenames:
            if not matcher.match(filename):
                continue
            full_name = os.path.join(abs_dir, filename)
            if any(re.match(pat, filename, re.IGNORECASE)
                   for pat in exclude_pats):
                excluded.append(full_name)
            else:
                included.append(full_name)
    return included, excluded
####################################################################
# METHODS CALLED FROM MAIN()
####################################################################
def usage():
    """Print the command-line help message to stdout."""
    print HELP_MESSAGE
def clean_pyinstaller(md_paths):
    """Clean out pyinstaller dist and build directories so that it has
    to do everything from scratch. We usually do this before building
    full release versions.

    @param md_paths: MDPaths instance with all build directories.
    """
    print S_PPF, "clean_pyinstaller"
    if os.path.isdir(md_paths.pyi_dist_dir):
        print PPF, "Removing distdevide..."
        shutil.rmtree(md_paths.pyi_dist_dir)
    if os.path.isdir(md_paths.pyi_build_dir):
        print PPF, "Removing builddevide..."
        shutil.rmtree(md_paths.pyi_build_dir)
def run_pyinstaller(md_paths):
    """Run pyinstaller with the given parameters. This does not clean
    out the dist and build directories before it begins.

    @raise RuntimeError: if the pyinstaller invocation fails.
    """
    print S_PPF, "run_pyinstaller"
    # first get rid of all pre-compiled and backup files. These HAVE
    # screwed up our binaries in the past!
    print PPF, 'Deleting PYC, *~ and #*# files'
    dead_files, _ = find_files(md_paths.devide_dir, '(.*\.pyc|.*~|#.*#)')
    for fn in dead_files:
        os.unlink(fn)
    cmd = '%s %s %s' % (sys.executable, md_paths.pyinstaller_script,
                        md_paths.specfile)
    ret = os.system(cmd)
    if ret != 0:
        raise RuntimeError('Error running PYINSTALLER.')
    if os.name == 'nt':
        # manifests and the MSVC runtime have to sit next to the
        # executable for it to start on Windows
        for efile in ['devide.exe.manifest',
                      'msvcm80.dll', 'Microsoft.VC80.CRT.manifest']:
            print PPF, "WINDOWS: copying", efile
            src = os.path.join(
                md_paths.specfile_dir,
                efile)
            dst = os.path.join(
                md_paths.pyi_dist_dir,
                efile)
            shutil.copyfile(src, dst)
    else:
        # rename binary and create invoking script
        # we only have to set LD_LIBRARY_PATH, PYTHONPATH is correct
        # copy devide binary to devide.bin
        invoking_fn = os.path.join(md_paths.pyi_dist_dir, 'devide')
        os.rename(invoking_fn,
                  os.path.join(md_paths.pyi_dist_dir, 'devide.bin'))
        # copy our own script to devide
        shutil.copyfile(
            os.path.join(
                md_paths.specfile_dir, 'devideInvokingScript.sh'),
            invoking_fn)
        # chmod +x $SCRIPTFILE
        os.chmod(invoking_fn,0755)
def package_dist(md_paths):
    """After pyinstaller has been executed, do all actions to package
    up a distribution: extract the DeVIDE version from the built
    binary, then build an NSIS installer on Windows or a versioned
    tar.bz2 on posix.

    @raise RuntimeError: if the binary cannot be executed, the version
        cannot be parsed, or NSIS fails.
    """
    print S_PPF, "package_dist"
    # get devide version (we need this to stamp the executables)
    cmd = '%s -v' % (os.path.join(md_paths.pyi_dist_dir, 'devide'),)
    s,o = get_status_output(cmd)
    # s == None if DeVIDE has executed successfully
    if s:
        raise RuntimeError('Could not exec DeVIDE to extract version.')
    # the binary prints a line of the form 'DeVIDE v<version>'
    mo = re.search('^DeVIDE\s+(v.*)$', o, re.MULTILINE)
    if mo:
        devide_ver = mo.groups()[0]
    else:
        raise RuntimeError('Could not extract DeVIDE version.')
    if os.name == 'nt':
        # we need to be in the installer directory before starting
        # makensis
        os.chdir(md_paths.specfile_dir)
        cmd = '%s devide.nsi' % (MAKE_NSIS,)
        ret = os.system(cmd)
        if ret != 0:
            raise RuntimeError('Error running NSIS.')
        # nsis creates devidesetup.exe - we're going to rename
        os.rename('devidesetup.exe',
                  'devidesetup-%s.exe' % (devide_ver,))
    else:
        # go to the installer dir
        os.chdir(md_paths.specfile_dir)
        # rename distdevide to devide-version
        basename = 'devide-%s' % (devide_ver,)
        os.rename('distdevide', basename)
        # create tarball with juicy stuff
        tar = tarfile.open('%s.tar.bz2' % basename, 'w:bz2')
        # recursively add directory
        tar.add(basename)
        # finalize
        tar.close()
        # rename devide-version back to distdevide
        os.rename(basename, 'distdevide')
def postproc_sos(md_paths):
    """Post-process all shared objects in the dist tree on posix:
    strip debug symbols and delete rpath information. No-op on other
    platforms; failures are reported but do not abort the build.
    """
    if os.name == 'posix':
        print S_PPF, "postproc_sos (strip, chrpath)"
        # strip all libraries
        so_files, _ = find_files(md_paths.pyi_dist_dir, '.*\.(so$|so\.)')
        print PPF, 'strip / chrpath %d SO files.' % (len(so_files),)
        for so_file in so_files:
            # strip debug info
            ret = os.system('%s %s' % (STRIP, so_file))
            if ret != 0:
                print "Error stripping %s." % (so_file,)
            # remove rpath information
            ret = os.system('%s --delete %s' % (CHRPATH, so_file))
            if ret != 0:
                print "Error chrpathing %s." % (so_file,)
def rebase_dlls(md_paths):
    """Rebase all DLLs in the distdevide tree on Windows. No-op on
    other platforms.

    @raise RuntimeError: if rebase does not return its expected exit
        code.
    """
    if os.name == 'nt':
        print S_PPF, "rebase_dlls"
        # sqlite3.dll cannot be rebased; it even gets corrupted in the
        # process! see this test:
        # C:\TEMP>rebase -b 0x60000000 -e 0x1000000 sqlite3.dll
        # REBASE: *** RelocateImage failed (sqlite3.dll).
        # Image may be corrupted
        # get list of pyd / dll files, excluding sqlite3
        so_files, excluded_files = find_files(
            md_paths.pyi_dist_dir, '.*\.(pyd|dll)', ['sqlite3\.dll'])
        # add newline to each and every filename
        so_files = ['%s\n' % (i,) for i in so_files]
        print "Found %d DLL PYD files..." % (len(so_files),)
        print "Excluded %d files..." % (len(excluded_files),)
        # open file in specfile_dir, write the whole list
        dll_list_fn = os.path.join(
            md_paths.specfile_dir, 'dll_list.txt')
        dll_list = file(dll_list_fn, 'w')
        dll_list.writelines(so_files)
        dll_list.close()
        # now run rebase on the list (@file syntax reads the list file)
        os.chdir(md_paths.specfile_dir)
        ret = os.system(
            '%s -b 0x60000000 -e 0x1000000 @dll_list.txt -v' %
            (REBASE,))
        # rebase returns 99 after rebasing, no idea why.
        if ret != 99:
            raise RuntimeError('Could not rebase DLLs.')
def wrapitk_tree(md_paths):
    """Run the helper script that copies a self-contained WrapITK tree
    into the pyinstaller dist directory.

    @raise RuntimeError: if the helper script fails.
    """
    print S_PPF, "wrapitk_tree"
    py_file = os.path.join(md_paths.specfile_dir, 'wrapitk_tree.py')
    cmd = "%s %s %s" % (sys.executable, py_file, md_paths.pyi_dist_dir)
    ret = os.system(cmd)
    if ret != 0:
        raise RuntimeError(
            'Error creating self-contained WrapITK tree.')
def posix_prereq_check():
    """Verify that strip and chrpath are available on posix.

    @return: True if both tools are found with a parsable version.
    """
    print S_PPF, 'POSIX prereq check'
    # gnu
    # have the word version anywhere
    v = find_command_with_ver(
        'strip',
        '%s --version' % (STRIP,),
        '([0-9\.]+)')
    v = v and find_command_with_ver(
        'chrpath',
        '%s --version' % (CHRPATH,),
        'version\s+([0-9\.]+)')
    return v
def windows_prereq_check():
    """Verify that rebase and makensis are available on Windows, and
    that setuptools is NOT installed (it breaks the dist build).

    @return: True only if all three checks pass.
    """
    print S_PPF, 'WINDOWS prereq check'
    # if you give rebase any other command-line switches (even /?) it
    # exits with return code 99 and outputs its stuff to stderr
    # with -b it exits with return code 0 (expected) and uses stdout
    v = find_command_with_ver(
        'Microsoft Rebase (rebase.exe)',
        '%s -b 0x60000000' % (REBASE,),
        '^(REBASE):\s+Total.*$')
    v = v and find_command_with_ver(
        'Nullsoft Installer System (makensis.exe)',
        '%s /version' % (MAKE_NSIS,),
        '^(v[0-9\.]+)$')
    # now check that setuptools is NOT installed (it screws up
    # everything on Windows)
    try:
        import setuptools
    except ImportError:
        # this is what we want
        print PPF, 'setuptools not found. Good!'
        sut_v = True
    else:
        print PPF, """setuptools is installed.
setuptools will break the DeVIDE dist build. Please uninstall by doing:
\Python25\Scripts\easy_install -m setuptools
del \Python25\Lib\site-packages\setuptools*.*
You can reinstall later by using ez_setup.py again.
"""
        sut_v = False
    return v and sut_v
def main():
try:
optlist, args = getopt.getopt(
sys.argv[1:], 'hs:i:',
['help', 'spec=','pyinstaller-script=','stages='])
except getopt.GetoptError,e:
usage
return
spec = None
pyi_script = None
stages = DEFAULT_STAGES
for o, a in optlist:
if o in ('-h', '--help'):
usage()
return
elif o in ('-s', '--spec'):
spec = a
elif o in ('-i', '--pyinstaller-script'):
pyi_script = a
elif o in ('--stages'):
stages = a
if spec is None or pyi_script is None:
# we need BOTH the specfile and pyinstaller script
usage()
return 1
# dependency checking
if os.name == 'nt':
if not windows_prereq_check():
print PPF, "ERR: Windows prerequisites do not check out."
return 1
else:
if not posix_prereq_check():
print PPF, "ERR: POSIX prerequisites do not check out."
return 1
md_paths = MDPaths(spec, pyi_script)
stages = [i.strip() for i in stages.split(',')]
if S_CLEAN_PYI in stages:
clean_pyinstaller(md_paths)
if S_RUN_PYI in stages:
run_pyinstaller(md_paths)
if S_WRAPITK_TREE in stages:
wrapitk_tree(md_paths)
if S_REBASE_DLLS in stages:
rebase_dlls(md_paths)
if S_POSTPROC_SOS in stages:
postproc_sos(md_paths)
if S_PACKAGE_DIST in stages:
package_dist(md_paths)
if __name__ == '__main__':
main()
| Python |
# PyInstaller hook: matplotlib selects its numerix backends dynamically
# at runtime, so they must be declared as hidden imports explicitly
hiddenimports = ['matplotlib.numerix',
                 'matplotlib.numerix.fft',
                 'matplotlib.numerix.linear_algebra',
                 'matplotlib.numerix.ma',
                 'matplotlib.numerix.mlab',
                 'matplotlib.numerix.npyma',
                 'matplotlib.numerix.random_array',
                 'matplotlib.backends.backend_wxagg']
print "[*] hook-matplotlib.py - HIDDENIMPORTS"
print hiddenimports
| Python |
# this hook is responsible for including everything that the DeVIDE
# modules could need. The top-level spec file explicitly excludes
# them.
import os
import sys
# normalize path of this file, get dirname
hookDir = os.path.dirname(os.path.normpath(__file__))
# split dirname, select everything except the ending "installer/hooks"
dd = hookDir.split(os.sep)[0:-2]
# we have to do this trick, since on windows os.path.join('c:', 'blaat')
# yields 'c:blaat', i.e. relative to current dir, and we know it's absolute
dd[0] = '%s%s' % (dd[0], os.sep)
# turn that into a path again by making use of join (the normpath will take
# care of redundant slashes on *ix due to the above windows trick)
devideDir = os.path.normpath(os.path.join(*dd))
# now we've inserted the devideDir into the module path, so
# import modules should work
sys.path.insert(0, devideDir)
import module_kits
# now also parse config file
import ConfigParser
config_defaults = {'nokits': ''}
cp = ConfigParser.ConfigParser(config_defaults)
cp.read(os.path.join(devideDir, 'devide.cfg'))
# comma-separated list of kits the user excluded from the build
nokits = [i.strip() for i in cp.get('DEFAULT', 'nokits').split(',')]
module_kits_dir = os.path.join(devideDir, 'module_kits')
mkds = module_kits.get_sorted_mkds(module_kits_dir)
# 1. remove the no_kits
# 2. explicitly remove itk_kit, it's handled completely separately by
#    the makePackage.sh script file
mkl = [i.name for i in mkds if i.name not in nokits and i.name not in
       ['itk_kit','itktudoss_kit']]
# other imports
other_imports = ['genMixins', 'gen_utils', 'ModuleBase', 'module_mixins',
                 'module_utils',
                 'modules.viewers.DICOMBrowser',
                 'modules.viewers.slice3dVWR',
                 'modules.viewers.histogram1D',
                 'modules.viewers.TransferFunctionEditor']
# seems on Linux we have to make sure readline comes along (else
# vtkObject introspection windows complain)
try:
    import readline
except ImportError:
    pass
else:
    other_imports.append('readline')
hiddenimports = ['module_kits.%s' % (i,) for i in mkl] + other_imports
print "[*] hook-ModuleManager.py - HIDDENIMPORTS"
print hiddenimports
| Python |
# so vtktudoss.py uses a list of names to construct the various imports
# at runtime, installer doesn't see this. :(
import os
# POSIX builds prefix the wrapper libraries with "lib"; Windows does not
if os.name == 'posix':
    hiddenimports = [
        'libvtktudossGraphicsPython',
        'libvtktudossWidgetsPython',
        'libvtktudossSTLibPython']
else:
    hiddenimports = [
        'vtktudossGraphicsPython',
        'vtktudossWidgetsPython',
        'vtktudossSTLibPython']
print "[*] hook-vtktudoss.py - HIDDENIMPORTS"
print hiddenimports
| Python |
# miscellaneous imports used by snippets
# (snippets are executed at runtime, so PyInstaller cannot see what
# they import; list those modules here as hidden imports)
hiddenimports = ['tempfile']
| Python |
# PyInstaller hook: wx sub-modules that are imported dynamically at
# runtime and therefore invisible to static analysis
hiddenimports = ['wx.aui', 'wx.lib.mixins']
print "[*] hook-wx.py - HIDDENIMPORTS"
print hiddenimports
| Python |
# so vtktud.py uses a list of names to construct the various imports
# at runtime, installer doesn't see this. :(
import os
# POSIX builds prefix the wrapper libraries with "lib"; Windows does not
if os.name == 'posix':
    hiddenimports = ['libvtktudCommonPython',
                     'libvtktudImagingPython', 'libvtktudGraphicsPython',
                     'libvtktudWidgetsPython']
else:
    hiddenimports = ['vtktudCommonPython',
                     'vtktudImagingPython', 'vtktudGraphicsPython',
                     'vtktudWidgetsPython']
print "[*] hook-vtktud.py - HIDDENIMPORTS"
print hiddenimports
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# RESTRUCTURE:
# * remove_binaries (startswith and contains)
# * remove_pure (startswith and contains)
# NB: stay away from any absolute path dependencies!!!
import os
import fnmatch
import re
import sys
def helper_remove_start(name, remove_names):
    """Return True if name starts (case-insensitively) with any of the
    prefixes in remove_names; used to filter libraries out of lists.
    """
    lowered = name.lower()
    return any(lowered.startswith(prefix.lower())
               for prefix in remove_names)
def helper_remove_finds(name, remove_finds):
    """Return True if any entry of remove_finds occurs as a substring
    of name. Everything is lowercased before comparison.
    """
    lowered = name.lower()
    return any(fragment.lower() in lowered for fragment in remove_finds)
def helper_remove_regexp(name, remove_regexps):
    """Return True if name matches (anchored at the start, as re.match
    does) any regexp in remove_regexps. Matching is case-sensitive.
    """
    return any(re.match(pattern, name) for pattern in remove_regexps)
# argv[0] is the name of the Build.py script
INSTALLER_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
# argv[1] is the name of the spec file
# first we get the path of the spec file, then we have to go one up
specpath = os.path.abspath(os.path.dirname(sys.argv[1]))
# APP_DIR is the devide source tree (parent of the installer dir)
APP_DIR = os.path.split(specpath)[0]
from distutils import sysconfig
# matplotlib's data files must be shipped alongside the binaries
MPL_DATA_DIR = os.path.join(sysconfig.get_python_lib(), 'matplotlib/mpl-data')
import gdcm
# GDCM's DICOM dictionary (Part3.xml) is loaded at runtime; ship it too
gdcm_p3xml_fn = os.path.join(gdcm.GDCM_SOURCE_DIR,
                             'Source/InformationObjectDefinition', 'Part3.xml')
# per-platform executable name, extra shared libraries to bundle, and
# system libraries to exclude from the package
if sys.platform.startswith('win'):
    exeName = 'builddevide/devide.exe'
    extraLibs = []
    # we can keep msvcr71.dll and msvcp71.dll, in fact they should just
    # go in the installation directory with the other DLLs, see:
    # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/
    # vclib/html/_crt_c_run.2d.time_libraries.asp
    remove_binaries = ['dciman32.dll', 'ddraw.dll', 'glu32.dll', 'msvcp60.dll',
                       'netapi32.dll', 'opengl32.dll', 'uxtheme.dll']
else:
    exeName = 'builddevide/devide'
    # under some linuxes, libpython is shared -- McMillan installer doesn't
    # know about this...
    extraLibs = []
    # i'm hoping this isn't necessary anymore!
    if False:
        vi = sys.version_info
        if (vi[0], vi[1]) == (2,4):
            # ubuntu hoary
            extraLibs = [('libpython2.4.so.1.0', '/usr/lib/libpython2.4.so.1.0', 'BINARY')]
        elif (vi[0], vi[1]) == (2,2) and \
             os.path.exists('/usr/lib/libpython2.2.so.0.0'):
            # looks like debian woody
            extraLibs = [('libpython2.2.so.0.0', '/usr/lib/libpython2.2.so.0.0',
                          'BINARY')]
    # RHEL3 64 has a static python library.
    #####################################################################
    # on ubuntu 6.06, libdcmdata.so.1 and libofstd.so.1 could live in
    # /usr/lib, and are therefore thrown out by the McMillan Installer
    if os.path.exists('/usr/lib/libdcmdata.so.1') and \
       os.path.exists('/usr/lib/libofstd.so.1'):
        extraLibs.append(
            ('libdcmdata.so.1', '/usr/lib/libdcmdata.so.1', 'BINARY'))
        extraLibs.append(
            ('libofstd.so.1', '/usr/lib/libofstd.so.1','BINARY'))
    ######################################################################
    # to get this to work on Debian 3.1, we also need to ship libstdc++
    # and libXinerama
    # FIXME: figure some other way out to include the CORRECT libstdc++,
    # the previous hardcoding of this caused problems with the VL-e POC
    stdc = '/usr/lib/libstdc++.so.6'
    if os.path.exists(stdc):
        extraLibs.append((os.path.basename(stdc), stdc, 'BINARY'))
    xine = '/usr/lib/libXinerama.so.1'
    if os.path.exists(xine):
        extraLibs.append((os.path.basename(xine), xine, 'BINARY'))
    ######################################################################
    # ubuntu 7.10 has renumbered libtiff 3.7 (or .8) to 4. other dists of
    # course don't have this, so we have to include it.
    libtiff = '/usr/lib/libtiff.so.4'
    if os.path.exists(libtiff):
        extraLibs.append(
            (os.path.basename(libtiff), libtiff, 'BINARY'))
    ######################################################################
    # also add some binary dependencies of numpy that are normally ignored
    # because they are in /lib and/or /usr/lib (see excludes in bindepend.py)
    from distutils import sysconfig
    npdir = os.path.join(sysconfig.get_python_lib(), 'numpy')
    ladir = os.path.join(npdir, 'linalg')
    lplpath = os.path.join(ladir, 'lapack_lite.so')
    # use mcmillan function to get LDD dependencies of lapack_lite.so
    import bindepend
    lpl_deps = bindepend.getImports(lplpath)
    for d in lpl_deps:
        if d.find('lapack') > 0 or d.find('blas') > 0 or \
           d.find('g2c') > 0 or d.find('atlas') > 0:
            extraLibs.append(
                (os.path.basename(d), d, 'BINARY'))
    # end numpy-dependent extraLibs section
    ##################################################################
    # these libs will be removed from the package
    remove_binaries = ['libdl.so', 'libutil.so', 'libm.so', 'libc.so',
                       'libGLU.so', 'libGL.so', 'libGLcore.so',
                       'libnvidia-tls.so',
                       'ld-linux-x86-64.so.2', 'libgcc_s.so',
                       'libtermcap',
                       'libXft.so', 'libXrandr.so', 'libXrender.so',
                       'libpthread.so', 'libreadline.so',
                       'libICE.so',
                       'libSM.so', 'libX11.so',
                       'libXext.so', 'libXi.so',
                       'libXt.so',
                       'libpango', 'libfontconfig', 'libfreetype',
                       'libatk', 'libgtk', 'libgdk',
                       'libglib', 'libgmodule', 'libgobject', 'libgthread',
                       'librt',
                       'qt', '_tkinter']
# make sure remove_binaries is lowercase
remove_binaries = [i.lower() for i in remove_binaries]
# global removes: we want to include this file so that the user can edit it
#remove_binaries += ['defaults.py']
# we have to remove these nasty built-in dependencies EARLY in the game
# NOTE: 'config' is a global injected by the PyInstaller spec machinery
dd = config['EXE_dependencies']
newdd = [i for i in dd
         if not helper_remove_start(i[0].lower(), remove_binaries)]
config['EXE_dependencies'] = newdd
print "[*] APP_DIR == %s" % (APP_DIR)
print "[*] exeName == %s" % (exeName)
mainScript = os.path.join(APP_DIR, 'devide.py')
print "[*] mainScript == %s" % (mainScript)
# generate available kit list #########################################
# simple form of the checking done by the module_kits package itself
sys.path.insert(0, APP_DIR)
import module_kits
#######################################################################
# data/source trees to bundle; Tree() is a PyInstaller spec global
# segments
segTree = Tree(os.path.join(APP_DIR, 'segments'), 'segments', ['.svn'])
# snippets
snipTree = Tree(os.path.join(APP_DIR, 'snippets'), 'snippets', ['.svn'])
# arb data
dataTree = Tree(os.path.join(APP_DIR, 'data'), 'data', ['.svn'])
# documents and help, exclude help source
docsTree = Tree(os.path.join(APP_DIR, 'docs'), 'docs', ['.svn', 'source'])
# all modules
modules_tree = Tree(os.path.join(APP_DIR, 'modules'), 'modules',
                    ['.svn', '*~'])
# all module_kits
module_kits_tree = Tree(os.path.join(APP_DIR, 'module_kits'), 'module_kits',
                        ['.svn', '*~'])
print "===== APP_DIR: ", APP_DIR
# VTKPIPELINE ICONS
# unfortunately, due to the vtkPipeline design, these want to live one
# down from the main dir
vpli_dir = os.path.join(APP_DIR, 'external/vtkPipeline/Icons')
vpli = [(os.path.join('Icons', i),
         os.path.join(vpli_dir, i), 'DATA')
        for i in os.listdir(vpli_dir) if fnmatch.fnmatch(i, '*.xpm')]
# MATPLOTLIB data dir
mpl_data_dir = Tree(MPL_DATA_DIR, 'matplotlibdata')
# GDCM Part3.xml
gdcm_tree = [('gdcmdata/XML/Part3.xml', gdcm_p3xml_fn, 'DATA')]
# disabled: numpy used to be bundled as a tree inside numpy_kit
if False:
    from distutils import sysconfig
    numpy_tree = Tree(
        os.path.join(sysconfig.get_python_lib(),'numpy'),
        prefix=os.path.join('module_kits','numpy_kit','numpy'),
        excludes=['*.pyc', '*.pyo', 'doc', 'docs'])
testing_tree = Tree(os.path.join(APP_DIR, 'testing'), 'testing',
                    ['.svn', '*~', '*.pyc'])
# and some miscellaneous files
misc_tree = [('devide.cfg', '%s/devide.cfg' % (APP_DIR,), 'DATA')]
##########################################################################
SUPPORT_DIR = os.path.join(INSTALLER_DIR, 'support')
# Analysis is a PyInstaller spec global; the two support scripts run
# before the main script to set up the packaged environment
a = Analysis([os.path.join(SUPPORT_DIR, '_mountzlib.py'),
              os.path.join(SUPPORT_DIR, 'useUnicode.py'),
              mainScript],
             pathex=[],
             hookspath=[os.path.join(APP_DIR, 'installer', 'hooks')])
######################################################################
# sanitise a.pure
remove_pure_finds = []
# we remove all module and module_kits based things, because they're
# taken care of by hooks/hook-moduleManager.py
# we also remove itk (it seems to be slipping in in spite of the fact
# that I'm explicitly excluding it from module_kits)
remove_pure_starts = ['modules.', 'module_kits', 'testing', 'itk']
# iterate backwards so deletion does not shift pending indices
for i in range(len(a.pure)-1, -1, -1):
    if helper_remove_finds(a.pure[i][1], remove_pure_finds) or \
       helper_remove_start(a.pure[i][0], remove_pure_starts):
        del a.pure[i]
######################################################################
# sanitise a.binaries
remove_binary_finds = []
for i in range(len(a.binaries)-1, -1, -1):
    if helper_remove_finds(a.binaries[i][1], remove_binaries) or \
       helper_remove_start(a.binaries[i][0], remove_binary_finds):
        del a.binaries[i]
######################################################################
# create the compressed archive with all the other pyc files
# will be integrated with EXE archive
pyz = PYZ(a.pure)
# in Installer 6a2, the -f option is breaking things (the support directory
# is deleted after the first invocation!)
#options = [('f','','OPTION')] # LD_LIBRARY_PATH is correctly set on Linux
#options = [('v', '', 'OPTION')] # Python is ran with -v
options = []
# because we've already modified the config, we won't be pulling in
# hardcoded dependencies that we don't want.
exe = EXE(pyz,
          a.scripts + options,
          exclude_binaries=1,
          name=exeName,
          icon=os.path.join(APP_DIR, 'resources/graphics/devidelogo64x64.ico'),
          debug=0,
          strip=0,
          console=True)
all_binaries = a.binaries + modules_tree + module_kits_tree + vpli + \
               mpl_data_dir + \
               extraLibs + segTree + snipTree + dataTree + docsTree + misc_tree + \
               testing_tree + gdcm_tree
coll = COLLECT(exe,
               all_binaries,
               strip=0,
               name='distdevide')
# wrapitk_tree is packaged completely separately
| Python |
"""Module for independently packaging up whole WrapITK tree.
"""
import itkConfig
import glob
import os
import shutil
import sys
# customise the following variables
# platform-specific shared-object naming and ITK binary location
if os.name == 'nt':
    SO_EXT = 'dll'
    SO_GLOB = '*.%s' % (SO_EXT,)
    # Python extension modules are .pyd files on Windows
    PYE_GLOB = '*.pyd'
    # this should be c:/opt/ITK/bin
    ITK_SO_DIR = os.path.normpath(
        os.path.join(itkConfig.swig_lib, '../../../../bin'))
else:
    SO_EXT = 'so'
    # versioned shared objects, e.g. libfoo.so.1.2
    SO_GLOB = '*.%s.*' % (SO_EXT,)
    PYE_GLOB = '*.so'
    # determine ITK_SO_DIR by walking up from the swig lib dir;
    # remember where we are so we can change back afterwards
    curdir = os.path.abspath(os.curdir)
    # first go down to Insight/lib/InsightToolkit/WrapITK/lib
    os.chdir(itkConfig.swig_lib)
    # then go up twice
    os.chdir(os.path.join('..', '..'))
    # then find the curdir
    ITK_SO_DIR = os.path.abspath(os.curdir)
    # change back to where we started
    os.chdir(curdir)
# we want:
# itk_kit/wrapitk/py (*.py and Configuration and itkExtras subdirs from
# WrapITK/Python)
# itk_kit/wrapitk/lib (*.py and *.so from WrapITK/lib)
def get_wrapitk_tree():
    """Build the list of WrapITK files to package.

    Returns a list of (dest_relative_path, source_path) pairs, relative
    to the itk_kit/wrapitk top.
    """
    def _tree(prefix, paths):
        # map each source file to (prefix/basename, source_path)
        return [('%s/%s' % (prefix, os.path.basename(p)), p)
                for p in paths]

    # WrapITK/lib -> itk_kit/wrapitk/lib (py files, so/dll files)
    lib_sources = glob.glob('%s/*.py' % (itkConfig.swig_lib,))
    # on linux there are Python SO files, on Windows they're actually
    # all PYDs (and not DLLs) - these are ALL python extension modules
    lib_sources += glob.glob('%s/%s' % (itkConfig.swig_lib, PYE_GLOB))
    if os.name == 'nt':
        # on Windows we also need the SwigRuntime.dll from the ITK bin
        # dir (the files above live under WrapITK\lib on Windows)
        lib_sources += glob.glob('%s/%s' % (ITK_SO_DIR, SO_GLOB))

    # WrapITK/Python -> itk_kit/wrapitk/python (py files)
    py_path = os.path.normpath(os.path.join(itkConfig.config_py, '..'))

    tree = _tree('lib', lib_sources)
    tree += _tree('python', glob.glob('%s/*.py' % (py_path,)))
    # WrapITK/Python/Configuration -> itk_kit/wrapitk/python/Configuration
    tree += _tree('python/Configuration',
                  glob.glob('%s/*.py' %
                            (os.path.join(py_path, 'Configuration'),)))
    # itkExtras
    tree += _tree('python/itkExtras',
                  glob.glob('%s/*.py' %
                            (os.path.join(py_path, 'itkExtras'),)))
    return tree
def get_itk_so_tree():
    """Return the tree of ITK shared objects themselves.

    The returned (dest_relative_path, source_path) pairs are relative to
    the itk_kit/wrapitk top.
    """
    tree = []
    for so_path in glob.glob('%s/%s' % (ITK_SO_DIR, SO_GLOB)):
        tree.append(('lib/%s' % (os.path.basename(so_path),), so_path))
    return tree
def copy3(src, dst):
    """Copy src to dst like shutil.copy2, but copy symlinks as links.

    Equivalent to the --no-dereference behaviour of cp.  Any missing
    parent directories of dst are created first.
    """
    parent = os.path.dirname(dst)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    if not os.path.islink(src):
        shutil.copy2(src, dst)
    else:
        # re-create the link itself instead of copying its target
        os.symlink(os.readlink(src), dst)
def install(devide_app_dir):
    """Install a self-contained wrapitk installation in itk_kit_dir.

    Deletes any existing module_kits/itk_kit/wrapitk directory under
    devide_app_dir, then copies in the WrapITK files and the ITK shared
    objects.  On Windows the shared objects are moved back into the main
    DeVIDE directory afterwards (see comment below).
    """
    itk_kit_dir = os.path.join(devide_app_dir, 'module_kits/itk_kit')
    print "Deleting existing wrapitk dir."
    sys.stdout.flush()
    witk_dest_dir = os.path.join(itk_kit_dir, 'wrapitk')
    if os.path.exists(witk_dest_dir):
        shutil.rmtree(witk_dest_dir)
    print "Creating list of WrapITK files..."
    sys.stdout.flush()
    wrapitk_tree = get_wrapitk_tree()
    print "Copying WrapITK files..."
    sys.stdout.flush()
    # each tree entry is (dest_relative_path, source_path)
    for f in wrapitk_tree:
        copy3(f[1], os.path.join(witk_dest_dir, f[0]))
    print "Creating list of ITK shared objects..."
    sys.stdout.flush()
    itk_so_tree = get_itk_so_tree()
    print "Copying ITK shared objects..."
    sys.stdout.flush()
    for f in itk_so_tree:
        copy3(f[1], os.path.join(witk_dest_dir, f[0]))
    if os.name == 'nt':
        # on Windows, it's not easy setting the DLL load path in a running
        # application. You could try SetDllDirectory, but that only works
        # since XP SP1. You could also change the current dir, but our DLLs
        # are lazy loaded, so no go. An invoking batchfile is out of the
        # question.
        print "Moving all SOs back to main DeVIDE dir [WINDOWS] ..."
        lib_path = os.path.join(witk_dest_dir, 'lib')
        so_files = glob.glob(os.path.join(lib_path, SO_GLOB))
        so_files.extend(glob.glob(os.path.join(lib_path, PYE_GLOB)))
        for so_file in so_files:
            shutil.move(so_file, devide_app_dir)
        # also write list of DLLs that were moved to lib_path/moved_dlls.txt
        f = file(os.path.join(lib_path, 'moved_dlls.txt'), 'w')
        f.writelines(['%s\n' % (os.path.basename(fn),) for fn in so_files])
        f.close()
if __name__ == '__main__':
    # invoked as a script: the single argument is the DeVIDE app dir
    if len(sys.argv) < 2:
        print "Specify devide app dir as argument."
    else:
        install(sys.argv[1])
| Python |
#!/usr/bin/env python
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import re
import getopt
import mutex
import os
import re
import stat
import string
import sys
import time
import traceback
import ConfigParser
# we need to import this explicitly, else the installer builder
# forgets it and the binary has e.g. no help() support.
import site
# True when running from a source checkout (no baked-in version info)
dev_version = False
try:
    # devide_versions.py is written by johannes during building DeVIDE
    # distribution
    import devide_versions
except ImportError:
    dev_version = True
else:
    # check if devide_version.py comes from the same dir as this devide.py
    dv_path = os.path.abspath(os.path.dirname(devide_versions.__file__))
    d_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    if dv_path != d_path:
        # devide_versions.py is imported from a different dir than this
        # file, so DEV
        dev_version = True
if dev_version:
    # if there's no valid versions.py, we have these defaults
    # DEVIDE_VERSION is usually y.m.d of the release, or y.m.D if
    # development version
    DEVIDE_VERSION = "12.3.D"
    DEVIDE_REVISION_ID = "DEV"
    JOHANNES_REVISION_ID = "DEV"
else:
    # release build: take all version info from the generated module
    DEVIDE_VERSION = devide_versions.DEVIDE_VERSION
    DEVIDE_REVISION_ID = devide_versions.DEVIDE_REVISION_ID
    JOHANNES_REVISION_ID = devide_versions.JOHANNES_REVISION_ID
############################################################################
class MainConfigClass(object):
def __init__(self, appdir):
# first need to parse command-line to get possible --config-profile
# we store all parsing results in pcl_data structure
##############################################################
pcl_data = self._parseCommandLine()
config_defaults = {
'nokits': '',
'interface' : 'wx',
'scheduler' : 'hybrid',
'extra_module_paths' : '',
'streaming_pieces' : 5,
'streaming_memory' : 100000}
cp = ConfigParser.ConfigParser(config_defaults)
cp.read(os.path.join(appdir, 'devide.cfg'))
CSEC = pcl_data.config_profile
# then apply configuration file and defaults #################
##############################################################
nokits = [i.strip() for i in cp.get(CSEC, \
'nokits').split(',')]
# get rid of empty strings (this is not as critical here as it
# is for emps later, but we like to be consistent)
self.nokits = [i for i in nokits if i]
self.streaming_pieces = cp.getint(CSEC, 'streaming_pieces')
self.streaming_memory = cp.getint(CSEC, 'streaming_memory')
self.interface = cp.get(CSEC, 'interface')
self.scheduler = cp.get(CSEC, 'scheduler')
emps = [i.strip() for i in cp.get(CSEC, \
'extra_module_paths').split(',')]
# ''.split(',') will yield [''], which we have to get rid of
self.extra_module_paths = [i for i in emps if i]
# finally apply command line switches ############################
##################################################################
# these ones can be specified in config file or parameters, so
# we have to check first if parameter has been specified, in
# which case it overrides config file specs
if pcl_data.nokits:
self.nokits = pcl_data.nokits
if pcl_data.scheduler:
self.scheduler = pcl_data.scheduler
if pcl_data.extra_module_paths:
self.extra_module_paths = pcl_data.extra_module_paths
# command-line only, defaults set in PCLData ctor
# so we DON'T have to check if config file has already set
# them
self.interface = pcl_data.interface
self.stereo = pcl_data.stereo
self.test = pcl_data.test
self.script = pcl_data.script
self.script_params = pcl_data.script_params
self.load_network = pcl_data.load_network
self.hide_devide_ui = pcl_data.hide_devide_ui
# now sanitise some options
if type(self.nokits) != type([]):
self.nokits = []
def dispUsage(self):
self.disp_version()
print ""
print "-h or --help : Display this message."
print "-v or --version : Display DeVIDE version."
print "--version-more : Display more DeVIDE version info."
print "--config-profile name : Use config profile with name."
print "--no-kits kit1,kit2 : Don't load the specified kits."
print "--kits kit1,kit2 : Load the specified kits."
print "--scheduler hybrid|event"
print " : Select scheduler (def: hybrid)"
print "--extra-module-paths path1,path2"
print " : Specify extra module paths."
print "--interface wx|script"
print " : Load 'wx' or 'script' interface."
print "--stereo : Allocate stereo visuals."
print "--test : Perform built-in unit testing."
print "--script : Run specified .py in script mode."
print "--load-network : Load specified DVN after startup."
print "--hide-devide-ui : Hide the DeVIDE UI at startup."
def disp_version(self):
print "DeVIDE v%s" % (DEVIDE_VERSION,)
def disp_more_version_info(self):
print "DeVIDE rID:", DEVIDE_REVISION_ID
print "Constructed by johannes:", JOHANNES_REVISION_ID
def _parseCommandLine(self):
"""Parse command-line, return all parsed parameters in
PCLData class.
"""
class PCLData:
def __init__(self):
self.config_profile = 'DEFAULT'
self.nokits = None
self.interface = None
self.scheduler = None
self.extra_module_paths = None
self.stereo = False
self.test = False
self.script = None
self.script_params = None
self.load_network = None
self.hide_devide_ui = None
pcl_data = PCLData()
try:
# 'p:' means -p with something after
optlist, args = getopt.getopt(
sys.argv[1:], 'hv',
['help', 'version', 'version-more', 'no-kits=', 'kits=', 'stereo', 'interface=', 'test',
'script=', 'script-params=', 'config-profile=',
'scheduler=', 'extra-module-paths=', 'load-network='])
except getopt.GetoptError,e:
self.dispUsage()
sys.exit(1)
for o, a in optlist:
if o in ('-h', '--help'):
self.dispUsage()
sys.exit(0)
elif o in ('-v', '--version'):
self.disp_version()
sys.exit(0)
elif o in ('--version-more',):
self.disp_more_version_info()
sys.exit(0)
elif o in ('--config-profile',):
pcl_data.config_profile = a
elif o in ('--no-kits',):
pcl_data.nokits = [i.strip() for i in a.split(',')]
elif o in ('--kits',):
# this actually removes the listed kits from the nokits list
kits = [i.strip() for i in a.split(',')]
for kit in kits:
try:
del pcl_data.nokits[pcl_data.nokits.index(kit)]
except ValueError:
pass
elif o in ('--interface',):
if a == 'pyro':
pcl_data.interface = 'pyro'
elif a == 'xmlrpc':
pcl_data.interface = 'xmlrpc'
elif a == 'script':
pcl_data.interface = 'script'
else:
pcl_data.interface = 'wx'
elif o in ('--scheduler',):
if a == 'event':
pcl_data.scheduler = 'event'
else:
pcl_data.scheduler = 'hybrid'
elif o in ('--extra-module-paths',):
emps = [i.strip() for i in a.split(',')]
# get rid of empty paths
pcl_data.extra_module_paths = [i for i in emps if i]
elif o in ('--stereo',):
pcl_data.stereo = True
elif o in ('--test',):
pcl_data.test = True
elif o in ('--script',):
pcl_data.script = a
elif o in ('--script-params',):
pcl_data.script_params = a
elif o in ('--load-network',):
pcl_data.load_network = a
elif o in ('--hide-devide-ui',):
pcl_data.hide_devide_ui = a
return pcl_data
############################################################################
class DeVIDEApp:
    """Main devide application class.

    This instantiates the necessary main loop class (wx or headless pyro) and
    acts as communications hub for the rest of DeVIDE. It also instantiates
    and owns the major components: Scheduler, ModuleManager, etc.
    """

    def __init__(self):
        """Construct DeVIDEApp.

        Parse command-line arguments, read configuration. Instantiate and
        configure relevant main-loop / interface class.
        """
        # mutex guarding set_progress against re-entrant invocation
        self._inProgress = mutex.mutex()
        self._previousProgressTime = 0
        self._currentProgress = -1
        self._currentProgressMsg = ''
        #self._appdir, exe = os.path.split(sys.executable)
        if hasattr(sys, 'frozen') and sys.frozen:
            # binary distribution: appdir is where the executable lives
            self._appdir, exe = os.path.split(sys.executable)
        else:
            # running from source: appdir is where this script lives
            dirname = os.path.dirname(sys.argv[0])
            if dirname and dirname != os.curdir:
                self._appdir = os.path.abspath(dirname)
            else:
                self._appdir = os.getcwd()
        sys.path.insert(0, self._appdir) # for cx_Freeze
        # before this is instantiated, we need to have the paths
        self.main_config = MainConfigClass(self._appdir)
        ####
        # startup relevant interface instance
        if self.main_config.interface == 'pyro':
            from interfaces.pyro_interface import PyroInterface
            self._interface = PyroInterface(self)
            # this is a GUI-less interface, so wx_kit has to go
            self.main_config.nokits.append('wx_kit')
        elif self.main_config.interface == 'xmlrpc':
            from interfaces.xmlrpc_interface import XMLRPCInterface
            self._interface = XMLRPCInterface(self)
            # this is a GUI-less interface, so wx_kit has to go
            self.main_config.nokits.append('wx_kit')
        elif self.main_config.interface == 'script':
            from interfaces.script_interface import ScriptInterface
            self._interface = ScriptInterface(self)
            self.main_config.nokits.append('wx_kit')
        else:
            # default: full wx GUI
            from interfaces.wx_interface import WXInterface
            self._interface = WXInterface(self)
        # view_mode is True only when the wx GUI kit is loaded
        if 'wx_kit' in self.main_config.nokits:
            self.view_mode = False
        else:
            self.view_mode = True
        ####
        # now startup module manager
        try:
            # load up the ModuleManager; we do that here as the ModuleManager
            # needs to give feedback via the GUI (when it's available)
            global module_manager
            import module_manager
            self.module_manager = module_manager.ModuleManager(self)
        except Exception, e:
            es = 'Unable to startup the ModuleManager: %s. Terminating.' % \
                 (str(e),)
            self.log_error_with_exception(es)
            # this is a critical error: if the ModuleManager raised an
            # exception during construction, we have no ModuleManager
            # return False, thus terminating the application
            return False
        ####
        # start network manager
        import network_manager
        self.network_manager = network_manager.NetworkManager(self)
        ####
        # start scheduler
        import scheduler
        self.scheduler = scheduler.SchedulerProxy(self)
        if self.main_config.scheduler == 'event':
            self.scheduler.mode = \
                scheduler.SchedulerProxy.EVENT_DRIVEN_MODE
            self.log_info('Selected event-driven scheduler.')
        else:
            self.scheduler.mode = \
                scheduler.SchedulerProxy.HYBRID_MODE
            self.log_info('Selected hybrid scheduler.')
        ####
        # call post-module manager interface hook
        self._interface.handler_post_app_init()
        self.setProgress(100, 'Started up')

    def close(self):
        """Quit application.

        Closes the interface, network manager and module manager, then
        exits the process.
        """
        self._interface.close()
        self.network_manager.close()
        self.module_manager.close()
        # and make 100% we're done
        sys.exit()

    def get_devide_version(self):
        """Return the DeVIDE version string."""
        return DEVIDE_VERSION

    def get_module_manager(self):
        """Return the owned ModuleManager instance."""
        return self.module_manager

    def log_error(self, msg):
        """Report error.

        In general this will be brought to the user's attention immediately.
        """
        self._interface.log_error(msg)

    def log_error_list(self, msgs):
        """Report a list of error messages via the interface."""
        self._interface.log_error_list(msgs)

    def log_error_with_exception(self, msg):
        """Can be used by DeVIDE components to log an error message along
        with all information about current exception.
        """
        import gen_utils
        emsgs = gen_utils.exceptionToMsgs()
        self.log_error_list(emsgs + [msg])

    def log_info(self, message, timeStamp=True):
        """Log informative message to the log file or log window.
        """
        self._interface.log_info(message, timeStamp)

    def log_message(self, message, timeStamp=True):
        """Log a message that will also be brought to the user's attention,
        for example in a dialog box.
        """
        self._interface.log_message(message, timeStamp)

    def log_warning(self, message, timeStamp=True):
        """Log warning message.

        This is not as serious as an error condition, but it should also be
        brought to the user's attention.
        """
        self._interface.log_warning(message, timeStamp)

    def get_progress(self):
        """Return the most recently set progress value."""
        return self._currentProgress

    def set_progress(self, progress, message, noTime=False):
        """Forward a progress update (percentage + message) to the interface.

        Updates are rate-limited and de-duplicated; see inline comments.
        """
        # 1. we shouldn't call setProgress whilst busy with setProgress
        # 2. only do something if the message or the progress has changed
        # 3. we only perform an update if a second or more has passed
        #    since the previous update, unless this is the final
        #    (i.e. 100% update) or noTime is True
        # the testandset() method of mutex.mutex is atomic... this will grab
        # the lock and set it if it isn't locked alread and then return true.
        # returns false otherwise
        if self._inProgress.testandset():
            if message != self._currentProgressMsg or \
               progress != self._currentProgress:
                if abs(progress - 100.0) < 0.01 or noTime or \
                   time.time() - self._previousProgressTime >= 1:
                    self._previousProgressTime = time.time()
                    self._currentProgressMsg = message
                    self._currentProgress = progress
                    self._interface.set_progress(progress, message, noTime)
            # unset the mutex thingy
            self._inProgress.unlock()

    # legacy camelCase alias kept for existing callers
    setProgress = set_progress

    def start_main_loop(self):
        """Start the main execution loop.

        This will thunk through to the contained interface object.
        """
        self._interface.start_main_loop()

    def get_appdir(self):
        """Return directory from which DeVIDE has been invoked.
        """
        return self._appdir

    def get_interface(self):
        """Return binding to the current interface.
        """
        return self._interface
############################################################################
def main():
    """Construct the DeVIDE application and run its main loop."""
    devide_app = DeVIDEApp()
    devide_app.start_main_loop()

if __name__ == '__main__':
    main()
| Python |
# example driver script for offline / command-line processing with DeVIDE
# the following variables are magically set in this script:
# interface - instance of ScriptInterface, with the following calls:
# meta_modules = load_and_realise_network()
# execute_network(self, meta_modules)
# clear_network(self)
# instance = get_module_instance(self, module_name)
# config = get_module_config(self, module_name)
# set_module_config(self, module_name, config)
# See devide/interfaces/simple_api_mixin.py for details.
# start the script with:
# dre devide --interface script --script example_offline_driver.py --script-params 0.0,100.0
def main():
    """Drive a prepared DeVIDE network with thresholds from the CLI.

    NOTE(review): 'script_params' and 'interface' are injected into this
    script's globals by the DeVIDE script interface (see the header
    comments above).
    """
    # script_params is everything that gets passed on the DeVIDE
    # commandline after --script-params
    # first get the two strings split by a comma
    l,u = script_params.split(',')
    # then cast to float
    LOWER = float(l)
    UPPER = float(u)
    print "offline_driver.py starting"
    # load the DVN that you prepared
    # load_and_realise_network returns module dictionary + connections
    mdict,conn = interface.load_and_realise_network(
        'BatchModeWithoutUI-ex.dvn')
    # parameter is the module name that you assigned in DeVIDE
    # using right-click on the module, then "Rename"
    thresh_conf = interface.get_module_config('threshold')
    # what's returned is module_instance._config (try this in the
    # devide module introspection interface by introspecting "Module
    # (self)" and then typing "dir(obj._config)"
    thresh_conf.lowerThreshold = LOWER
    thresh_conf.upperThreshold = UPPER
    # set module config back again
    interface.set_module_config('threshold', thresh_conf)
    # get, change and set writer config to change filename
    writer_conf = interface.get_module_config('vtp_wrt')
    writer_conf.filename = 'result_%s-%s.vtp' % (str(LOWER),str(UPPER))
    interface.set_module_config('vtp_wrt', writer_conf)
    # run the network
    interface.execute_network(mdict.values())
    print "offline_driver.py done."

main()
| Python |
# dummy
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import wx # todo: this should go away...
import string
import sys
import traceback
# todo: remove all VTK dependencies from this file!!
def clampVariable(v, min, max):
    """Clamp v to the closed range [min, max] and return the result."""
    # NB: the parameters shadow the builtins of the same name (kept for
    # backwards compatibility), so we can't call min()/max() in here;
    # use explicit comparisons instead.
    if v > max:
        return max
    if v < min:
        return min
    return v
def exceptionToMsgs():
    """Return the current exception as a list of formatted traceback
    strings (one list element per traceback line)."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    return traceback.format_exception(exc_type, exc_value, exc_tb)
def logError_DEPRECATED(msg):
    """DEPRECATED. Rather use devide_app.log_error().

    Logs the formatted traceback of the current exception followed by
    msg via wx.LogError; flushing turns it all into a single dialog
    where msg is the headline and the traceback the detail.
    """
    # create nice formatted string with tracebacks and all
    ei = sys.exc_info()
    dmsgs = traceback.format_exception(ei[0], ei[1], ei[2])
    # we can't disable the timestamp yet
    # wxLog_SetTimestamp()
    # set the detail message
    for dmsg in dmsgs:
        wx.LogError(dmsg)
    # then the most recent
    wx.LogError(msg)
    # also echo to stdout for console users
    print msg
    # and flush... the last message will be the actual error
    # message, what we did before will add to it to become the
    # detail message
    wx.Log_FlushActive()
def logWarning_DEPRECATED(msg):
    """DEPRECATED. Rather use devide_app.logWarning().

    Logs the formatted traceback of the current exception followed by
    msg via wx.LogWarning, then flushes so everything becomes a single
    dialog with msg as the headline.
    """
    # detail messages: the full traceback of the current exception
    for detail in traceback.format_exception(*sys.exc_info()):
        wx.LogWarning(detail)
    # the actual warning goes last so it becomes the headline message
    wx.LogWarning(msg)
    # flush... the last message will be the actual warning message,
    # everything logged before it becomes the detail message
    wx.Log_FlushActive()
def setGridCellYesNo(grid, row, col, yes=True):
    """Render a boolean in a wx grid cell: green '1' for yes, red '0'
    for no."""
    if yes:
        text, colour = '1', wx.Colour(0, 255, 0)
    else:
        text, colour = '0', wx.Colour(255, 0, 0)
    grid.SetCellValue(row, col, text)
    grid.SetCellBackgroundColour(row, col, colour)
def textToFloat(text, defaultFloat):
    """Convert text to a float and return it.

    Returns defaultFloat if the conversion fails for any reason.
    """
    try:
        return float(text)
    except Exception:
        return defaultFloat
def textToInt(text, defaultInt):
    """Convert text to an integer and return it.

    Returns defaultInt if the conversion fails for any reason.
    """
    try:
        return int(text)
    except Exception:
        return defaultInt
def textToTuple(text, defaultTuple):
    """Convert the text representation of a tuple into a real tuple.

    No checking for type or number of elements is done; see
    textToTypeTuple for that.  Returns defaultTuple when the text
    cannot be evaluated.
    """
    text = text.strip()
    if not text:
        # BUGFIX: empty input used to raise IndexError on text[0];
        # treat it as unparseable instead
        return defaultTuple
    # first make sure that the text starts and ends with brackets
    if text[0] != '(':
        text = '(%s' % (text,)
    if text[-1] != ')':
        text = '%s)' % (text,)
    try:
        # SECURITY NOTE: eval() on this text is dangerous if it can come
        # from an untrusted source; consider ast.literal_eval instead.
        returnTuple = eval('tuple(%s)' % (text,))
    except Exception:
        returnTuple = defaultTuple
    return returnTuple

def textToTypeTuple(text, defaultTuple, numberOfElements, aType):
    """Convert the text representation of a tuple into a real tuple with
    numberOfElements elements, all of type aType.

    If the required number of elements isn't available, or they can't
    all be cast to the correct type, defaultTuple is returned.
    """
    aTuple = textToTuple(text, defaultTuple)
    if len(aTuple) != numberOfElements:
        returnTuple = defaultTuple
    else:
        try:
            returnTuple = tuple([aType(e) for e in aTuple])
        # BUGFIX: also catch TypeError -- e.g. int(None) raises
        # TypeError, not ValueError, and used to escape this handler
        except (ValueError, TypeError):
            returnTuple = defaultTuple
    return returnTuple
| Python |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import vtk
from module_kits.misc_kit.mixins import SubjectMixin
from devide_canvas_object import DeVIDECanvasGlyph
import operator
import wx # we're going to use this for event handling
from module_kits.misc_kit import dprint
# think about turning this into a singleton.
class DeVIDECanvasEvent:
    """Value object recording the most recent canvas interaction event
    plus current button / picking state.
    """
    def __init__(self):
        # --- details of the most recent event ---
        self.wx_event = None   # originating wx event object
        self.name = None       # symbolic event name, e.g. 'left_button_down'
        # pos / last_pos / pos_delta are wx coords (top-left is 0,0)
        self.pos = (0, 0)
        self.last_pos = (0, 0)
        self.pos_delta = (0, 0)
        # disp_pos is in VTK display coords: bottom-left is 0,0
        self.disp_pos = (0, 0)
        self.world_pos = (0, 0, 0)
        # --- button state ---
        self.left_button = False
        self.middle_button = False
        self.right_button = False
        # --- picking state ---
        self.clicked_object = None
        # which cobject has the mouse
        self.picked_cobject = None
        self.picked_sub_prop = None
class DeVIDECanvas(SubjectMixin):
"""Give me a vtkRenderWindowInteractor with a Renderer, and I'll
do the rest. YEAH.
"""
    def __init__(self, renderwindowinteractor, renderer):
        """Bind the canvas to an existing RWI + renderer and hook up all
        wx mouse/keyboard event handlers.
        """
        self._rwi = renderwindowinteractor
        self._ren = renderer
        # need this to do same mouse capturing as original RWI under Win
        self._rwi_use_capture = \
            vtk.wx.wxVTKRenderWindowInteractor._useCapture
        # we can't switch on Line/Point/Polygon smoothing here,
        # because the renderwindow has already been initialised
        # we do it in main_frame.py right after we create the RWI
        # parent 2 ctor
        SubjectMixin.__init__(self)
        # all canvas objects currently on display
        self._cobjects = []
        # dict for mapping from prop back to cobject
        self.prop_to_glyph = {}
        self._previousRealCoords = None
        self._potentiallyDraggedObject = None
        self._draggedObject = None
        self._ren.SetBackground(1.0,1.0,1.0)
        self._ren.GetActiveCamera().SetParallelProjection(1)
        # set a sensible initial zoom
        self._zoom(0.004)
        istyle = vtk.vtkInteractorStyleUser()
        #istyle = vtk.vtkInteractorStyleImage()
        self._rwi.SetInteractorStyle(istyle)
        # route all mouse events through our own handlers
        self._rwi.Bind(wx.EVT_RIGHT_DOWN, self._handler_rd)
        self._rwi.Bind(wx.EVT_RIGHT_UP, self._handler_ru)
        self._rwi.Bind(wx.EVT_LEFT_DOWN, self._handler_ld)
        self._rwi.Bind(wx.EVT_LEFT_UP, self._handler_lu)
        self._rwi.Bind(wx.EVT_MIDDLE_DOWN, self._handler_md)
        self._rwi.Bind(wx.EVT_MIDDLE_UP, self._handler_mu)
        self._rwi.Bind(wx.EVT_MOUSEWHEEL, self._handler_wheel)
        self._rwi.Bind(wx.EVT_MOTION, self._handler_motion)
        self._rwi.Bind(wx.EVT_LEFT_DCLICK, self._handler_ldc)
        #self._rwi.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        #self._rwi.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        # If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions
        # of all characters are always returned. EVT_CHAR also performs
        # other necessary keyboard-dependent translations.
        # * we unbind the char handler added by the wxRWI (else alt-w
        # for example gets interpreted as w for wireframe e.g.)
        self._rwi.Unbind(wx.EVT_CHAR)
        self._rwi.Bind(wx.EVT_CHAR, self._handler_char)
        #self._rwi.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        # observer ids are kept so close() can deregister them
        self._observer_ids = []
        self.event = DeVIDECanvasEvent()
        # do initial drawing here.
    def close(self):
        """Release canvas resources: remove all canvas objects, remove
        VTK observers, and drop the RWI / renderer references.
        """
        # first remove all objects
        # (we could do this more quickly, but we're opting for neatly)
        # iterate backwards because remove_object mutates the list
        for i in range(len(self._cobjects)-1,-1,-1):
            cobj = self._cobjects[i]
            self.remove_object(cobj)
        for i in self._observer_ids:
            self._rwi.RemoveObserver(i)
        del self._rwi
        del self._ren
    # nuke this function, replace with display_to_world.
    # events are in display, everything else in world.
    # go back to graph_editor
    def eventToRealCoords_DEPRECATED(self, ex, ey):
        """Convert window event coordinates to canvas relative coordinates.

        NOTE(review): dead code -- GetViewStart / GetScrollPixelsPerUnit
        are wx.ScrolledWindow methods that do not exist on this class;
        see the removal note above.
        """
        # get canvas parameters
        vsx, vsy = self.GetViewStart()
        dx, dy = self.GetScrollPixelsPerUnit()
        # calculate REAL coords
        rx = ex + vsx * dx
        ry = ey + vsy * dy
        return (rx, ry)
def display_to_world(self, dpt):
"""Takes 3-D display point as input, returns 3-D world point.
"""
# make sure we have 3 elements
if len(dpt) < 3:
dpt = tuple(dpt) + (0.0,)
elif len(dpt) > 3:
dpt = tuple(dpt[0:3])
self._ren.SetDisplayPoint(dpt)
self._ren.DisplayToWorld()
return self._ren.GetWorldPoint()[0:3]
def world_to_display(self, wpt):
"""Takes 3-D world point as input, returns 3-D display point.
"""
self._ren.SetWorldPoint(tuple(wpt) + (0.0,)) # this takes 4-vec
self._ren.WorldToDisplay()
return self._ren.GetDisplayPoint()
def flip_y(self, y):
return self._rwi.GetSize()[1] - y - 1
def wx_to_world(self, wx_x, wx_y):
disp_x = wx_x
disp_y = self.flip_y(wx_y)
world_depth = 0.0
disp_z = self.world_to_display((0.0,0.0, world_depth))[2]
wex, wey, wez = self.display_to_world((disp_x,disp_y,disp_z))
return (wex, wey, wez)
    def _helper_handler_capture_release(self, button):
        """Helper method to be called directly after preamble
        helper in button up handlers in order to release mouse.

        @param button: Text description of which button was pressed,
        e.g. 'l' for left.
        """
        # if the same button is released that captured the mouse,
        # and we have the mouse, release it. (we need to get rid
        # of this as soon as possible; if we don't and one of the
        # event handlers raises an exception, mouse is never
        # released.)
        if self._rwi_use_capture and self._rwi._own_mouse and \
           button==self._rwi._mouse_capture_button:
            self._rwi.ReleaseMouse()
            self._rwi._own_mouse = False
    def _helper_handler_capture(self, button):
        """Helper method to be called at end after button down
        helpers.

        @param button: Text description of button that was pressed,
        e.g. 'l' for left.
        """
        # save the button and capture mouse until the button is
        # released we only capture the mouse if it hasn't already
        # been captured
        if self._rwi_use_capture and not self._rwi._own_mouse:
            self._rwi._own_mouse = True
            self._rwi._mouse_capture_button = button
            self._rwi.CaptureMouse()
def _helper_handler_preamble(self, e, focus=True):
e.Skip(False)
# Skip(False) won't search for other event
# handlers
self.event.wx_event = e
if focus:
# we need to take focus... else some other subwindow keeps it
# once we've been there to select a module for example
self._rwi.SetFocus()
def _helper_glyph_button_down(self, event_name):
ex, ey = self.event.disp_pos
ret = self._pick_glyph(ex,ey)
if ret:
pc, psp = ret
self.event.clicked_object = pc
self.event.name = event_name
pc.notify(event_name)
else:
self.event.clicked_object = None
# we only give the canvas the event if the glyph didn't
# take it
self.event.name = event_name
self.notify(event_name)
def _helper_glyph_button_up(self, event_name):
ex, ey = self.event.disp_pos
ret = self._pick_glyph(ex,ey)
if ret:
pc, psp = ret
self.event.name = event_name
pc.notify(event_name)
else:
self.event.name = event_name
self.notify(event_name)
# button goes up, object is not clicked anymore
self.event.clicked_object = None
def _handler_char(self, e):
# we're disabling all VTK. if we don't, the standard
# VTK keys such as 'r' (reset), '3' (stereo) and especially
# 'f' (fly to) can screw up things quite badly.
# if ctrl, shift or alt is involved, we should pass it on to
# wx (could be menu keys for example).
# if not, we just eat up the event.
if e.ControlDown() or e.ShiftDown() or e.AltDown():
e.Skip()
def _handler_ld(self, e):
self._helper_handler_preamble(e)
#ctrl, shift = event.ControlDown(), event.ShiftDown()
#self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
# ctrl, shift, chr(0), 0, None)
self.event.left_button = True
self._helper_glyph_button_down('left_button_down')
self._helper_handler_capture('l')
def _handler_lu(self, e):
dprint("_handler_lu::")
self._helper_handler_preamble(e, focus=False)
self._helper_handler_capture_release('l')
self.event.left_button = False
self._helper_glyph_button_up('left_button_up')
def _handler_ldc(self, e):
self._helper_handler_preamble(e)
self._helper_glyph_button_down('left_button_dclick')
def _handler_md(self, e):
self._helper_handler_preamble(e)
self.event.middle_button = True
self._helper_glyph_button_down('middle_button_down')
def _handler_mu(self, e):
self._helper_handler_preamble(e, focus=False)
self.event.middle_button = False
self._helper_glyph_button_up('middle_button_up')
def _handler_rd(self, e):
self._helper_handler_preamble(e)
if e.Dragging():
return
self.event.right_button = True
self._helper_glyph_button_down('right_button_down')
def _handler_ru(self, e):
self._helper_handler_preamble(e, focus=False)
if e.Dragging():
return
self.event.right_button = False
self._helper_glyph_button_up('right_button_up')
    def _pick_glyph(self, ex, ey):
        """Pick the glyph at the given VTK display position.

        @param ex: display x (pixels).
        @param ey: display y (pixels, VTK convention: y already
        flipped, origin bottom-left).
        @returns: tuple (picked_cobject, sub_prop) where sub_prop is
        the leaf actor of the picked assembly (or None), or None if no
        glyph was hit.
        """
        p = vtk.vtkPicker()
        p.SetTolerance(0.00001) # this is perhaps still too large

        # build a pick list restricted to the props of glyph objects,
        # so lines / rbboxes can never be picked here
        for i in self._cobjects:
            if isinstance(i, DeVIDECanvasGlyph):
                for prop in i.props:
                    p.AddPickList(prop)

        p.PickFromListOn()
        ret = p.Pick((ex, ey, 0), self._ren)

        if ret:
            #pc = p.GetProp3Ds()
            #pc.InitTraversal()
            #prop = pc.GetNextItemAsObject()
            prop = p.GetAssembly() # for now we only want this.

            try:
                picked_cobject = self.prop_to_glyph[prop]
            except KeyError:
                dprint("_pick_glyph:: couldn't find prop in p2g dict")
                return None
            else:
                # need to find out WHICH sub-actor was picked.
                if p.GetPath().GetNumberOfItems() == 2:
                    sub_prop = \
                            p.GetPath().GetItemAsObject(1).GetViewProp()
                else:
                    sub_prop = None

                # our assembly is one level deep, so 1 is the one we
                # want (actor at leaf node)
                return (picked_cobject, sub_prop)

        return None
def _zoom(self, amount):
cam = self._ren.GetActiveCamera()
if cam.GetParallelProjection():
cam.SetParallelScale(cam.GetParallelScale() / amount)
else:
self._ren.GetActiveCamera().Dolly(amount)
self._ren.ResetCameraClippingRange()
self._ren.UpdateLightsGeometryToFollowCamera()
self.redraw()
def _handler_wheel(self, event):
# wheel forward = zoom in
# wheel backward = zoom out
factor = [-2.0, 2.0][event.GetWheelRotation() > 0.0]
self._zoom(1.1 ** factor)
#event.GetWheelDelta()
    def get_top_left_world(self):
        """Return top-left of canvas (0,0 in wx) in world coords.

        In world coordinates, top_y > bottom_y.
        """
        return self.wx_to_world(0,0)
def get_bottom_right_world(self):
"""Return bottom-right of canvas (sizex, sizey in wx) in world
coords.
In world coordinates, bottom_y < top_y.
"""
x,y = self._rwi.GetSize()
return self.wx_to_world(x-1, y-1)
def get_wh_world(self):
"""Return width and height of visible canvas in world
coordinates.
"""
tl = self.get_top_left_world()
br = self.get_bottom_right_world()
return br[0] - tl[0], tl[1] - br[1]
def get_motion_vector_world(self, world_depth):
"""Calculate motion vector in world space represented by last
mouse delta.
"""
c = self._ren.GetActiveCamera()
display_depth = self.world_to_display((0.0,0.0, world_depth))[2]
new_pick_pt = self.display_to_world(self.event.disp_pos +
(display_depth,))
fy = self.flip_y(self.event.last_pos[1])
old_pick_pt = self.display_to_world((self.event.last_pos[0], fy,
display_depth))
# old_pick_pt - new_pick_pt (reverse of camera!)
motion_vector = map(operator.sub, new_pick_pt,
old_pick_pt)
return motion_vector
def _handler_motion(self, event):
"""MouseMoveEvent observer for RWI.
o contains a binding to the RWI.
"""
#self._helper_handler_preamble(event)
self.event.wx_event = event
# event position is viewport relative (i.e. in pixels,
# top-left is 0,0)
ex, ey = event.GetX(), event.GetY()
# we need to flip Y to get VTK display coords
self.event.disp_pos = ex, self._rwi.GetSize()[1] - ey - 1
# before setting the new pos, record the delta
self.event.pos_delta = (ex - self.event.pos[0],
ey - self.event.pos[1])
self.event.last_pos = self.event.pos
self.event.pos = ex, ey
wex, wey, wez = self.display_to_world(self.event.disp_pos)
self.event.world_pos = wex, wey, wez
# add the "real" coords to the event structure
self.event.realX = wex
self.event.realY = wey
self.event.realZ = wez
# dragging gets preference...
if event.Dragging() and event.MiddleIsDown() and event.ShiftDown():
centre = self._ren.GetCenter()
# drag up = zoom in
# drag down = zoom out
dyf = - 10.0 * self.event.pos_delta[1] / centre[1]
self._zoom(1.1 ** dyf)
elif event.Dragging() and event.MiddleIsDown():
# move camera, according to self.event.pos_delta
c = self._ren.GetActiveCamera()
cfp = list(c.GetFocalPoint())
cp = list(c.GetPosition())
focal_depth = self.world_to_display(cfp)[2]
new_pick_pt = self.display_to_world(self.event.disp_pos +
(focal_depth,))
fy = self.flip_y(self.event.last_pos[1])
old_pick_pt = self.display_to_world((self.event.last_pos[0], fy,
focal_depth))
# old_pick_pt - new_pick_pt (reverse of camera!)
motion_vector = map(operator.sub, old_pick_pt,
new_pick_pt)
new_cfp = map(operator.add, cfp, motion_vector)
new_cp = map(operator.add, cp, motion_vector)
c.SetFocalPoint(new_cfp)
c.SetPosition(new_cp)
self.redraw()
else: # none of the preference events want this...
pg_ret = self._pick_glyph(ex, self.flip_y(ey))
if pg_ret:
picked_cobject, self.event.picked_sub_prop = pg_ret
if self.event.left_button and event.Dragging() and \
self.event.clicked_object == picked_cobject:
# left dragging on a glyph only works if THAT
# glyph was clicked (and the mouse button is still
# down)
self.event.name = 'dragging'
if self._draggedObject is None:
self._draggedObject = picked_cobject
# the actual event will be fired further below
if not picked_cobject is self.event.picked_cobject:
self.event.picked_cobject = picked_cobject
self.event.name = 'enter'
picked_cobject.notify('enter')
else:
self.event.name = 'motion'
picked_cobject.notify('motion')
else:
# nothing under the mouse...
if self.event.picked_cobject:
self.event.name = 'exit'
self.event.picked_cobject.notify('exit')
self.event.picked_cobject = None
if event.Dragging() and self._draggedObject:
# so we are Dragging() and there is a draggedObject...
# whether draggedObject was set above, or in a
# previous call of this event handler, we have to keep
# on firing these drag events until draggedObject is
# canceled.
self.event.name = 'dragging'
self._draggedObject.notify('dragging')
if event.Dragging and not self._draggedObject:
# user is dragging on canvas (no draggedObject!)
self.event.name = 'dragging'
self.notify(self.event.name)
if not event.Dragging():
# when user stops dragging the mouse, lose the object
if not self._draggedObject is None:
dprint("_handler_motion:: dragging -> off")
self._draggedObject.draggedPort = None
self._draggedObject = None
    def add_object(self, cobj):
        """Add a canvas object (glyph, line, rbbox, ...) to the canvas
        and register all of its props with the renderer.

        Adding None or an already-added object is a harmless no-op.
        """
        if cobj and cobj not in self._cobjects:
            cobj.canvas = self
            self._cobjects.append(cobj)
            for prop in cobj.props:
                self._ren.AddViewProp(prop)
                # we only add prop to cobject if it's a glyph
                if isinstance(cobj, DeVIDECanvasGlyph):
                    self.prop_to_glyph[prop] = cobj
            # NOTE: double-underscore name is mangled to the canvas
            # class's private attribute; getObjectWithMouse reads the
            # same mangled name from within this class
            cobj.__hasMouse = False
    def redraw(self):
        """Redraw the whole scene by asking the render window
        interactor to re-render.
        """
        self._rwi.Render()
def update_all_geometry(self):
"""Update all geometry.
This is useful if many of the objects states have been changed
(e.g. new connections) and the connection visual states have
to be updated.
"""
for o in self._cobjects:
o.update_geometry()
def update_picked_cobject_at_drop(self, ex, ey):
"""Method to be used in the GraphEditor DropTarget
(geCanvasDropTarget) to make sure that the correct glyph is
selected.
Problem is that the application gets blocked during
wxDropSource.DoDragDrop(), so that if the user drags things
from for example the DICOMBrowser to a DICOMReader on the
canvas, the canvas doesn't know that the DICOMReader has been
picked.
If this method is called at drop time, all is well.
"""
pg_ret = self._pick_glyph(ex, self.flip_y(ey))
if pg_ret:
self.event.picked_cobject, self.event.picked_sub_prop = pg_ret
def remove_object(self, cobj):
if cobj and cobj in self._cobjects:
for prop in cobj.props:
self._ren.RemoveViewProp(prop)
# it's only in here if it's a glyph
if isinstance(cobj, DeVIDECanvasGlyph):
del self.prop_to_glyph[prop]
cobj.canvas = None
if self._draggedObject == cobj:
self._draggedObject = None
del self._cobjects[self._cobjects.index(cobj)]
    def reset_view(self):
        """Make sure that all actors (glyphs, connections, etc.) are
        visible by resetting the camera, then re-render.
        """
        self._ren.ResetCamera()
        self.redraw()
    def getDraggedObject(self):
        """Return the canvas object currently being dragged, or None if
        no drag is in progress.
        """
        return self._draggedObject
def getObjectsOfClass(self, classt):
return [i for i in self._cobjects if isinstance(i, classt)]
    def getObjectWithMouse(self):
        """Return object currently containing mouse, None if no object has
        the mouse.

        NOTE: this reads the name-mangled __hasMouse attribute that
        add_object() sets from within this same class.
        """
        for cobject in self._cobjects:
            if cobject.__hasMouse:
                return cobject

        return None
def drag_object(self, cobj, delta):
"""Move object with delta in world space.
"""
cpos = cobj.get_position() # this gives us 2D in world space
npos = (cpos[0] + delta[0], cpos[1] + delta[1])
cobj.set_position(npos)
cobj.update_geometry()
def pan_canvas_world(self, delta_x, delta_y):
c = self._ren.GetActiveCamera()
cfp = list(c.GetFocalPoint())
cfp[0] += delta_x
cfp[1] += delta_y
c.SetFocalPoint(cfp)
cp = list(c.GetPosition())
cp[0] += delta_x
cp[1] += delta_y
c.SetPosition(cp)
self.redraw()
| Python |
# dummy
| Python |
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
from module_kits.misc_kit.mixins import SubjectMixin
import vtk
# z-coordinate at which the rubber-band box (RBB) is drawn, so that it
# floats in front of the other canvas objects.
# NOTE: when this is 1.0, the box does not appear until a canvas reset
# has been done...
RBBOX_HEIGHT = 0.9
class UnfilledBlock:
    """Create block outline.

    A single closed polyline visiting the four corners of a rectangle
    in the z = const plane.  Starts out as the unit square; call
    update_geometry() to resize it.
    """

    def __init__(self):
        self.polydata = lp = vtk.vtkPolyData()

        # the four corners of the (initially unit) rectangle
        pts = vtk.vtkPoints()
        pts.InsertPoint(0, 0, 0, 0)
        pts.InsertPoint(1, 1, 0, 0)
        pts.InsertPoint(2, 1, 1, 0)
        pts.InsertPoint(3, 0, 1, 0)
        lp.SetPoints(pts)

        # single closed polyline: 0 -> 1 -> 2 -> 3 -> back to 0
        cells = vtk.vtkCellArray()
        cells.InsertNextCell(5)
        cells.InsertCellPoint(0)
        cells.InsertCellPoint(1)
        cells.InsertCellPoint(2)
        cells.InsertCellPoint(3)
        cells.InsertCellPoint(0)
        lp.SetLines(cells)

    def update_geometry(self, width, height, z):
        """Resize the outline to width x height at depth z."""
        lp = self.polydata
        pts = lp.GetPoints()
        pts.SetPoint(0, 0,0,z)
        pts.SetPoint(1, width, 0, z)
        pts.SetPoint(2, width, height, z)
        # BUGFIX: this line used the undefined name 'c' instead of 'z',
        # raising a NameError whenever update_geometry() was called
        pts.SetPoint(3, 0, height, z)

        # FIXME: is there no cleaner way of explaining to the polydata
        # that it has been updated?
        lp.SetPoints(None)
        lp.SetPoints(pts)
class FilledBlock:
    """Create filled block: a single quad in the z = const plane.

    Starts out as the unit square; call update_geometry() to resize.
    """

    def __init__(self):
        self.polydata = pd = vtk.vtkPolyData()

        # four corners of the (initially unit) rectangle
        pts = vtk.vtkPoints()
        for idx, (cx, cy) in enumerate(((0, 0), (1, 0), (1, 1), (0, 1))):
            pts.InsertPoint(idx, cx, cy, 0)
        pd.SetPoints(pts)

        # a single quad cell connecting the four corners in order
        cells = vtk.vtkCellArray()
        cells.InsertNextCell(4)
        for idx in range(4):
            cells.InsertCellPoint(idx)
        pd.SetPolys(cells)

    def update_geometry(self, width, height, z):
        """Resize the quad to width x height at depth z."""
        pd = self.polydata
        pts = pd.GetPoints()
        corners = ((0, 0), (width, 0), (width, height), (0, height))
        for idx, (cx, cy) in enumerate(corners):
            pts.SetPoint(idx, cx, cy, z)

        # re-assigning the points makes the polydata notice the change
        # (FIXME: is there no cleaner way of doing this?)
        pd.SetPoints(None)
        pd.SetPoints(pts)
class BeveledEdgeBlock:
    """Create PolyData beveled edge block.

    Two stacked quads: a flat outer rectangle and an inner rectangle
    raised by self.eps and inset by self.edge; per-vertex RGBA colours
    give the beveled shading.
    """

    def __init__(self):
        """Create all required geometry according to default size.
        Call update_geometry to update this to new specifications.
        """

        self.polydata = vtk.vtkPolyData()

        # width of the edge
        self.edge = edge = 5
        # how much higher is inside rectangle than the outside (this
        # is what creates the beveled effect)
        self.eps = eps = 1.0/100.0

        # dummy variable for now
        width = 1

        # create points defining the geometry
        pts = vtk.vtkPoints()
        # InsertPoint takes care of memory allocation
        # points 0-3: outer rectangle, counter-clockwise from origin
        pts.InsertPoint(0, 0, 0, 0)
        pts.InsertPoint(1, width, 0, 0)
        pts.InsertPoint(2, width, 1, 0)
        pts.InsertPoint(3, 0, 1, 0)

        # points 4-7: inner rectangle, inset by 'edge', raised by eps
        pts.InsertPoint(4, 0+edge, 0+edge, eps)
        pts.InsertPoint(5, width-edge, 0+edge, eps)
        pts.InsertPoint(6, width-edge, 1-edge, eps)
        pts.InsertPoint(7, 0+edge, 1-edge, eps)

        self.polydata.SetPoints(pts) # assign to the polydata
        self.pts = pts

        # create cells connecting points to each other
        cells = vtk.vtkCellArray()
        cells.InsertNextCell(4)
        cells.InsertCellPoint(0)
        cells.InsertCellPoint(1)
        cells.InsertCellPoint(2)
        cells.InsertCellPoint(3)

        cells.InsertNextCell(4)
        cells.InsertCellPoint(4)
        cells.InsertCellPoint(5)
        cells.InsertCellPoint(6)
        cells.InsertCellPoint(7)

        self.polydata.SetPolys(cells) # assign to the polydata

        # create pointdata
        # one RGBA tuple per point; the greyscale differences between
        # outer and inner vertices produce the bevel shading
        arr = vtk.vtkUnsignedCharArray()
        arr.SetNumberOfComponents(4)
        arr.SetNumberOfTuples(8)
        arr.SetTuple4(0, 92,92,92,255)
        arr.SetTuple4(1, 130,130,130,255)
        arr.SetTuple4(2, 92,92,92,255)
        arr.SetTuple4(3, 0,0,0,255)
        arr.SetTuple4(4, 92,92,92,255)
        arr.SetTuple4(5, 0,0,0,255)
        arr.SetTuple4(6, 92,92,92,255)
        arr.SetTuple4(7, 192,192,192,255)
        arr.SetName('my_array')

        # and assign it as "scalars"
        self.polydata.GetPointData().SetScalars(arr)

    def update_geometry(self, width, height, z):
        """Update the geometry to the given specs. self.polydata will
        be modified so that any downstream logic knows to update.
        """

        pts = self.pts
        edge = self.edge
        eps = self.eps

        # outer rectangle
        pts.SetPoint(0, 0,0,z)
        pts.SetPoint(1, width, 0, z)
        pts.SetPoint(2, width, height, z)
        pts.SetPoint(3, 0, height, z)

        # inner rectangle
        pts.SetPoint(4, 0+edge, 0+edge, z+eps)
        pts.SetPoint(5, width-edge, 0+edge, z+eps)
        pts.SetPoint(6, width-edge, height-edge, z+eps)
        pts.SetPoint(7, 0+edge, height-edge, z+eps)

        # re-assign the points so the polydata notices the update
        self.polydata.SetPoints(None)
        self.polydata.SetPoints(pts)
#############################################################################
class DeVIDECanvasObject(SubjectMixin):
    """Base class for everything that can live on a DeVIDECanvas.

    Each object carries a 2-D world-space position and a list of VTK
    props that the canvas adds to its renderer.
    """

    def __init__(self, canvas, position):
        # call parent ctor
        SubjectMixin.__init__(self)

        self.canvas = canvas
        self._position = position

        # one (initially empty) observer list per supported event name
        event_names = ('enter', 'exit', 'drag', 'buttonDown',
                       'buttonUp', 'buttonDClick', 'motion')
        self._observers = dict((name, []) for name in event_names)

        # all canvas objects have a vtk prop that can be added to a
        # vtk renderer.
        self.props = []

    def close(self):
        """Take care of any cleanup here.
        """
        SubjectMixin.close(self)

    def get_bounds(self):
        raise NotImplementedError

    def get_position(self):
        return self._position

    def set_position(self, destination):
        self._position = destination

    def hit_test(self, x, y):
        # subclasses override; the base object occupies no area
        return False

    def is_inside_rect(self, x, y, width, height):
        # subclasses override; the base object occupies no area
        return False
class DeVIDECanvasRBBox(DeVIDECanvasObject):
    """Rubber-band box that can be used to give feedback rubber-band
    selection interaction. Thingy.

    Rendered as a semi-transparent blue plane at RBBOX_HEIGHT so it
    floats in front of the other canvas objects.
    """

    def __init__(self, canvas, corner_bl, (width, height)):
        """ctor. corner_bl is the bottom-left corner of the rbbox in
        world coords; width and height extend from there.
        """

        self.corner_bl = corner_bl
        self.width, self.height = width, height

        DeVIDECanvasObject.__init__(self, canvas, corner_bl)

        self._create_geometry()
        self.update_geometry()

    def _create_geometry(self):
        """Build the semi-transparent plane source / mapper / actor."""
        self._plane_source = vtk.vtkPlaneSource()
        self._plane_source.SetNormal((0.0,0.0,1.0))
        self._plane_source.SetXResolution(1)
        self._plane_source.SetYResolution(1)

        m = vtk.vtkPolyDataMapper()
        m.SetInput(self._plane_source.GetOutput())

        a = vtk.vtkActor()
        a.SetMapper(m)
        a.GetProperty().SetOpacity(0.3)
        a.GetProperty().SetColor(0.0, 0.0, 0.7)

        self.props = [a]

    def update_geometry(self):
        """Reposition / resize the plane to the current corner, width
        and height.
        """
        # bring everything up to the correct height (it should be in
        # front of all other objects)
        corner_bl = tuple(self.corner_bl[0:2]) + (RBBOX_HEIGHT,)
        self._plane_source.SetOrigin(corner_bl)

        # clamp zero extents to a small value
        # (presumably this avoids a degenerate plane - confirm)
        if self.width == 0:
            self.width = 0.1

        if self.height == 0:
            self.height = 0.1

        pos1 = [i+j for i,j in zip(corner_bl, (0.0, self.height, 0.0))]
        pos2 = [i+j for i,j in zip(corner_bl, (self.width, 0.0, 0.0))]

        self._plane_source.SetPoint1(pos1)
        self._plane_source.SetPoint2(pos2)
#############################################################################
class DeVIDECanvasSimpleLine(DeVIDECanvasObject):
    """Straight black line between two 3-D world-space points."""

    def __init__(self, canvas, src, dst):
        """src and dst are 3D world space coordinates.
        """
        self.src = src
        self.dst = dst

        # call parent CTOR
        DeVIDECanvasObject.__init__(self, canvas, src)

        self._create_geometry()
        self.update_geometry()

    def _create_geometry(self):
        """Build the line source / mapper / actor pipeline."""
        self._line_source = vtk.vtkLineSource()

        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInput(self._line_source.GetOutput())

        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetColor(0.0, 0.0, 0.0)

        self.props = [actor]

    def update_geometry(self):
        """Push the current endpoints into the line source."""
        self._line_source.SetPoint1(self.src)
        self._line_source.SetPoint2(self.dst)
        self._line_source.Update()
#############################################################################
class DeVIDECanvasLine(DeVIDECanvasObject):
    """Spline connection between the output port of one glyph and the
    input port of another.

    The curve is a vtkParametricSpline through self._line_points; the
    first and last points sit on the ports themselves, and the second
    / second-last points provide a vertical overshoot that leads the
    line cleanly out of each port.
    """

    # this is used by the routing algorithm to route lines around glyphs
    # with a certain border; this is also used by updateEndPoints to bring
    # the connection out of the connection port initially
    routingOvershoot = 5

    # line widths (pixels) for the normal / highlighted states
    _normal_width = 2
    _highlight_width = 2

    def __init__(self, canvas, fromGlyph, fromOutputIdx, toGlyph, toInputIdx):
        """A line object for the canvas.

        linePoints is just a list of python tuples, each representing a
        coordinate of a node in the line. The position is assumed to be
        the first point.
        """

        self.fromGlyph = fromGlyph
        self.fromOutputIdx = fromOutputIdx
        self.toGlyph = toGlyph
        self.toInputIdx = toInputIdx

        # colour chosen per destination input index, so parallel
        # connections into the same glyph are distinguishable
        colours = [(0, 0, 255), # blue
                   (128, 64, 0), # brown
                   (0, 128, 0), # green
                   (255, 128, 64), # orange
                   (128, 0, 255), # purple
                   (128, 128, 64)] # mustard

        col = colours[self.toInputIdx % (len(colours))]
        # convert 0-255 ints to the 0.0-1.0 floats VTK expects
        self.line_colour = [i / 255.0 for i in col]

        # any line begins with 4 (four) points
        self.updateEndPoints()

        # now we call the parent ctor
        DeVIDECanvasObject.__init__(self, canvas, self._line_points[0])

        self._create_geometry()
        self.update_geometry()

    def close(self):
        # delete things that shouldn't be left hanging around
        # NOTE(review): does not call DeVIDECanvasObject.close() -
        # confirm whether observer cleanup is needed here.
        del self.fromGlyph
        del self.toGlyph

    def _create_geometry(self):
        """Build the spline source / mapper / actor pipeline."""
        self._spline_source = vtk.vtkParametricFunctionSource()
        s = vtk.vtkParametricSpline()

        if False:
            # these are quite ugly...
            # later: factor this out into method, so that we can
            # experiment live with different spline params. For now
            # the vtkCardinal spline that is used is muuuch prettier.
            ksplines = []
            for i in range(3):
                ksplines.append(vtk.vtkKochanekSpline())
                ksplines[-1].SetDefaultTension(0)
                ksplines[-1].SetDefaultContinuity(0)
                ksplines[-1].SetDefaultBias(0)

            s.SetXSpline(ksplines[0])
            s.SetYSpline(ksplines[1])
            s.SetZSpline(ksplines[2])

        pts = vtk.vtkPoints()
        s.SetPoints(pts)
        self._spline_source.SetParametricFunction(s)

        m = vtk.vtkPolyDataMapper()
        m.SetInput(self._spline_source.GetOutput())

        a = vtk.vtkActor()
        a.SetMapper(m)

        a.GetProperty().SetColor(self.line_colour)
        a.GetProperty().SetLineWidth(self._normal_width)

        self.props = [a]

    def update_geometry(self):
        """Copy self._line_points into the spline and re-evaluate."""
        pts = vtk.vtkPoints()
        for p in self._line_points:
            # line points are 2-D tuples; the spline lives at z = 0
            pts.InsertNextPoint(p + (0.0,))

        self._spline_source.GetParametricFunction().SetPoints(pts)
        self._spline_source.Update()

    def get_bounds(self):
        # totally hokey: for now we just return the bounding box surrounding
        # the first two points - ideally we should iterate through the lines,
        # find extents and pick a position and bounds accordingly
        return (self._line_points[-1][0] - self._line_points[0][0],
                self._line_points[-1][1] - self._line_points[0][1])

    def getUpperLeftWidthHeight(self):
        """This returns the upperLeft coordinate and the width and height of
        the bounding box enclosing the third-last and second-last points.
        This is used for fast intersection checking with rectangles.
        """
        p3 = self._line_points[-3]
        p2 = self._line_points[-2]

        upperLeftX = [p3[0], p2[0]][bool(p2[0] < p3[0])]
        upperLeftY = [p3[1], p2[1]][bool(p2[1] < p3[1])]
        width = abs(p2[0] - p3[0])
        height = abs(p2[1] - p3[1])

        return ((upperLeftX, upperLeftY), (width, height))

    def getThirdLastSecondLast(self):
        """Return the third-last and second-last line points."""
        return (self._line_points[-3], self._line_points[-2])

    def hitTest(self, x, y):
        # maybe one day we will make the hitTest work, not tonight
        # I don't need it
        return False

    def insertRoutingPoint(self, x, y):
        """Insert new point x,y before second-last point, i.e. the new point
        becomes the third-last point.

        @returns: True if inserted, False if (x,y) was already part of
        the line.
        """
        if (x,y) not in self._line_points:
            self._line_points.insert(len(self._line_points) - 2, (x, y))
            return True
        else:
            return False

    def set_highlight(self):
        """Switch the line to its highlighted (stippled) rendering."""
        prop = self.props[0].GetProperty()
        # for more stipple patterns, see:
        # http://fly.cc.fer.hr/~unreal/theredbook/chapter02.html
        prop.SetLineStipplePattern(0xAAAA)
        prop.SetLineStippleRepeatFactor(2)
        prop.SetLineWidth(self._highlight_width)

    def set_normal(self):
        """Switch the line back to its normal (solid) rendering."""
        prop = self.props[0].GetProperty()
        prop.SetLineStipplePattern(0xFFFF)
        prop.SetLineStippleRepeatFactor(1)
        prop.SetLineWidth(self._normal_width)

    def updateEndPoints(self):
        """(Re)build the four mandatory line points from the current
        port positions of the two connected glyphs.
        """
        # first get us just out of the port, then create margin between
        # us and glyph
        dcg = DeVIDECanvasGlyph
        boostFromPort = dcg._pHeight / 2 + self.routingOvershoot

        self._line_points = [(), (), (), ()]

        # start at the source glyph's output port, overshooting
        # downwards (smaller y)
        self._line_points[0] = self.fromGlyph.get_centre_of_port(
                1, self.fromOutputIdx)[0:2]
        self._line_points[1] = (self._line_points[0][0],
                self._line_points[0][1] - boostFromPort)

        # end at the destination glyph's input port, overshooting
        # upwards (larger y)
        self._line_points[-1] = self.toGlyph.get_centre_of_port(
                0, self.toInputIdx)[0:2]
        self._line_points[-2] = (self._line_points[-1][0],
                self._line_points[-1][1] + boostFromPort)
#############################################################################
class DeVIDECanvasGlyph(DeVIDECanvasObject):
    """Object representing glyph on canvas.

    @ivar inputLines: list of self._numInputs DeVIDECanvasLine
    instances that connect to this glyph's inputs.
    @ivar outputLines: list of self._numOutputs lists of
    DeVIDECanvasLine instances that originate from this glyphs
    outputs.
    @ivar position: this is the position of the bottom left corner of
    the glyph in world space.  Remember that (0,0) is also bottom left
    of the canvas.
    """

    # at start and end of glyph
    # this has to take into account the bevel edge too
    _horizBorder = 12
    # between ports
    _horizSpacing = 10
    # at top and bottom of glyph
    _vertBorder = 20
    # port block dimensions
    _pWidth = 20
    _pHeight = 20

    _glyph_bevel_edge = 7

    # z-ordering of the glyph's layered parts (higher = in front)
    _glyph_z = 0.1
    _glyph_outline_z = 0.15
    _glyph_selection_z = 0.6
    _glyph_blocked_z = 0.7
    _port_z = 0.8
    _text_z = 0.4

    _glyph_normal_col = (0.75, 0.75, 0.75)
    _glyph_selected_col = (0.2, 0.367, 0.656)
    _glyph_blocked_col = (0.06, 0.06, 0.06)
    _text_normal_col = (0.0, 0.0, 0.0)
    # text_selected_col used to be white, but the vtkTextActor3D()
    # has broken aliasing that is more visible on a darker
    # background.
    #text_selected_col = (1.0, 1.0, 1.0)
    _text_selected_col = (0.0, 0.0, 0.0)
    # dark green to light green
    _port_conn_col = (0.0, 218 / 255.0, 25 / 255.0)
    _port_disconn_col = (0, 93 / 255.0, 11 / 255.0)

    def __init__(self, canvas, position, numInputs, numOutputs,
            labelList, module_instance):
        """ctor.

        @param position: bottom-left corner in world space.
        @param labelList: list of strings rendered as the glyph's
        multi-line caption.
        @param module_instance: the DeVIDE module this glyph stands
        for.
        """
        # parent constructor
        DeVIDECanvasObject.__init__(self, canvas, position)

        # we'll fill this out later
        self._size = (0,0)

        self._numInputs = numInputs
        self.inputLines = [None] * self._numInputs

        self._numOutputs = numOutputs
        # be careful with list concatenation!
        self.outputLines = [[] for i in range(self._numOutputs)]

        self._labelList = labelList
        self.module_instance = module_instance

        # usually 2-element list. elem0 is 0 for input port and 1 for
        # output port. elem1 is the index.
        self.draggedPort = None
        self.enteredPort = None

        self.selected = False
        self.blocked = False

        # we'll collect the glyph and its ports in this assembly
        self.prop1 = vtk.vtkAssembly()

        # the main body glyph
        self._beb = BeveledEdgeBlock()
        self._selection_block = FilledBlock()
        self._blocked_block = FilledBlock()
        self._rbsa = vtk.vtkActor()

        # and of course the label
        self._tsa = vtk.vtkTextActor3D()

        # one (cube source, actor) pair per input / output port
        self._iportssa = \
                [(vtk.vtkCubeSource(),vtk.vtkActor()) for _ in
                        range(self._numInputs)]
        self._oportssa = \
                [(vtk.vtkCubeSource(),vtk.vtkActor()) for _ in
                        range(self._numOutputs)]

        self._create_geometry()
        self.update_geometry()

    def close(self):
        # break reference cycles so the glyph can be collected
        del self.module_instance
        del self.inputLines
        del self.outputLines

    def _create_geometry(self):
        """Build all VTK props for the glyph: text label, beveled body
        block, selection / blocked overlays and port cubes, all
        collected in the self.prop1 assembly.
        """
        # TEXT LABEL ##############################################
        tprop = self._tsa.GetTextProperty()
        tprop.SetFontFamilyToArial()
        tprop.SetFontSize(24)
        tprop.SetBold(0)
        tprop.SetItalic(0)
        tprop.SetShadow(0)
        tprop.SetColor((0,0,0))

        # GLYPH BLOCK ##############################################

        # remember this depth, others things have to be 'above' this
        # to be visible (such as the text!)

        m = vtk.vtkPolyDataMapper()
        m.SetInput(self._beb.polydata)
        self._rbsa.SetMapper(m)

        # we need Phong shading for the gradients
        p = self._rbsa.GetProperty()
        p.SetInterpolationToPhong()

        # Ka, background lighting coefficient
        p.SetAmbient(0.1)
        # light reflectance
        p.SetDiffuse(0.6)
        # the higher Ks, the more intense the highlights
        p.SetSpecular(0.4)
        # the higher the power, the more localised the
        # highlights
        p.SetSpecularPower(100)

        self.prop1.AddPart(self._rbsa)

        # GLYPH SELECTION OVERLAY #######################################
        m = vtk.vtkPolyDataMapper()
        m.SetInput(self._selection_block.polydata)
        a = vtk.vtkActor()
        a.SetMapper(m)
        a.GetProperty().SetOpacity(0.3)
        a.GetProperty().SetColor(self._glyph_selected_col)
        self.prop1.AddPart(a)
        self._selection_actor = a

        # GLYPH BLOCKED OVERLAY #######################################
        m = vtk.vtkPolyDataMapper()
        m.SetInput(self._blocked_block.polydata)
        a = vtk.vtkActor()
        a.SetMapper(m)
        a.GetProperty().SetOpacity(0.3)
        a.GetProperty().SetColor(self._glyph_blocked_col)
        self.prop1.AddPart(a)
        self._blocked_actor = a

        # you should really turn this into a class
        # let's make a line from scratch
        #m = vtk.vtkPolyDataMapper()
        #m.SetInput(lp)

        #a = vtk.vtkActor()
        #a.SetMapper(m)
        #self.prop1.AddPart(a)

        #prop = a.GetProperty()
        #prop.SetColor(0.1,0.1,0.1)
        #prop.SetLineWidth(1)

        #self._glyph_outline_polydata = lp

        # INPUTS ####################################################
        for i in range(self._numInputs):
            s,a = self._iportssa[i]
            s.SetYLength(self._pHeight)
            s.SetXLength(self._pWidth)
            m = vtk.vtkPolyDataMapper()
            m.SetInput(s.GetOutput())
            a.SetMapper(m)
            self.prop1.AddPart(a)

        # OUTPUTS (same pipeline as the inputs)
        for i in range(self._numOutputs):
            s,a = self._oportssa[i]
            s.SetYLength(self._pHeight)
            s.SetXLength(self._pWidth)
            m = vtk.vtkPolyDataMapper()
            m.SetInput(s.GetOutput())
            a.SetMapper(m)
            self.prop1.AddPart(a)

        self.prop1.SetPosition(self._position + (0.0,))

        self.props = [self.prop1, self._tsa]

    def update_geometry(self):
        """Recompute the glyph's size from the label, reposition the
        body blocks, overlays, label and port cubes, and apply the
        selection / blocked visual state.
        """
        # update text label ###################################
        # update the text caption
        # experiments with inserting spaces in front of text were not
        # successful (sizing still screws up)
        #nll = [' %s' % (l,) for l in self._labelList]
        nll = self._labelList
        self._tsa.SetInput('\n'.join(nll))

        # self._position is the bottom left corner of the button face
        ap = self._position[0] + self._horizBorder, \
             self._position[1] + self._vertBorder, self._text_z
        self._tsa.SetPosition(ap)

        tprop = self._tsa.GetTextProperty()
        # index by bool: False -> normal colour, True -> selected
        tcol = [self._text_normal_col, self._text_selected_col]\
               [self.selected]
        tprop.SetColor(tcol)

        # also get the text dimensions
        bb = [0,0,0,0]
        self._tsa.GetBoundingBox(bb)
        text_width, text_height = bb[1] - bb[0], bb[3] - bb[2]

        # update glyph position and size ######################
        self.props[0].SetPosition(self._position + (0.0,))

        # calculate our size
        # the width is the maximum(textWidth + twice the horizontal border,
        # all ports, horizontal borders and inter-port borders added up)
        maxPorts = max(self._numInputs, self._numOutputs)
        portsWidth = 2 * self._horizBorder + \
                     maxPorts * self._pWidth + \
                     (maxPorts - 1 ) * self._horizSpacing
        label_and_borders = text_width + 2 * self._horizBorder
        self._size = max(portsWidth, label_and_borders), \
                     text_height + \
                     2 * self._vertBorder

        # usually the position is the CENTRE of the button, so we
        # adjust so that the bottom left corner ends up at 0,0
        # (this is all relative to the Assembly)
        self._beb.update_geometry(
                self._size[0], self._size[1], self._glyph_z)
        self._selection_block.update_geometry(
                self._size[0], self._size[1], self._glyph_selection_z)
        self._blocked_block.update_geometry(
                self._size[0], self._size[1], self._glyph_blocked_z)

        # calc and update glyph colour ########################
        self._selection_actor.SetVisibility(self.selected)
        self._blocked_actor.SetVisibility(self.blocked)

        # position and colour all the inputs and outputs #####
        horizOffset = self._horizBorder
        horizStep = self._pWidth + self._horizSpacing

        # inputs sit along the top edge (y = height)
        for i in range(self._numInputs):
            col = [self._port_disconn_col,
                   self._port_conn_col][bool(self.inputLines[i])]
            s,a = self._iportssa[i]
            a.GetProperty().SetColor(col)
            a.SetPosition(
                    (horizOffset + i * horizStep + 0.5 * self._pWidth,
                        self._size[1], self._port_z))

        # outputs sit along the bottom edge (y = 0)
        for i in range(self._numOutputs):
            col = [self._port_disconn_col,
                   self._port_conn_col][bool(self.outputLines[i])]
            s,a = self._oportssa[i]
            a.GetProperty().SetColor(col)
            a.SetPosition(
                    (horizOffset + i * horizStep + 0.5 * self._pWidth,
                        0, self._port_z))

    def get_port_containing_mouse(self):
        """Given the current has_mouse and has_mouse_sub_prop
        information in canvas.event, determine the port side (input,
        output) and index of the port represented by the sub_prop.
        gah.

        @returns: tuple (inout, idx), where inout is 0 for input (top)
        and 1 for output (bottom).  Returns (-1,-1) if nothing was
        found.
        """
        if not self.canvas.event.picked_cobject is self:
            return (-1, -1)

        sp = self.canvas.event.picked_sub_prop
        if not sp:
            return (-1, -1)

        # linear search through the input, then output, port actors
        for i in range(len(self._iportssa)):
            s, a = self._iportssa[i]
            if sp is a:
                return (0,i)

        for i in range(len(self._oportssa)):
            s, a = self._oportssa[i]
            if sp is a:
                return (1,i)

        return (-1, -1)

    def get_bounds(self):
        """Return the glyph's (width, height) in world coords."""
        return self._size

    def get_centre_of_port(self, inOrOut, idx):
        """Given the side of the module and the index of the port,
        return the centre of the port in 3-D world coordinates.

        @param inOrOut: 0 is input side (top), 1 is output side
        (bottom).
        @param idx: zero-based index of the port.
        """
        horizOffset = self._position[0] + self._horizBorder
        horizStep = self._pWidth + self._horizSpacing

        cy = self._position[1] #+ self._pHeight / 2
        # remember, in world-space, y=0 is at the bottom!
        if inOrOut == 0:
            cy += self._size[1]

        cx = horizOffset + idx * horizStep + self._pWidth / 2

        return (cx, cy, 0.0)

    def get_bottom_left_top_right(self):
        """Return ((blx, bly), (trx, try)) corners in world coords."""
        return ((self._position[0],
                 self._position[1]),
                (self._position[0] + self._size[0] - 1,
                 self._position[1] + self._size[1] - 1))

    def getLabel(self):
        """Return the label list joined into a single string."""
        return ' '.join(self._labelList)

    def is_inside_rect(self, bottom_left, w, h):
        """Given world coordinates for the bottom left corner and a
        width and a height, determine if the complete glyph is inside.

        This method will ensure that bottom-left is bottom-left by
        swapping coordinates around
        """

        bl = list(bottom_left)
        tr = list((bl[0] + w, bl[1] + h))

        if bl[0] > tr[0]:
            # swap!
            bl[0],tr[0] = tr[0],bl[0]

        if bl[1] > tr[1]:
            bl[1],tr[1] = tr[1],bl[1]

        inside = True

        if self._position[0] < bl[0] or self._position[1] < bl[1]:
            inside = False

        elif (self._position[0] + self._size[0]) > tr[0] or \
                (self._position[1] + self._size[1]) > tr[1]:
            inside = False

        return inside

    def is_origin_inside_rect(self, bottom_left, w, h):
        """Only check origin (bottom-left) of glyph for containtment
        in specified rectangle.

        NOTE(review): unlike is_inside_rect, only the LOWER bound is
        tested here; confirm whether the upper bound check was
        intentionally left out.
        """

        bl = list(bottom_left)
        tr = list((bl[0] + w, bl[1] + h))

        if bl[0] > tr[0]:
            # swap!
            bl[0],tr[0] = tr[0],bl[0]

        if bl[1] > tr[1]:
            bl[1],tr[1] = tr[1],bl[1]

        inside = True

        if self._position[0] < bl[0] or self._position[1] < bl[1]:
            inside = False

        return inside

    def setLabelList(self,labelList):
        """Replace the glyph's caption lines; call update_geometry()
        afterwards to re-render.
        """
        self._labelList = labelList
| Python |
class canvasSubject:
    """Minimal subject/observer base for old-style canvas objects.

    Subclasses may pre-populate self._observers with one list per
    event name; addObserver also creates lists on demand.
    """

    def __init__(self):
        # maps eventName -> list of observer callables
        self._observers = {}

    def addObserver(self, eventName, observer):
        """Add an observer for a particular event.

        eventName can be one of 'enter', 'exit', 'drag', 'buttonDown'
        or 'buttonUp'. observer is a callable object that will be
        invoked at event time with parameters canvas object,
        eventName, and event.
        """
        # BUGFIX: plain indexing raised KeyError for any event not
        # pre-registered by a subclass (including ALL events on a
        # bare canvasSubject); create the list on demand instead.
        self._observers.setdefault(eventName, []).append(observer)

    def notifyObservers(self, eventName, event):
        """Invoke every observer registered for eventName.

        Events with no registered observers are silently ignored.
        """
        for observer in self._observers.get(eventName, []):
            observer(self, eventName, event)
| Python |
from canvasObject import *
from canvas import *
| Python |
from wxPython import wx
from canvasSubject import canvasSubject
#############################################################################
class canvasObject(canvasSubject):
    """Base class for objects living on the old wx-DC-based canvas.

    Carries a 2-D position, an optional owning canvas, and observer
    lists for the canvas interaction events.
    """

    def __init__(self, position):
        # call parent ctor
        canvasSubject.__init__(self)

        self._position = position
        self._canvas = None
        # one (initially empty) observer list per supported event name
        self._observers = {'enter' : [],
                           'exit' : [],
                           'drag' : [],
                           'buttonDown' : [],
                           'buttonUp' : [],
                           'buttonDClick' : [],
                           'motion' : []}

    def close(self):
        """Take care of any cleanup here.
        """
        pass

    def draw(self, dc):
        """Render this object on the given wx DC (no-op by default)."""
        pass

    def getBounds(self):
        raise NotImplementedError

    def getPosition(self):
        return self._position

    def setPosition(self, destination):
        self._position = destination

    def getCanvas(self):
        return self._canvas

    def hitTest(self, x, y):
        # subclasses override; the base object occupies no area
        return False

    def isInsideRect(self, x, y, width, height):
        # subclasses override; the base object occupies no area
        return False

    def setCanvas(self, canvas):
        self._canvas = canvas
#############################################################################
class coRectangle(canvasObject):
    """Axis-aligned, solid grey rectangle for the old wx-DC canvas."""

    def __init__(self, position, size):
        canvasObject.__init__(self, position)
        self._size = size

    def draw(self, dc):
        # solid light-grey fill
        dc.SetBrush(wx.wxBrush(wx.wxColour(192,192,192), wx.wxSOLID))
        dc.DrawRectangle(self._position[0], self._position[1],
                         self._size[0], self._size[1])

    def getBounds(self):
        """Return the rectangle's (width, height)."""
        return (self._size)

    def getTopLeftBottomRight(self):
        """Return ((tlx, tly), (brx, bry)) corner coordinates."""
        px, py = self._position[0], self._position[1]
        return ((px, py),
                (px + self._size[0] - 1,
                 py + self._size[1] - 1))

    def hitTest(self, x, y):
        # think carefully about the size of the rectangle...
        # e.g. from 0 to 2 is size 2 (spaces between vertices)
        in_x = self._position[0] <= x <= self._position[0] + self._size[0]
        in_y = self._position[1] <= y <= self._position[1] + self._size[1]
        return in_x and in_y

    def isInsideRect(self, x, y, width, height):
        """True if this rectangle lies completely within the rectangle
        anchored at (x, y) with the given width and height.
        """
        dx = (self._position[0] - x)
        dy = (self._position[1] - y)
        return 0 <= dx <= width and \
               0 <= dy <= height and \
               dx + self._size[0] <= width and \
               dy + self._size[1] <= height
#############################################################################
class coLine(canvasObject):
    """Canvas object representing a connection, drawn as a spline, from an
    output port on one glyph to an input port on another glyph.
    """

    # this is used by the routing algorithm to route lines around glyphs
    # with a certain border; this is also used by updateEndPoints to bring
    # the connection out of the connection port initially
    routingOvershoot = 10

    def __init__(self, fromGlyph, fromOutputIdx, toGlyph, toInputIdx):
        """A line object for the canvas.

        linePoints is just a list of python tuples, each representing a
        coordinate of a node in the line. The position is assumed to be
        the first point.
        """

        self.fromGlyph = fromGlyph
        self.fromOutputIdx = fromOutputIdx
        self.toGlyph = toGlyph
        self.toInputIdx = toInputIdx

        # 'BLACK' removed
        colourNames = ['BLUE', 'BROWN', 'MEDIUM FOREST GREEN',
                       'DARKORANGE1']
        # colour is keyed on the input port index, so the different inputs
        # of a glyph get visually distinguishable connections
        self.lineColourName = colourNames[self.toInputIdx % (len(colourNames))]

        # any line begins with 4 (four) points
        self.updateEndPoints()

        # position of a line is its very first point
        canvasObject.__init__(self, self._linePoints[0])

    def close(self):
        # delete things that shouldn't be left hanging around
        del self.fromGlyph
        del self.toGlyph

    def draw(self, dc):
        # lines are 2 pixels thick
        dc.SetPen(wx.wxPen(self.lineColourName, 2, wx.wxSOLID))

        # simple mode: just the lines thanks.
        #dc.DrawLines(self._linePoints)

        # spline mode for N points:
        # 1. Only 4 points: drawlines. DONE
        # 2. Draw line from 0 to 1
        # 3. Draw line from N-2 to N-1 (second last to last)
        # 4. Draw spline from 1 to N-2 (second to second last)

        # if len(self._linePoints) > 4:
        #     dc.DrawLines(self._linePoints[0:2]) # 0 - 1
        #     dc.DrawLines(self._linePoints[-2:]) # second last to last
        #     dc.DrawSpline(self._linePoints[1:-1])

        # else:
        #     dc.DrawLines(self._linePoints)

        # draw a wider black spline first, then the coloured one on top:
        # this gives the connection a black outline
        dc.SetPen(wx.wxPen('BLACK', 4, wx.wxSOLID))
        dc.DrawSpline(self._linePoints)

        dc.SetPen(wx.wxPen(self.lineColourName, 2, wx.wxSOLID))
        dc.DrawSpline(self._linePoints)

    def getBounds(self):
        # totally hokey: for now we just return the bounding box surrounding
        # the first two points - ideally we should iterate through the lines,
        # find extents and pick a position and bounds accordingly
        return (self._linePoints[-1][0] - self._linePoints[0][0],
                self._linePoints[-1][1] - self._linePoints[0][1])

    def getUpperLeftWidthHeight(self):
        """This returns the upperLeft coordinate and the width and height of
        the bounding box enclosing the third-last and second-last points.
        This is used for fast intersection checking with rectangles.
        """
        p3 = self._linePoints[-3]
        p2 = self._linePoints[-2]

        # [a, b][bool(cond)] idiom: selects b when cond is true, i.e. the
        # smaller of the two coordinates becomes the upper-left one
        upperLeftX = [p3[0], p2[0]][bool(p2[0] < p3[0])]
        upperLeftY = [p3[1], p2[1]][bool(p2[1] < p3[1])]
        width = abs(p2[0] - p3[0])
        height = abs(p2[1] - p3[1])

        return ((upperLeftX, upperLeftY), (width, height))

    def getThirdLastSecondLast(self):
        return (self._linePoints[-3], self._linePoints[-2])

    def hitTest(self, x, y):
        # maybe one day we will make the hitTest work, not tonight
        # I don't need it
        return False

    def insertRoutingPoint(self, x, y):
        """Insert new point x,y before second-last point, i.e. the new point
        becomes the third-last point.

        Returns True if the point was inserted, False if it was already
        part of the line.
        """
        if (x,y) not in self._linePoints:
            self._linePoints.insert(len(self._linePoints) - 2, (x, y))
            return True
        else:
            return False

    def updateEndPoints(self):
        # (re)compute the first two and last two points of the line from
        # the current positions of the two connected ports

        # first get us just out of the port, then create margin between
        # us and glyph
        boostFromPort = coGlyph._pHeight / 2 + coLine.routingOvershoot

        self._linePoints = [(), (), (), ()]

        # start: centre of the source glyph's output port, plus a point
        # boosted downwards out of the port
        self._linePoints[0] = self.fromGlyph.getCenterOfPort(
            1, self.fromOutputIdx)
        self._linePoints[1] = (self._linePoints[0][0],
                               self._linePoints[0][1] + boostFromPort)

        # end: centre of the target glyph's input port, preceded by a
        # point boosted upwards out of the port
        self._linePoints[-1] = self.toGlyph.getCenterOfPort(
            0, self.toInputIdx)
        self._linePoints[-2] = (self._linePoints[-1][0],
                                self._linePoints[-1][1] - boostFromPort)
#############################################################################
class coGlyph(coRectangle):
    """Rectangular glyph representing a module on the canvas, with input
    ports drawn along its top edge and output ports along its bottom edge.

    Note: the glyph's size is (re)computed inside draw(), because the text
    extents needed for sizing are only available from a device context.
    """

    # at start and end of glyph
    _horizBorder = 5
    # between ports
    _horizSpacing = 5
    # at top and bottom of glyph
    _vertBorder = 15
    # width and height in pixels of a single port block
    _pWidth = 10
    _pHeight = 10

    def __init__(self, position, numInputs, numOutputs,
                 labelList, module_instance):
        # parent constructor
        coRectangle.__init__(self, position, (0,0))
        # we'll fill this out later
        self._size = (0,0)

        self._numInputs = numInputs
        # one entry (line or None) per input port
        self.inputLines = [None] * self._numInputs

        self._numOutputs = numOutputs
        # be careful with list concatenation!
        # ([[]] * n would alias ONE shared list across all output ports)
        self.outputLines = [[] for i in range(self._numOutputs)]

        self._labelList = labelList
        self.module_instance = module_instance
        self.draggedPort = None
        self.enteredPort = None
        self.selected = False
        self.blocked = False

    def close(self):
        # drop references so the module instance and lines can be collected
        del self.module_instance
        del self.inputLines
        del self.outputLines

    def draw(self, dc):
        # base colours; selection and blocking are alpha-blended in below
        normal_colour = (192, 192, 192)
        selected_colour = (255, 0, 246)
        blocked_colour = (16, 16, 16)

        colour = normal_colour
        if self.selected:
            # 50/50 blend of the selection sheen with the current colour
            colour = [selected_colour[i] * 0.5 + colour[i] * 0.5
                      for i in range(3)]

        if self.blocked:
            # 50/50 blend of the blocked shade with the current colour
            colour = [blocked_colour[i] * 0.5 + colour[i] * 0.5
                      for i in range(3)]

        # wxColour wants integer channels
        colour = tuple([int(i) for i in colour])
        blockFillColour = wx.wxColour(*colour)

        # # we're going to alpha blend a purplish sheen if this glyph is active
        # if self.selected:
        #     # sheen: 255, 0, 246
        #     # alpha-blend with 192, 192, 192 with alpha 0.5 yields
        #     # 224, 96, 219
        #     blockFillColour = wx.wxColour(224, 96, 219)
        # else:
        #     blockFillColour = wx.wxColour(192, 192, 192)

        # default pen and font
        dc.SetBrush(wx.wxBrush(blockFillColour, wx.wxSOLID))
        dc.SetPen(wx.wxPen('BLACK', 1, wx.wxSOLID))
        dc.SetFont(wx.wxNORMAL_FONT)

        # calculate our size
        # the width is the maximum(textWidth + twice the horizontal border,
        # all ports, horizontal borders and inter-port borders added up)
        maxPorts = max(self._numInputs, self._numOutputs)
        portsWidth = 2 * coGlyph._horizBorder + \
                     maxPorts * coGlyph._pWidth + \
                     (maxPorts - 1 ) * coGlyph._horizSpacing

        # determine maximum textwidth and height
        tex = 0
        tey = 0
        for l in self._labelList:
            temptx, tempty = dc.GetTextExtent(l)
            if temptx > tex:
                tex = temptx
            if tempty > tey:
                tey = tempty

        # this will be calculated with the max width, so fine
        textWidth = tex + 2 * coGlyph._horizBorder

        self._size = (max(textWidth, portsWidth),
                      tey * len(self._labelList) + 2 * coGlyph._vertBorder)

        # draw the main rectangle
        dc.DrawRectangle(self._position[0], self._position[1],
                         self._size[0], self._size[1])

        #dc.DrawRoundedRectangle(self._position[0], self._position[1],
        #                        self._size[0], self._size[1], radius=5)

        # draw the label lines, stacked vertically inside the border
        initY = self._position[1] + coGlyph._vertBorder
        for l in self._labelList:
            dc.DrawText(l,
                        self._position[0] + coGlyph._horizSpacing,
                        initY)
            initY += tey

        # then the inputs
        horizOffset = self._position[0] + coGlyph._horizBorder
        horizStep = coGlyph._pWidth + coGlyph._horizSpacing
        # green port == connected, red port == not connected
        connBrush = wx.wxBrush("GREEN")
        disconnBrush = wx.wxBrush("RED")

        for i in range(self._numInputs):
            brush = [disconnBrush, connBrush][bool(self.inputLines[i])]
            self.drawPort(dc, brush,
                          (horizOffset + i * horizStep,
                           self._position[1]))

        # output ports sit flush with the bottom edge
        lx = self._position[1] + self._size[1] - coGlyph._pHeight
        for i in range(self._numOutputs):
            brush = [disconnBrush, connBrush][bool(self.outputLines[i])]
            self.drawPort(dc, brush,
                          (horizOffset + i * horizStep,
                           lx))

    def drawPort(self, dc, brush, pos):
        # draw one port block at pos with the given (conn/disconn) brush
        dc.SetBrush(brush)
        dc.DrawRectangle(pos[0], pos[1], coGlyph._pWidth, coGlyph._pHeight)
        #dc.DrawEllipse(pos[0], pos[1], coGlyph._pWidth, coGlyph._pHeight)

    def findPortContainingMouse(self, x, y):
        """Find port that contains the mouse pointer.  Returns tuple
        containing inOut (0 == input, 1 == output) and port index, or
        None if no port contains the pointer.
        """
        horizOffset = self._position[0] + coGlyph._horizBorder
        horizStep = coGlyph._pWidth + coGlyph._horizSpacing

        # first check the input ports along the top edge
        bx = horizOffset
        by = self._position[1]
        for i in range(self._numInputs):
            if x >= bx and x <= bx + self._pWidth and \
               y >= by and y < by + self._pHeight:
                return (0, i)

            bx += horizStep

        # then the output ports along the bottom edge
        bx = horizOffset
        by = self._position[1] + self._size[1] - coGlyph._pHeight
        for i in range(self._numOutputs):
            if x >= bx and x <= bx + self._pWidth and \
               y >= by and y < by + self._pHeight:
                return (1, i)

            bx += horizStep

        return None

    def getCenterOfPort(self, inOrOut, idx):
        # returns the centre coordinate of port idx; inOrOut 0 == input
        # (top edge), 1 == output (bottom edge)
        horizOffset = self._position[0] + coGlyph._horizBorder
        horizStep = coGlyph._pWidth + coGlyph._horizSpacing

        cy = self._position[1] + coGlyph._pHeight / 2
        if inOrOut:
            cy += self._size[1] - coGlyph._pHeight

        cx = horizOffset + idx * horizStep + coGlyph._pWidth / 2

        return (cx, cy)

    def getLabel(self):
        # all label lines joined into a single space-separated string
        return ' '.join(self._labelList)

    def setLabelList(self,labelList):
        # replace the displayed label lines; takes effect on next draw()
        self._labelList = labelList
| Python |
import wx
from canvasSubject import canvasSubject
from canvasObject import *
class canvas(wx.wxScrolledWindow, canvasSubject):
    """Scrolled drawing surface for canvasObjects (glyphs, connection
    lines, etc.).

    All drawing happens via an off-screen buffer bitmap (self._buffer)
    covering the whole virtual area; OnPaint simply blits the buffer.
    The canvas itself is also a canvasSubject, so it publishes 'drag',
    'buttonDown' and 'buttonUp' events for interactions that do not hit
    any canvas object.
    """

    def __init__(self, parent, id = -1, size = wx.wxDefaultSize):
        # parent 1 ctor
        wx.wxScrolledWindow.__init__(self, parent, id, wx.wxPoint(0, 0), size,
                                     wx.wxSUNKEN_BORDER)
        # parent 2 ctor
        canvasSubject.__init__(self)

        self._cobjects = []
        self._previousRealCoords = None
        self._mouseDelta = (0,0)
        self._potentiallyDraggedObject = None
        self._draggedObject = None

        # events published by the canvas itself (as opposed to events
        # published by the objects placed on it)
        self._observers = {'drag' : [],
                           'buttonDown' : [],
                           'buttonUp' : []}

        self.SetBackgroundColour("WHITE")

        wx.EVT_MOUSE_EVENTS(self, self.OnMouseEvent)
        wx.EVT_PAINT(self, self.OnPaint)

        # size of the scrollable virtual area in pixels
        self.virtualWidth = 2048
        self.virtualHeight = 2048

        self._buffer = None
        self._buffer = wx.wxEmptyBitmap(self.virtualWidth, self.virtualHeight)
        # we're only going to draw into the buffer, so no real client DC
        dc = wx.wxBufferedDC(None, self._buffer)
        dc.SetBackground(wx.wxBrush(self.GetBackgroundColour()))
        dc.Clear()
        self.doDrawing(dc)

        self.SetVirtualSize((self.virtualWidth, self.virtualHeight))
        self.SetScrollRate(20,20)

    def eventToRealCoords(self, ex, ey):
        """Convert window event coordinates to canvas relative coordinates.
        """

        # get canvas parameters
        vsx, vsy = self.GetViewStart()
        dx, dy = self.GetScrollPixelsPerUnit()

        # calculate REAL coords
        rx = ex + vsx * dx
        ry = ey + vsy * dy

        return (rx, ry)

    def getDC(self):
        """Returns DC which can be used by the outside to draw to our buffer.
        As soon as dc dies (and it will at the end of the calling function)
        the contents of self._buffer will be blitted to the screen.
        """
        cdc = wx.wxClientDC(self)
        # set device origin according to scroll position
        self.PrepareDC(cdc)
        dc = wx.wxBufferedDC(cdc, self._buffer)
        return dc

    def get_glyph_on_coords(self, rx, ry):
        """If rx,ry falls on a glyph, return that glyph, else return
        None.
        """
        for cobject in self._cobjects:
            if cobject.hitTest(rx, ry) and isinstance(cobject,
                                                      coGlyph):
                return cobject

        return None

    def OnMouseEvent(self, event):
        # this seems to work fine under windows.  If we don't do this, and the
        # mouse leaves the canvas, the rubber band remains and no selection
        # is made.
        if event.ButtonDown():
            if not self.HasCapture():
                self.CaptureMouse()
        elif event.ButtonUp():
            if self.HasCapture():
                self.ReleaseMouse()

        # these coordinates are relative to the visible part of the canvas
        ex, ey = event.GetX(), event.GetY()

        rx, ry = self.eventToRealCoords(ex, ey)

        # add the "real" coords to the event structure
        event.realX = rx
        event.realY = ry

        # keep a running delta so observers can move objects by the same
        # amount the mouse moved
        if self._previousRealCoords:
            self._mouseDelta = (rx - self._previousRealCoords[0],
                                ry - self._previousRealCoords[1])
        else:
            self._mouseDelta = (0,0)

        # FIXME: store binding to object which "has" the mouse
        # on subsequent tries, we DON'T have to check all objects, only the
        # one which had the mouse on the previous try... only if it "loses"
        # the mouse, do we enter the mean loop again.

        mouseOnObject = False

        # the following three clauses, i.e. the hitTest, mouseOnObject and
        # draggedObject should be kept in this order, unless you know
        # EXACTLY what you're doing.  If you're going to change anything, test
        # that connects, disconnects (of all kinds) and rubber-banding still
        # work.

        # we need to do this expensive hit test every time, because the user
        # wants to know when he mouses over the input port of a destination
        # module

        # NOTE: __hasMouse is accessed from inside class canvas, so Python
        # name-mangles it to _canvas__hasMouse; addObject() (also in this
        # class) creates the same mangled attribute, keeping this consistent.
        for cobject in self._cobjects:
            if cobject.hitTest(rx, ry):
                mouseOnObject = True

                cobject.notifyObservers('motion', event)

                if not cobject.__hasMouse:
                    cobject.__hasMouse = True
                    cobject.notifyObservers('enter', event)

                if event.Dragging():
                    if not self._draggedObject:
                        if self._potentiallyDraggedObject == cobject:
                            # the user is dragging inside an object inside
                            # of which he has previously clicked... this
                            # definitely means he's dragging the object
                            mouseOnObject = True
                            self._draggedObject = cobject

                        else:
                            # this means the user has dragged the mouse
                            # over an object... which means mouseOnObject
                            # is technically true, but because we want the
                            # canvas to get this kind of dragEvent, we
                            # set it to false
                            mouseOnObject = False

                elif event.ButtonUp():
                    cobject.notifyObservers('buttonUp', event)

                elif event.ButtonDown():
                    if event.LeftDown():
                        # this means EVERY buttonDown in an object classifies
                        # as a potential drag.  if the user now drags, we
                        # have a winner
                        self._potentiallyDraggedObject = cobject

                    cobject.notifyObservers('buttonDown', event)

                elif event.ButtonDClick():
                    cobject.notifyObservers('buttonDClick', event)

            # ends if cobject.hitTest(rx, ry)
            else:
                if cobject.__hasMouse:
                    cobject.__hasMouse = False
                    cobject.notifyObservers('exit', event)

        if not mouseOnObject:
            # we only get here if the mouse is not inside any canvasObject
            # (but it could be dragging a canvasObject!)
            if event.Dragging():
                self.notifyObservers('drag', event)
            elif event.ButtonUp():
                self.notifyObservers('buttonUp', event)
            elif event.ButtonDown():
                self.notifyObservers('buttonDown', event)

        if self._draggedObject:
            # dragging locks onto an object, even if the mouse pointer
            # is not inside that object - it will keep receiving drag
            # events!
            draggedObject = self._draggedObject
            if event.ButtonUp():
                # a button up anywhere cancels any drag
                self._draggedObject = None

            # so, the object can query canvas.getDraggedObject: if it's
            # none, it means the drag has ended; if not, the drag is
            # ongoing
            draggedObject.notifyObservers('drag', event)

        if event.ButtonUp():
            # each and every ButtonUp cancels the current potential drag object
            self._potentiallyDraggedObject = None

        # store the previous real coordinates for mouse deltas
        self._previousRealCoords = (rx, ry)

    def OnPaint(self, event):
        # as soon as dc is unbound and destroyed, buffer is blit
        # BUFFER_VIRTUAL_AREA indicates that the buffer bitmap is for the
        # whole virtual area, not just the client area of the window
        dc = wx.wxBufferedPaintDC(self, self._buffer,
                                  style=wx.wxBUFFER_VIRTUAL_AREA)

    def doDrawing(self, dc):
        """This function actually draws the complete shebang to the passed
        dc.
        """
        dc.BeginDrawing()

        # clear the whole shebang to background
        dc.SetBackground(wx.wxBrush(self.GetBackgroundColour(), wx.wxSOLID))
        dc.Clear()

        # draw glyphs last (always)
        # i.e. split objects into glyphs and everything else, then draw
        # glyphs afterwards so they end up on top of the connection lines
        glyphs = []
        theRest = []
        for i in self._cobjects:
            if isinstance(i, coGlyph):
                glyphs.append(i)
            else:
                theRest.append(i)

        for cobj in theRest:
            cobj.draw(dc)

        for cobj in glyphs:
            cobj.draw(dc)

        # draw all objects
        #for cobj in self._cobjects:
        #    cobj.draw(dc)

        dc.EndDrawing()

    def addObject(self, cobj):
        # silently ignores None and duplicates
        if cobj and cobj not in self._cobjects:
            cobj.setCanvas(self)
            self._cobjects.append(cobj)
            # sets _canvas__hasMouse via name mangling; read back the same
            # way by OnMouseEvent and getObjectWithMouse
            cobj.__hasMouse = False

    def drawObject(self, cobj):
        """Use this if you want to redraw a single canvas object.
        """
        dc = self.getDC()
        cobj.draw(dc)

    def redraw(self):
        """Redraw the whole scene.
        """
        dc = self.getDC()
        self.doDrawing(dc)

    def removeObject(self, cobj):
        # silently ignores None and objects not on this canvas; also
        # cancels an in-progress drag of the removed object
        if cobj and cobj in self._cobjects:
            cobj.setCanvas(None)
            if self._draggedObject == cobj:
                self._draggedObject = None
            del self._cobjects[self._cobjects.index(cobj)]

    def getMouseDelta(self):
        # (dx, dy) between the two most recent mouse events, in real coords
        return self._mouseDelta

    def getDraggedObject(self):
        # object currently locked into a drag, or None
        return self._draggedObject

    def getObjectsOfClass(self, classt):
        # all objects on the canvas that are instances of classt
        return [i for i in self._cobjects if isinstance(i, classt)]

    def getObjectWithMouse(self):
        """Return object currently containing mouse, None if no object has
        the mouse.
        """
        for cobject in self._cobjects:
            if cobject.__hasMouse:
                return cobject

        return None

    def dragObject(self, cobj, delta):
        # move cobj by delta, showing the move as an inverted dotted
        # outline (old outline is erased by the second INVERT draw)
        if abs(delta[0]) > 0 or abs(delta[1]) > 0:
            # calculate new position
            cpos = cobj.getPosition()
            npos = (cpos[0] + delta[0], cpos[1] + delta[1])
            cobj.setPosition(npos)

            # setup DC
            dc = self.getDC()
            dc.BeginDrawing()

            # we're only going to draw a dotted outline
            dc.SetBrush(wx.wxBrush('WHITE', wx.wxTRANSPARENT))
            dc.SetPen(wx.wxPen('BLACK', 1, wx.wxDOT))
            dc.SetLogicalFunction(wx.wxINVERT)
            bounds = cobj.getBounds()

            # first delete the old rectangle
            dc.DrawRectangle(cpos[0], cpos[1], bounds[0], bounds[1])

            # then draw the new one
            dc.DrawRectangle(npos[0], npos[1], bounds[0], bounds[1])

            # thar she goes
            dc.EndDrawing()
#############################################################################
| Python |
# dummy
| Python |
# dummy
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import ConfigParser
from ConfigParser import NoOptionError
import copy
from module_kits.misc_kit.mixins import SubjectMixin
from module_manager import PickledModuleState, PickledConnection
import os
import time
import types
class NetworkManager(SubjectMixin):
"""Contains all logic to do with network handling.
This is still work in progress: code has to be refactored out of the
ModuleManager and the GraphEditor.
"""
def __init__(self, devide_app):
self._devide_app = devide_app
SubjectMixin.__init__(self)
def close(self):
SubjectMixin.close(self)
def execute_network(self, meta_modules):
"""Execute network represented by all modules in the list
meta_modules.
"""
# trigger start event so that our observers can auto-save and
# whatnot
self.notify('execute_network_start')
# convert all MetaModules to schedulerModules
sms = self._devide_app.scheduler.meta_modules_to_scheduler_modules(
meta_modules)
print "STARTING network execute ----------------------------"
print time.ctime()
self._devide_app.scheduler.execute_modules(sms)
self._devide_app.set_progress(100.0, 'Network execution complete.')
print "ENDING network execute ------------------------------"
def load_network_DEPRECATED(self, filename):
"""Given a filename, read it as a DVN file and return a tuple with
(pmsDict, connectionList, glyphPosDict) if successful. If not
successful, an exception will be raised.
"""
f = None
try:
# load the fileData
f = open(filename, 'rb')
fileData = f.read()
except Exception, e:
if f:
f.close()
raise RuntimeError, 'Could not load network from %s:\n%s' % \
(filename,str(e))
f.close()
try:
(headerTuple, dataTuple) = cPickle.loads(fileData)
magic, major, minor, patch = headerTuple
pmsDict, connectionList, glyphPosDict = dataTuple
except Exception, e:
raise RuntimeError, 'Could not interpret network from %s:\n%s' % \
(filename,str(e))
if magic != 'DVN' and magic != 'D3N' or (major,minor,patch) != (1,0,0):
raise RuntimeError, '%s is not a valid DeVIDE network file.' % \
(filename,)
return (pmsDict, connectionList, glyphPosDict)
def load_network(self, filename):
"""Given a filename, read it as a DVN file and return a tuple with
(pmsDict, connectionList, glyphPosDict) if successful. If not
successful, an exception will be raised.
All occurrences of %(dvn_dir)s will be expanded to the
directory that the DVN file is being loaded from.
"""
# need this for substitution during reading of
# module_config_dict
dvn_dir = os.path.dirname(filename)
cp = ConfigParser.ConfigParser({'dvn_dir' : dvn_dir})
try:
# load the fileData
cfp = open(filename, 'rb')
except Exception, e:
raise RuntimeError, 'Could not open network file %s:\n%s' % \
(filename,str(e))
try:
cp.readfp(cfp)
except Exception, e:
raise RuntimeError, 'Could not load network from %s:\n%s' % \
(filename,str(e))
finally:
cfp.close()
pms_dict = {}
connection_list = []
glyph_pos_dict = {}
sections = cp.sections()
# we use this dictionary to determine which ConfigParser get
# method to use for the specific connection attribute.
conn_attrs = {
'source_instance_name' : 'get',
'output_idx' : 'getint',
'target_instance_name' : 'get',
'input_idx' : 'getint',
'connection_type' : 'getint'
}
for sec in sections:
if sec.startswith('modules/'):
pms = PickledModuleState()
pms.instance_name = sec.split('/')[-1]
try:
pms.module_name = cp.get(sec, 'module_name')
except NoOptionError:
# there's no module name, so we're ignoring this
# section
continue
try:
mcd = cp.get(sec, 'module_config_dict')
except NoOptionError:
# no config in DVN file, pms will have default
# module_config
pass
else:
# we have to use this relatively safe eval trick to
# unpack and interpret the dict
cd = eval(mcd,
{"__builtins__": {},
'True' : True, 'False' : False})
pms.module_config.__dict__.update(cd)
# store in main pms dict
pms_dict[pms.instance_name] = pms
try:
# same eval trick to get out the glyph position
gp = eval(cp.get(sec, 'glyph_position'),
{"__builtins__": {}})
except NoOptionError:
# no glyph_pos, so we assign it the default origin
gp = (0,0)
glyph_pos_dict[pms.instance_name] = gp
elif sec.startswith('connections/'):
pc = PickledConnection()
for a, getter in conn_attrs.items():
get_method = getattr(cp, getter)
try:
setattr(pc, a, get_method(sec, a))
except NoOptionError:
# if an option is missing, we discard the
# whole connection
break
else:
# this else clause is only entered if the for loop
# above was NOT broken out of, i.e. we only store
# valid connections
connection_list.append(pc)
return pms_dict, connection_list, glyph_pos_dict
def realise_network(self, pms_dict, connection_list):
"""Given pms_dict and connection_list as returned by load_network,
realise the given network and return the realised new_modules_dict and
new_connections.
@TODO: move network-related code from mm.deserialise_module_instances
here.
"""
mm = self._devide_app.get_module_manager()
new_modules_dict, new_connections = mm.deserialise_module_instances(
pms_dict, connection_list)
return new_modules_dict, new_connections
def _transform_relative_paths(self, module_config_dict, dvn_dir):
"""Given a module_config_dict and the directory that a DVN is
being saved to, transform all values of which the keys contain
'filename' or 'file_name' so that:
* if the value is a directory somewhere under the dvn_dir,
replace the dvn_dir part with %(dvn_dir)s
* if the value is a list of directories, do the substitution
for each element.
"""
def transform_single_path(p):
p = os.path.abspath(p)
if p.find(dvn_dir) == 0:
# do the modification in the copy.
# (probably not necessary to be this
# careful)
p = p.replace(dvn_dir, '%(dvn_dir)s')
p = p.replace('\\', '/')
return p
# make a copy, we don't want to modify what the user
# gave us.
new_mcd = copy.deepcopy(module_config_dict)
# then we iterate through the original
for k in module_config_dict:
if k.find('filename') >= 0 or \
k.find('file_name') >= 0:
v = module_config_dict[k]
if type(v) in [
types.StringType,
types.UnicodeType]:
new_mcd[k] = transform_single_path(v)
elif type(v) == types.ListType:
# it's a list, so try to transform every element
# copy everything into a new list new_v
new_v = v[:]
for i,p in enumerate(v):
if type(p) in [
types.StringType,
types.UnicodeType]:
new_v[i] = transform_single_path(p)
new_mcd[k] = new_v
return new_mcd
def save_network(self, pms_dict, connection_list, glyph_pos_dict,
filename, export=False):
"""Given the serialised network representation as returned by
ModuleManager._serialise_network, write the whole thing to disk
as a config-style DVN file.
@param export: If True, will transform all filenames that are
below the network directory to relative pathnames. These will
be expanded (relative to the loaded network) at load-time.
"""
cp = ConfigParser.ConfigParser()
# general section with network configuration
sec = 'general'
cp.add_section(sec)
cp.set(sec, 'export', export)
if export:
# convert all stored filenames, if they are below the
# network directory, to relative pathnames with
# substitutions: $(dvn_dir)s/the/rest/somefile.txt
# on ConfigParser.read we'll supply the NEW dvn_dir
dvn_dir = os.path.abspath(os.path.dirname(filename))
# create a section for each module
for pms in pms_dict.values():
sec = 'modules/%s' % (pms.instance_name,)
cp.add_section(sec)
cp.set(sec, 'module_name', pms.module_name)
if export:
mcd = self._transform_relative_paths(
pms.module_config.__dict__, dvn_dir)
else:
# no export, so we don't have to transform anything
mcd = pms.module_config.__dict__
cp.set(sec, 'module_config_dict', mcd)
cp.set(sec, 'glyph_position', glyph_pos_dict[pms.instance_name])
for idx, pconn in enumerate(connection_list):
sec = 'connections/%d' % (idx,)
cp.add_section(sec)
attrs = pconn.__dict__.keys()
for a in attrs:
cp.set(sec, a, getattr(pconn, a))
cfp = file(filename, 'wb')
cp.write(cfp)
cfp.close()
def clear_network(self):
"""Remove/close complete network.
This method is only called during the non-view mode of operation by
the scripting interface for example.
"""
mm = self._devide_app.get_module_manager()
mm.delete_all_modules()
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
"""Module containing base class for devide modules.
author: Charl P. Botha <cpbotha@ieee.org>
"""
#########################################################################
class GenericObject(object):
    """Generic object into which we can stuff whichever attributes we want.

    Instantiate and assign arbitrary attributes on the instance; this is
    simply an empty attribute container.
    """
    pass
#########################################################################
class DefaultConfigClass(object):
    """Empty attribute container used as the default module configuration
    (see ModuleBase.__init__, which assigns an instance to self._config).
    """
    pass
#########################################################################
class ModuleBase(object):
"""Base class for all modules.
Any module wishing to take part in the devide party will have to offer all
of these methods.
"""
def __init__(self, module_manager):
"""Perform your module initialisation here.
Please also call this init method
(i.e. ModuleBase.__init__(self)). In your own __init__, you
should create your view and show it to the user.
"""
self._module_manager = module_manager
self._config = DefaultConfigClass()
# modules should toggle this variable to True once they have
# initialised and shown their view once.
self.view_initialised = False
def close(self):
"""Idempotent method for de-initialising module as far as possible.
We can't guarantee the calling of __del__, as Python does garbage
collection and the object might destruct a while after we've removed
all references to it.
In addition, with python garbage collection, __del__ can cause
uncollectable objects, so try to avoid it as far as possible.
"""
# we neatly get rid of some references
del self._module_manager
def get_input_descriptions(self):
"""Returns tuple of input descriptions, mostly used by the graph editor
to make a nice glyph for this module."""
raise NotImplementedError
def set_input(self, idx, input_stream):
"""Attaches input_stream (which is e.g. the output of a previous
module) to this module's input at position idx.
If the previous value was None and the current value is not None, it
signifies a connect and the module should initialise as if it's
getting a new input. This usually happens during the first network
execution AFTER a connection.
If the previous value was not-None and the new value is None, it
signifies a disconnect and the module should take the necessary
actions. This usually happens immediatly when the user disconnects an
input
If the previous value was not-None and the current value is not-None,
the module should take actions as for a changed input. This event
signifies a re-transfer on an already existing connection. This can
be considered an event for which this module is an observer.
"""
raise NotImplementedError
def get_output_descriptions(self):
"""Returns a tuple of output descriptions.
Mostly used by the graph editor to make a nice glyph for this module.
These are also clues to the user as to which glyphs can be connected.
"""
raise NotImplementedError
def get_output(self, idx):
"""Get the n-th output.
This will be used for connecting this output to the input of another
module. Whatever is returned by this object MUST have an Update()
method. However you choose to implement it, the Update() should make
sure that the whole chain of logic resulting in the data object has
executed so that the data object is up to date.
"""
raise NotImplementedError
def logic_to_config(self):
"""Synchronise internal configuration information (usually
self._config)with underlying system.
You only need to implement this if you make use of the standard ECASH
controls.
"""
raise NotImplementedError
def config_to_logic(self):
"""Apply internal configuration information (usually self._config) to
the underlying logic.
If this has resulted in changes to the logic, return True, otherwise
return False
You only need to implement this if you make use of the standard ECASH
controls.
"""
raise NotImplementedError
def view_to_config(self):
"""Synchronise internal configuration information with the view (GUI)
of this module.
If this has resulted in changes to the config, return True,
otherwise return False.
You only need to implement this if you make use of the standard ECASH
controls.
"""
raise NotImplementedError
def config_to_view(self):
"""Make the view reflect the internal configuration information.
You only need to implement this if you make use of the standard ECASH
controls.
"""
raise NotImplementedError
def execute_module(self):
"""This should make the model do whatever processing it was designed
to do.
It's important that when this method is called, the module should be
able to cause ALL of the modules preceding it in a glyph chain to
execute (if necessary). If the whole chain consists of VTK objects,
this is easy.
If not, extra measures need to be taken. According to API,
each output/input data object MUST have an Update() method
that can ensure that the logic responsible for that object has
executed thus making the data object current.
In short, execute_module() should call Update() on all of this modules
input objects, directly or indirectly.
"""
raise NotImplementedError
def view(self):
"""Pop up a dialog with all config possibilities, including optional
use of the pipeline browser.
If the dialog is already visible, do something to draw the user's
attention to it. For a wxFrame-based view, you can do something like:
if not frame.Show(True):
frame.Raise()
If the frame is already visible, this will bring it to the front.
"""
raise NotImplementedError
def get_config(self):
"""Returns current configuration of module.
This should return a pickle()able object that encapsulates all
configuration information of this module. The default just returns
self._config, which is None by default. You can override get_config()
and set_config(), or just make sure that your config info always goes
via self._config
In general, you should never need to override this.
"""
# make sure that the config reflects the state of the underlying logic
self.logic_to_config()
# and then return the config struct.
return self._config
def set_config(self, aConfig):
"""Change configuration of module to that stored in aConfig.
If set_config is called with the object previously returned by
get_config(), the module should be in exactly the same state as it was
when get_config() was called. The default sets the default
self._config and applies it to the underlying logic.
In general, you should never need to override this.
"""
# we update the dict of the existing config with the passed
# parameter. This means that the new config is merged with
# the old, but all new members overwrite old one. This is
# more robust.
self._config.__dict__.update(aConfig.__dict__)
# apply the config to the underlying logic
self.config_to_logic()
# bring it back all the way up to the view
self.logic_to_config()
# but only if we are in view mode
if self.view_initialised:
self.config_to_view()
# the config has been set, so we assumem that the module has
# now been modified.
self._module_manager.modify_module(self)
# convenience functions
def sync_module_logic_with_config(self):
    """Convenience thunk: ask the ModuleManager to push this module's
    config down into its underlying logic.
    """
    self._module_manager.sync_module_logic_with_config(self)
def sync_module_view_with_config(self):
    """Convenience thunk: ask the ModuleManager to refresh this
    module's view from its config.
    """
    self._module_manager.sync_module_view_with_config(self)
def sync_module_view_with_logic(self):
    """Convenience thunk: ask the ModuleManager to refresh this
    module's view from its underlying logic.
    """
    self._module_manager.sync_module_view_with_logic(self)
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
"""
"""
import mutex
#########################################################################
class SchedulerException(Exception):
    """Base class for all scheduler-related errors."""
    pass
class CyclesDetectedException(SchedulerException):
    """Raised when the module network contains an illegal cycle and
    can therefore not be scheduled.
    """
    pass
#########################################################################
class SchedulerModuleWrapper:
    """Adapter that makes a (meta_module, part) binding usable by the
    scheduler.

    We can use this to handle exceptions, such as the viewer split.
    Instances are created ad hoc, so identity-based checks (equality,
    'in') are meaningless across wrappings; use L{matches} instead.

    @ivar meta_module: the wrapped MetaModule
    @ivar part: index of the module part this wrapper represents

    @todo: so little functionality is left in this class that plain
    (meta_module, part) tuples - which ARE hashable and directly
    comparable - could replace it entirely.

    @author: Charl P. Botha <http://cpbotha.net/>
    """

    def __init__(self, meta_module, part):
        self.meta_module = meta_module
        self.part = part

    def matches(self, otherModule):
        """Equivalence test for ad hoc wrapped modules.

        Module instances are wrapped with this class on an ad hoc
        basis, so equality / 'in' tests do not work; use this method.

        @param otherModule: wrapper to compare against.
        @return: True iff both wrappers refer to the same meta_module
        and the same part.
        """
        same_module = self.meta_module == otherModule.meta_module
        same_part = self.part == otherModule.part
        return same_module and same_part
#########################################################################
class Scheduler:
    """Coordinates event-driven network execution.

    DeVIDE currently supports two main scheduling modes: event-driven
    and demand-driven. [1] contains a concise overview of the
    scheduling approach, but we'll go into some more detail in this
    in-code documentation.

    Event-driven scheduling:
    This is the default scheduling mode - the network is analysed and
    all modules are iterated through in topological order. For each
    module, its inputs are transferred from its producer modules if
    necessary (i.e. a producer module has been executed since the
    previous transfer, or this (consumer) module has been newly
    connected (in which case the producer module's output t-time to
    this module is set to 0)). All transfers are timestamped. In
    event-driven mode, after every transfer, the streaming transfer
    timestamp for that connection is set to 0 so that subsequent
    hybrid scheduling runs will re-transfer all relevant data. If the
    module has been modified, or inputs have been transferred to it
    (in which case it is also explicitly modified), its
    execute_module() method is then called.

    Hybrid scheduling:
    This mode of scheduling has to be explicitly invoked by the user.
    All modules with a streaming_execute_module() are considered
    streamable. The largest subsets of streamable modules are found
    (see [1] for details on this algorithm). All modules are iterated
    through in topological order and execution continues as for
    event-driven scheduling, except when a streamable module is
    encountered. In that case, we use a different set of
    streaming_transfer_times to check whether we should transfer its
    producers' output data pointers (WITHOUT disconnect workaround).
    In every case that we do a transfer, the usual transfer timestamps
    are set to 0 so that any subsequent event-driven scheduling will
    re-transfer. For each re-transfer, the module will be modified,
    thus also causing a re-execute when we change to event-driven mode.
    Only if the current streamable module is at one of the end points
    of the streamable subset and its execute_timestamp is
    older than the normal modification time-stamp, is its
    streaming_execute_module() method called and the
    streaming_execute_timestamp touched.

    Timestamps:
    There are four collections of timestamps:
    1. per module modified_time (initvalue 0)
    2. per module execute_time (initvalue 0)
    3. per output connection transfer_time
    4. per module streaming touch time (initvalue 0)

    When a module's configuration is changed by the user (the user
    somehow interacts with the module), the module's modified_time is
    set to current_time.

    When a module execution is scheduled:
    * For each supplying connection, the data is transferred if
      transfer_time(connection) < execute_time(producer_module), or in
      the hybrid case, if transfer_time(connection) <
      touch_time(producer_module)
    * If data is transferred to a module, that module's modified_time
      is set to current_time.
    * The module is then executed if modified_time > execute_time.
    * If the module is executed, execute_time is set to current_time.

    Notes:
    * there are two sets of transfer_time timestamps,
      one set each for event-driven and hybrid
    * there is only ONE set of modified times and of execute_times
    * See the timestamp description above, as well as the descriptions
      for hybrid and event-driven to see how the scheduler makes sure
      that switching between execution models automatically results in
      re-execution of modules that are adaptively scheduled.
    * in the case that illegal cycles are found, network execution is
      aborted.

    [1] C.P. Botha and F.H. Post, "Hybrid Scheduling in the DeVIDE
    Dataflow Visualisation Environment", accepted for SimVis 2008

    This should be a singleton, as we're using a mutex to protect per-
    process network execution.

    @author: Charl P. Botha <http://cpbotha.net/>
    """

    # class attribute: one mutex shared by ALL Scheduler instances, so
    # at most one network execution can run per process at any time
    _execute_mutex = mutex.mutex()

    def __init__(self, devideApp):
        """Initialise scheduler instance.

        @param devideApp: an instance of the devideApplication that we'll use
        to communicate with the outside world.
        """
        self._devideApp = devideApp

    def meta_modules_to_scheduler_modules(self, meta_modules):
        """Preprocess module instance list before cycle detection or
        topological sorting to take care of exceptions.

        Note that the modules are wrapped anew by this method, so equality
        tests with previously existing scheduleModules will not work. You have
        to use the L{SchedulerModuleWrapper.matches()} method.

        @param meta_modules: list of raw MetaModule instances
        @return: list with SchedulerModuleWrappers, one per (module, part)
        """
        # replace every view module with two segments: final and initial
        SchedulerModuleWrappers = []
        for mModule in meta_modules:
            # wrap every part separately
            for part in range(mModule.numParts):
                SchedulerModuleWrappers.append(
                    SchedulerModuleWrapper(mModule, part))
        return SchedulerModuleWrappers

    def getConsumerModules(self, schedulerModule):
        """Return consumers of schedulerModule as a list of schedulerModules.

        The consumers that are returned have been wrapped on an ad hoc basis,
        so you can't trust normal equality or 'in' tests. Use the
        L{SchedulerModuleWrapper.matches} method instead.

        Only consumers fed by schedulerModule's own part are returned.

        @param schedulerModule: determine modules that are connected to outputs
        of this instance.
        @return: list of consumer schedulerModules, ad hoc wrappings.
        """
        # get the producer meta module
        p_meta_module = schedulerModule.meta_module
        # only consumers that are dependent on p_part are relevant
        p_part = schedulerModule.part
        # consumers is a list of (output_idx, consumerMetaModule,
        # consumerInputIdx) tuples
        mm = self._devideApp.get_module_manager()
        consumers = mm.get_consumers(p_meta_module)
        sConsumers = []
        for output_idx, consumerMetaModule, consumerInputIdx in consumers:
            # skip outputs that belong to a different part of the producer
            if p_meta_module.getPartForOutput(output_idx) == p_part:
                # now see which part of the consumerMetaModule is dependent
                cPart = consumerMetaModule.getPartForInput(consumerInputIdx)
                sConsumers.append(
                    SchedulerModuleWrapper(consumerMetaModule, cPart))
        return sConsumers

    def getProducerModules(self, schedulerModule):
        """Return producer modules and indices that supply schedulerModule
        with data.

        The producers that are returned have been wrapped on an ad hoc basis,
        so you can't trust normal equality or 'in' tests. Use the
        L{SchedulerModuleWrapper.matches} method instead.

        @param schedulerModule: determine modules that are connected to inputs
        of this instance.
        @return: list of tuples with (producer schedulerModule, output
        index, consumer input index).
        """
        # get the consumer meta module
        c_meta_module = schedulerModule.meta_module
        # only producers that supply this part are relevant
        c_part = schedulerModule.part
        # producers is a list of (producerMetaModule, output_idx, input_idx)
        # tuples
        mm = self._devideApp.get_module_manager()
        producers = mm.get_producers(c_meta_module)
        sProducers = []
        for p_meta_module, outputIndex, consumerInputIdx in producers:
            # skip inputs that feed a different part of the consumer
            if c_meta_module.getPartForInput(consumerInputIdx) == c_part:
                # find part of producer meta module that is actually
                # producing for schedulerModule
                p_part = p_meta_module.getPartForOutput(outputIndex)
                sProducers.append(
                    (SchedulerModuleWrapper(p_meta_module, p_part),
                     outputIndex, consumerInputIdx))
        return sProducers

    def detectCycles(self, schedulerModules):
        """Given a list of moduleWrappers, detect cycles in the topology
        of the modules.

        @param schedulerModules: list of module instances that has to be
        checked.
        @return: True if cycles detected, False otherwise.
        @todo: check should really be limited to modules in selection.
        """

        def detectCycleMatch(visited, currentModule):
            """Recursive function used to check for cycles in the module
            network starting from initial module currentModule.

            @param visited: dict of schedulerModules seen on this recursion
            branch (values are just 1; the dict is used as a set).
            @param currentModule: initial schedulerModule
            @return: True if cycle detected starting from currentModule
            """
            consumers = self.getConsumerModules(currentModule)
            for consumer in consumers:
                # NOTE: this is a for-else construct - the else branch
                # runs only when the inner loop completes WITHOUT
                # returning, i.e. when consumer matched nothing in
                # visited; only then do we recurse into consumer.
                for v in visited:
                    if consumer.matches(v):
                        # consumer was already seen on this branch: cycle
                        return True
                else:
                    # we need to make a copy of visited and send it along
                    # if we don't, changes to visit are shared between
                    # different branches of the recursion; we only want
                    # it to aggregate per recursion branch
                    visited_copy = {}
                    visited_copy.update(visited)
                    visited_copy[consumer] = 1
                    if detectCycleMatch(visited_copy, consumer):
                        return True

            # the recursion ends when there are no consumers and no
            # cycle was found along this branch
            return False

        # start a fresh depth-first search from every module
        for schedulerModule in schedulerModules:
            if detectCycleMatch({schedulerModule : 1},
                                schedulerModule):
                return True

        return False

    def topoSort(self, schedulerModules):
        """Perform topological sort on list of modules.

        Given a list of module instances, this will perform a
        topological sort that can be used to determine the execution
        order of the give modules. The modules are checked beforehand
        for cycles. If any cycles are found, an exception is raised.

        @param schedulerModules: list of module instance to be sorted
        @return: modules in topological order; in this case the instances DO
        match the input instances.
        @raise CyclesDetectedException: if the module network has cycles.
        @todo: separate topologically independent trees
        """

        def isFinalVertex(schedulerModule, currentList):
            """Determines whether schedulerModule is a final vertex relative
            to the currentList.

            A final vertex is a vertex/module with no consumers in the
            currentList.

            @param schedulerModule: module whose finalness is determined
            @param currentList: list relative to which the finalness is
            determined.
            @return: True if final, False if not.
            """
            # find consumers
            consumers = self.getConsumerModules(schedulerModule)
            # now check if any one of these consumers is present in currentList
            for consumer in consumers:
                for cm in currentList:
                    if consumer.matches(cm):
                        return False
            return True

        if self.detectCycles(schedulerModules):
            raise CyclesDetectedException(
                'Cycles detected in network. Unable to schedule.')

        # keep on finding final vertices, move to final list
        scheduleList = [] # this will be the actual schedules list
        tempList = schedulerModules[:] # copy of list so we can futz around
        while tempList:
            # peel off everything with no consumers left in tempList
            finalVertices = [sm for sm in tempList
                             if isFinalVertex(sm, tempList)]
            scheduleList.extend(finalVertices)
            for fv in finalVertices:
                tempList.remove(fv)

        # final vertices were appended first, so reverse to obtain
        # producers-first execution order
        scheduleList.reverse()
        return scheduleList

    def execute_modules(self, schedulerModules):
        """Execute the modules in schedulerModules in topological order.

        For each module, all output is transferred from its consumers and then
        it's executed. I'm still thinking about the implications of doing
        this the other way round, i.e. each module is executed and its output
        is transferred.

        Called by SchedulerProxy.execute_modules().

        @param schedulerModules: list of modules that should be executed in
        order.
        @raise CyclesDetectedException: This exception is raised if any
        cycles are detected in the modules that have to be executed.
        @todo: add start_module parameter, execution skips all modules before
        this module in the topologically sorted execution list.
        """
        # stop concurrent calls of execute_modules: if the mutex is
        # already held, this call silently bails out (the execution
        # request is dropped, not queued)
        if not Scheduler._execute_mutex.testandset():
            return

        # first remove all blocked modules from the list, before we do any
        # kind of analysis.
        # NOTE(review): this mutates the caller's list in place.
        blocked_module_indices = []
        for i in range(len(schedulerModules)):
            if schedulerModules[i].meta_module.blocked:
                blocked_module_indices.append(i)

        # delete from the back so the remaining indices stay valid
        blocked_module_indices.reverse()
        for i in blocked_module_indices:
            del(schedulerModules[i])

        # finally start with execution.
        try:
            if self.detectCycles(schedulerModules):
                raise CyclesDetectedException(
                    'Cycles detected in selected network modules. '
                    'Unable to execute.')

            # this will also check for cycles...
            schedList = self.topoSort(schedulerModules)
            mm = self._devideApp.get_module_manager()
            for sm in schedList:
                print "### sched:", sm.meta_module.instance.__class__.__name__
                # find all producer modules
                producers = self.getProducerModules(sm)

                # transfer relevant data
                for pmodule, output_index, input_index in producers:
                    # the ModuleManager's timestamps decide whether this
                    # connection actually needs a (re-)transfer
                    if mm.should_transfer_output(
                        pmodule.meta_module, output_index,
                        sm.meta_module, input_index):

                        print 'transferring output: %s:%d to %s:%d' % \
                              (pmodule.meta_module.instance.__class__.__name__,
                               output_index,
                               sm.meta_module.instance.__class__.__name__,
                               input_index)

                        mm.transfer_output(pmodule.meta_module, output_index,
                                           sm.meta_module, input_index)

                # finally: execute module if
                # ModuleManager thinks it's necessary
                if mm.should_execute_module(sm.meta_module, sm.part):
                    print 'executing part %d of %s' % \
                          (sm.part, sm.meta_module.instance.__class__.__name__)
                    mm.execute_module(sm.meta_module, sm.part)

        finally:
            # in whichever way execution terminates, we have to unlock the
            # mutex.
            Scheduler._execute_mutex.unlock()
#########################################################################
class EventDrivenScheduler(Scheduler):
    """Scheduler with purely event-driven semantics.

    All behaviour lives in the Scheduler base class; this subclass
    only gives the default mode an explicit name.
    """
    pass
#########################################################################
class HybridScheduler(Scheduler):
def execute_modules(self, schedulerModules):
"""Execute the modules in schedulerModules according to hybrid
scheduling strategy. See documentation in Scheduler class and
the paper [1] for a complete description.
@param schedulerModules: list of modules that should be executed in
order.
@raise CyclesDetectedException: This exception is raised if any
cycles are detected in the modules that have to be executed.
@todo: add start_module parameter, execution skips all modules before
this module in the topologically sorted execution list.
"""
# stop concurrent calls of execute_modules.
if not Scheduler._execute_mutex.testandset():
return
# first remove all blocked modules from the list, before we do any
# kind of analysis.
blocked_module_indices = []
for i in range(len(schedulerModules)):
if schedulerModules[i].meta_module.blocked:
blocked_module_indices.append(i)
blocked_module_indices.reverse()
for i in blocked_module_indices:
del(schedulerModules[i])
# finally start with execution.
try:
if self.detectCycles(schedulerModules):
raise CyclesDetectedException(
'Cycles detected in selected network modules. '
'Unable to execute.')
# this will also check for cycles...
schedList = self.topoSort(schedulerModules)
mm = self._devideApp.get_module_manager()
# find largest streamable subsets
streamables_dict, streamable_subsets = \
self.find_streamable_subsets(schedulerModules)
for sm in schedList:
smt = (sm.meta_module, sm.part)
if smt in streamables_dict:
streaming_module = True
print "### streaming ",
else:
streaming_module = False
print "### ",
print "sched:", sm.meta_module.instance.__class__.__name__
# find all producer modules
producers = self.getProducerModules(sm)
# transfer relevant data
for pmodule, output_index, input_index in producers:
pmt = (pmodule.meta_module, pmodule.part)
if streaming_module and pmt in streamables_dict:
streaming_transfer = True
else:
streaming_transfer = False
if mm.should_transfer_output(
pmodule.meta_module, output_index,
sm.meta_module, input_index,
streaming_transfer):
if streaming_transfer:
print 'streaming ',
print 'transferring output: %s:%d to %s:%d' % \
(pmodule.meta_module.instance.__class__.__name__,
output_index,
sm.meta_module.instance.__class__.__name__,
input_index)
mm.transfer_output(pmodule.meta_module, output_index,
sm.meta_module, input_index,
streaming_transfer)
# finally: execute module if
# ModuleManager thinks it's necessary
if streaming_module:
if streamables_dict[smt] == 2:
# terminating module in streamable subset
if mm.should_execute_module(sm.meta_module, sm.part):
print 'streaming executing part %d of %s' % \
(sm.part, \
sm.meta_module.instance.__class__.__name__)
mm.execute_module(sm.meta_module, sm.part,
streaming=True)
# if the module has been
# streaming_executed, it has also been
# touched.
sm.meta_module.streaming_touch_timestamp_module(sm.part)
# make sure we touch the module even if we don't
# execute it. this is used in the transfer
# caching
elif sm.meta_module.should_touch(sm.part):
sm.meta_module.streaming_touch_timestamp_module(sm.part)
else:
# this is not a streaming module, normal semantics
if mm.should_execute_module(sm.meta_module, sm.part):
print 'executing part %d of %s' % \
(sm.part, \
sm.meta_module.instance.__class__.__name__)
mm.execute_module(sm.meta_module, sm.part)
finally:
# in whichever way execution terminates, we have to unlock the
# mutex.
Scheduler._execute_mutex.unlock()
def find_streamable_subsets(self, scheduler_modules):
"""
Algorithm for finding streamable subsets in a network. Also
see Algorithm 2 in the paper [1].
@param scheduler_modules: topologically sorted list of
SchedulerModuleWrapper instances (S).
@return: dictionary of streamable MetaModule bindings (V_ss)
mapping to 1 (non-terminating) or 2 (terminating) and list of
streamable subsets, each an array (M_ss).
"""
# get all streaming modules from S and keep topological
# ordering (S_s == streaming_scheduler_modules)
streamable_modules = []
streamable_modules_dict = {}
for sm in scheduler_modules:
if hasattr(sm.meta_module.instance,
'streaming_execute_module'):
streamable_modules.append((sm.meta_module, sm.part))
# we want to use this to check for streamability later
streamable_modules_dict[(sm.meta_module, sm.part)] = 1
# now the fun begins:
streamables_dict = {} # this is V_ss
streamable_subsets = [] # M_ss
def handle_new_streamable(smt, streamable_subset):
"""Recursive method to do depth-first search for largest
streamable subset.
This is actually the infamous line 9 in the article.
@param: smt is a streamable module tuple (meta_module,
part)
"""
# get all consumers of sm
# getConsumerModules returns ad hoc wrappings!
sm = SchedulerModuleWrapper(smt[0], smt[1])
consumers = self.getConsumerModules(sm)
# if there are no consumers, per def a terminating module
if len(consumers) == 0:
terminating = True
else:
# check if ANY of the the consumers is non-streamable
# in which case sm is also terminating
terminating = False
for c in consumers:
if (c.meta_module,c.part) not in \
streamable_modules_dict:
terminating = True
break
if terminating:
# set sm as the terminating module
streamables_dict[smt] = 2
else:
# add all consumers to streamable_subset M
ctuples = [(i.meta_module, i.part) for i in consumers]
streamable_subset.append(ctuples)
# also add them all to V_ss
streamables_dict.fromkeys(ctuples, 1)
for c in consumers:
handle_new_streamable((c.meta_module, c.part),
streamable_subset)
# smt is a streamable module tuple (meta_module, part)
for smt in streamable_modules:
if not smt in streamables_dict:
# this is a NEW streamable module!
# create new streamable subset
streamable_subset = [smt]
streamables_dict[smt] = 1
# handle this new streamable
handle_new_streamable(smt, streamable_subset)
# handle_new_streamable recursion is done, add
# this subset list of subsets
streamable_subsets.append(streamable_subset)
return streamables_dict, streamable_subsets
#########################################################################
class SchedulerProxy:
    """Single front-end for all scheduler implementations.

    Each scheduling mode is implemented by a different class; code
    elsewhere talks to one common instance of this proxy, which
    dispatches to the scheduler matching the current mode.
    """

    EVENT_DRIVEN_MODE = 0
    HYBRID_MODE = 1

    def __init__(self, devide_app):
        self.event_driven_scheduler = EventDrivenScheduler(devide_app)
        self.hybrid_scheduler = HybridScheduler(devide_app)
        # start out in the default (event-driven) mode
        self.mode = SchedulerProxy.EVENT_DRIVEN_MODE

    def get_scheduler(self):
        """Return the scheduler instance matching the current mode."""
        # mode doubles as an index: 0 -> event-driven, 1 -> hybrid
        schedulers = (self.event_driven_scheduler, self.hybrid_scheduler)
        return schedulers[self.mode]

    def execute_modules(self, scheduler_modules):
        """Thunk through to the current scheduler's execute_modules.

        This is called by NetworkManager.execute_network()
        """
        self.get_scheduler().execute_modules(scheduler_modules)

    def meta_modules_to_scheduler_modules(self, meta_modules):
        """Thunk through to the current scheduler's wrapper factory."""
        scheduler = self.get_scheduler()
        return scheduler.meta_modules_to_scheduler_modules(meta_modules)
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing."""

    # Map of API paths to the capture files served for them.
    _PATHS = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        """Serve the capture file matching the requested API path."""
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        # NOTE: the former query-string parsing here was dead code with
        # a latent bug (iterating the parse_qs dict yields keys, so
        # pair[0] was the first CHARACTER of each key) and was removed.
        response = self.handle_url(url)
        # handle_url returns None after sending a 404; only copy the
        # body on success.  (was: 'response != None')
        if response is not None:
            self.send_200()
            shutil.copyfileobj(response, self.wfile)
            self.wfile.close()

    # POST requests get exactly the same treatment as GETs.
    do_POST = do_GET

    def handle_url(self, url):
        """Return an open capture file for url, or send a 404 and
        return None when the path is unknown.
        """
        path = self._PATHS.get(url.path)
        if path is None:
            self.send_error(404)
        else:
            logging.warn('Using: %s' % path)
            return open(path)

    def send_200(self):
        """Send a 200 OK response with a text/xml content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Generate a parser class for the XML type sample in argv[1]."""
    parsed = common.WalkNodesForAttributes(sys.argv[1])
    type_name, top_node_name, attributes = parsed
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template substitution dict for a single attribute.

    @param top_node_name: xml name of the top-level node, e.g. 'venue'
    @param name: xml name of the attribute, e.g. 'first_name'
    @param typ: attribute type marker (common.BOOLEAN, common.GROUP, a
        name in common.COMPLEX, or 'String')
    @param children: one-element list with the child type name, or
        [None] when there is no child type
    @return: dict of replacement values for the *_STANZA / PARSER
        templates
    """
    # CamelCaseClassName, e.g. 'venue_type' -> 'VenueType'
    type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
    # CamelCaseName, e.g. 'first_name' -> 'FirstName'
    camel_name = ''.join([word.capitalize() for word in name.split('_')])
    # camelCaseLocalName, e.g. 'FirstName' -> 'firstName'.
    # BUGFIX: this was camel_name.lower().capitalize(), which yields
    # 'Firstname' - neither camelCase nor the documented local name.
    attribute_name = camel_name[:1].lower() + camel_name[1:]
    # mFieldName member name, e.g. 'mFirstName'
    field_name = 'm' + camel_name
    if children[0]:
        # explicit child type: its parser is <ChildType>Parser
        sub_parser_camel_case = children[0] + 'Parser'
    else:
        # no child type: guess by dropping the trailing plural 's'
        sub_parser_camel_case = (camel_name[:-1] + 'Parser')
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': children[0]
    }
# Script entry point: expects the path of an XML type sample as argv[1].
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract the token pair from an authexchange XML response body.

    @param auth_response: raw XML response text.
    @return: (oauth_token, oauth_token_secret) tuple.
    """
    token = re.search('<oauth_token>(.*)</oauth_token>',
                      auth_response).groups()[0]
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).groups()[0]
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build and HMAC-SHA1-sign a password-exchange OAuth request.

    @param username: foursquare account name sent as fs_username.
    @param password: foursquare password sent as fs_password.
    @param consumer: oauth.OAuthConsumer holding the API key pair.
    @return: signed oauth.OAuthRequest for AUTHEXCHANGE_URL.
    """
    params = dict(fs_username=username, fs_password=password)
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters=params)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
    return oauth_request
def main():
    """Fetch the oAuth-protected URL in sys.argv[1] and print the body.

    Credentials come from ~/.oget (4 lines: consumer key/secret,
    username, password).  If no cached access token is present there,
    one is obtained via authexchange and written back as lines 5-6.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))

    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        # no cached token yet
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        # 6 lines: the file also carries a cached access token/secret
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)

    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)

    if not access_token:
        # exchange username/password for an access token
        oauth_request = create_signed_oauth_request(username, password, consumer)

        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
            body=oauth_request.to_postdata(), headers=headers)

        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)

        # cache the freshly-obtained token pair back into ~/.oget
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))

    # perform the actual signed request against the requested URL
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)

    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
        body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent='  ')


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom

# Java type names emitted by the generators.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
# 'Checkin': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Venue': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Tip': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
}

# Element types treated as nested (complex) objects; their children are
# skipped by WalkNodesForAttributes rather than flattened into attributes.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# Every recognized 'type' attribute value; anything else becomes String.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
      <attribute>value</attribute>
    </venue>

    Returns:
      type_name - The java-style name the top node will have. "Venue"
      top_node_name - unadultured name of the xml stanza, probably the type of
          java class we're creating. "venue"
      attributes - {'attribute': 'value'}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # When > 0 we are inside a COMPLEX element and skip everything until
    # its matching end tag (including the end tag itself).
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use.
            if type_name is None:
                # First element seen is the top node; derive the Java class
                # name from it ("checkin_response" -> "CheckinResponse").
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                # Record the attribute below, then skip its subtree.
                level = 1
            elif typ not in TYPES:
                # Unknown type declarations default to String.
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: keep the first declaration if a tag repeats.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
import os, sys, time

# usage: parse_log.py log-file [socket-index to focus on]

socket_filter = None
if len(sys.argv) >= 3:
    socket_filter = sys.argv[2].strip()

if socket_filter == None:
    # No socket given on the command line: pre-scan the whole log and pick
    # the socket index that logged the most delay samples.
    print "scanning for socket with the most packets"
    file = open(sys.argv[1], 'rb')

    sockets = {}

    for l in file:
        if not 'our_delay' in l: continue
        try:
            a = l.strip().split(" ")
            socket_index = a[1][:-1]
        except:
            continue
        # msvc's runtime library doesn't prefix pointers
        # with '0x'
        # if socket_index[:2] != '0x':
        # continue
        if socket_index in sockets:
            sockets[socket_index] += 1
        else:
            sockets[socket_index] = 1

    # Show the five busiest sockets, then focus on the busiest one.
    items = sockets.items()
    items.sort(lambda x, y: y[1] - x[1])

    count = 0
    for i in items:
        print '%s: %d' % (i[0], i[1])
        count += 1
        if count > 5: break

    file.close()
    socket_filter = items[0][0]
    print '\nfocusing on socket %s' % socket_filter

# Re-open the log for the main parsing pass.
file = open(sys.argv[1], 'rb')

out_file = 'utp.out%s' % socket_filter;

out = open(out_file, 'wb')

# gnuplot style strings for the various plotted series.
delay_samples = 'dots lc rgb "blue"'
delay_base = 'steps lw 2 lc rgb "purple"'
target_delay = 'steps lw 2 lc rgb "red"'
off_target = 'dots lc rgb "blue"'
cwnd = 'steps lc rgb "green"'
window_size = 'steps lc rgb "sea-green"'
rtt = 'lines lc rgb "light-blue"'
# metric name -> [axis label, gnuplot axes (x1y1=bytes, x1y2=time), style]
metrics = {
    'our_delay':['our delay (ms)', 'x1y2', delay_samples],
    'upload_rate':['send rate (B/s)', 'x1y1', 'lines'],
    'max_window':['cwnd (B)', 'x1y1', cwnd],
    'target_delay':['target delay (ms)', 'x1y2', target_delay],
    'cur_window':['bytes in-flight (B)', 'x1y1', window_size],
    'cur_window_packets':['number of packets in-flight', 'x1y2', 'steps'],
    'packet_size':['current packet size (B)', 'x1y2', 'steps'],
    'rtt':['rtt (ms)', 'x1y2', rtt],
    'off_target':['off-target (ms)', 'x1y2', off_target],
    'delay_sum':['delay sum (ms)', 'x1y2', 'steps'],
    'their_delay':['their delay (ms)', 'x1y2', delay_samples],
    'get_microseconds':['clock (us)', 'x1y1', 'steps'],
    'wnduser':['advertised window size (B)', 'x1y1', 'steps'],
    'delay_base':['delay base (us)', 'x1y1', delay_base],
    'their_delay_base':['their delay base (us)', 'x1y1', delay_base],
    'their_actual_delay':['their actual delay (us)', 'x1y1', delay_samples],
    'actual_delay':['actual_delay (us)', 'x1y1', delay_samples]
}

# Bucket width (ms) for the delay histogram.
histogram_quantization = 1
socket_index = None

# Column order of the output file, filled in from the first parsed line.
columns = []

begin = None
title = "-"

packet_loss = 0
packet_timeout = 0

delay_histogram = {}
window_size = {'0': 0, '1': 0}

# Sample input line:
# [35301484] 0x00ec1190: actual_delay:1021583 our_delay:102 their_delay:-1021345 off_target:297 max_window:2687 upload_rate:18942 delay_base:1021481154 delay_sum:-1021242 target_delay:400 acked_bytes:1441 cur_window:2882 scaled_gain:2.432

counter = 0

print "reading log file"
for l in file:
    if "UTP_Connect" in l:
        # Use the connect line as the plot title.
        title = l[:-2]
        if socket_filter != None:
            title += ' socket: %s' % socket_filter
        else:
            title += ' sum of all sockets'
        continue

    try:
        # a[0] is '[timestamp]', a[1] is 'socketptr:'.
        a = l.strip().split(" ")
        t = a[0][1:-1]
        socket_index = a[1][:-1]
    except:
        continue
    # if socket_index[:2] != '0x':
    # continue
    if socket_filter != None and socket_index != socket_filter:
        continue

    counter += 1
    if (counter % 300 == 0):
        print "\r%d " % counter,

    if "lost." in l:
        packet_loss = packet_loss + 1
        continue
    if "Packet timeout" in l:
        packet_timeout = packet_timeout + 1
        continue
    if "our_delay:" not in l:
        continue

    # used for Logf timestamps
    # t, m = t.split(".")
    # t = time.strptime(t, "%H:%M:%S")
    # t = list(t)
    # t[0] += 107
    # t = tuple(t)
    # m = float(m)
    # m /= 1000.0
    # t = time.mktime(t) + m

    # used for tick count timestamps
    t = int(t)

    if begin is None:
        begin = t
    t = t - begin
    # print time. Convert from milliseconds to seconds
    print >>out, '%f\t' % (float(t)/1000.),

    #if t > 200000:
    #    break

    # On the first data line, record the column order as encountered.
    fill_columns = not columns
    for i in a[2:]:
        try:
            n, v = i.split(':')
        except:
            continue
        v = float(v)
        if n == "our_delay":
            bucket = v / histogram_quantization
            delay_histogram[bucket] = 1 + delay_histogram.get(bucket, 0)
        if not n in metrics: continue
        if fill_columns:
            columns.append(n)
        if n == "max_window":
            # Plot the sum of both sockets' congestion windows.
            window_size[socket_index] = v
            print >>out, '%f\t' % int(reduce(lambda a,b: a+b, window_size.values())),
        else:
            print >>out, '%f\t' % v,
    # Loss/timeout counts since the previous sample, scaled for visibility.
    print >>out, float(packet_loss * 8000), float(packet_timeout * 8000)
    packet_loss = 0
    packet_timeout = 0

out.close()

# Write the delay histogram, one "bucket-center count" pair per line.
out = open('%s.histogram' % out_file, 'wb')
for d,f in delay_histogram.iteritems():
    print >>out, float(d*histogram_quantization) + histogram_quantization / 2, f
out.close()
# One entry per output chart: metric columns to draw, chart title, and the
# labels for the left (y1) and right (y2) axes.
plot = [
    {
        'data': ['upload_rate', 'max_window', 'cur_window', 'wnduser', 'cur_window_packets', 'packet_size', 'rtt'],
        'title': 'send-packet-size',
        'y1': 'Bytes',
        'y2': 'Time (ms)'
    },
    {
        'data': ['our_delay', 'max_window', 'target_delay', 'cur_window', 'wnduser', 'cur_window_packets'],
        'title': 'uploading',
        'y1': 'Bytes',
        'y2': 'Time (ms)'
    },
    {
        'data': ['our_delay', 'max_window', 'target_delay', 'cur_window', 'cur_window_packets'],
        'title': 'uploading_packets',
        'y1': 'Bytes',
        'y2': 'Time (ms)'
    },
    {
        'data': ['get_microseconds'],
        'title': 'timer',
        'y1': 'Time microseconds',
        'y2': 'Time (ms)'
    },
    {
        'data': ['their_delay', 'target_delay', 'rtt'],
        'title': 'their_delay',
        'y1': '',
        'y2': 'Time (ms)'
    },
    {
        'data': ['their_actual_delay','their_delay_base'],
        'title': 'their_delay_base',
        'y1': 'Time (us)',
        'y2': ''
    },
    {
        'data': ['our_delay', 'target_delay', 'rtt'],
        'title': 'our-delay',
        'y1': '',
        'y2': 'Time (ms)'
    },
    {
        'data': ['actual_delay', 'delay_base'],
        'title': 'our_delay_base',
        'y1': 'Time (us)',
        'y2': ''
    }
]

# Emit a gnuplot script that renders the histogram and one PNG per chart.
out = open('utp.gnuplot', 'w+')

files = ''

#print >>out, 'set xtics 0, 20'
print >>out, "set term png size 1280,800"
print >>out, 'set output "%s.delays.png"' % out_file
print >>out, 'set xrange [0:250]'
print >>out, 'set xlabel "delay (ms)"'
print >>out, 'set boxwidth 1'
print >>out, 'set style fill solid'
print >>out, 'set ylabel "number of packets"'
print >>out, 'plot "%s.histogram" using 1:2 with boxes' % out_file

print >>out, "set style data steps"
#print >>out, "set yrange [0:*]"
print >>out, "set y2range [*:*]"
files += out_file + '.delays.png '
#set hidden3d
#set title "Peer bandwidth distribution"
#set xlabel "Ratio"

for p in plot:
    print >>out, 'set title "%s %s"' % (p['title'], title)
    print >>out, 'set xlabel "time (s)"'
    print >>out, 'set ylabel "%s"' % p['y1']
    print >>out, "set tics nomirror"
    print >>out, 'set y2tics'
    print >>out, 'set y2label "%s"' % p['y2']
    print >>out, 'set xrange [0:*]'
    print >>out, "set key box"
    print >>out, "set term png size 1280,800"
    print >>out, 'set output "%s-%s.png"' % (out_file, p['title'])
    files += '%s-%s.png ' % (out_file, p['title'])
    comma = ''
    print >>out, "plot",
    for c in p['data']:
        if not c in metrics: continue
        # Column 1 is the timestamp; data columns start at 2.
        i = columns.index(c)
        print >>out, '%s"%s" using 1:%d title "%s-%s" axes %s with %s' % (comma, out_file, i + 2, metrics[c][0], metrics[c][1], metrics[c][1], metrics[c][2]),
        comma = ', '
    print >>out, ''

out.close()

# Render the charts and open them (the 'open' command is macOS-specific).
os.system("gnuplot utp.gnuplot")
os.system("open %s" % files)
| Python |
# -*- coding: utf-8 -*-
# setuptools build script: compiles libutp's C++ sources into a shared
# library loadable by the ctypes bindings.
import os
from setuptools import setup, Library
from utp import VERSION

# libutp sources, relative to this setup script.
sources = [os.path.join("..", "utp.cpp"),
           os.path.join("..", "utp_utils.cpp")]
include_dirs = ["..", os.path.join("..", "utp_config_lib")]
define_macros = []
libraries = []
extra_link_args = []
if os.name == "nt":
    define_macros.append(("WIN32", 1))
    # Winsock, plus the inet_ntop shim missing from older MSVC runtimes.
    libraries.append("ws2_32")
    sources.append(os.path.join("..", "win32_inet_ntop.cpp"))
    extra_link_args.append('/DEF:"../utp.def"')
else:
    define_macros.append(("POSIX", 1))
    # Probe whether this toolchain needs/has librt before linking it.
    r = os.system('echo "int main() {}"|gcc -x c - -lrt 2>/dev/null')
    if r == 0:
        libraries.append("rt")

# http://bugs.python.org/issue9023
sources = [os.path.abspath(x) for x in sources]

ext = Library(name="utp",
              sources=sources,
              include_dirs=include_dirs,
              libraries=libraries,
              define_macros=define_macros,
              extra_link_args=extra_link_args
              )

setup(name="utp",
      version=VERSION,
      description="The uTorrent Transport Protocol library",
      author="Greg Hazel",
      author_email="greg@bittorrent.com",
      maintainer="Greg Hazel",
      maintainer_email="greg@bittorrent.com",
      url="http://github.com/bittorrent/libutp",
      packages=['utp',
                'utp.tests'],
      ext_modules=[ext],
      zip_safe=False,
      license='MIT'
      )
| Python |
import os
import ctypes
import socket
import platform
from utp.utp_h import *
from utp.sockaddr_types import *

# The compiled libutp shared library is expected next to this package.
basepath = os.path.join(os.path.dirname(__file__), "..")
if platform.system() == "Windows":
    utp = ctypes.cdll.LoadLibrary(os.path.join(basepath, "utp.dll"))
elif platform.system() == "Darwin":
    utp = ctypes.cdll.LoadLibrary(os.path.join(basepath, "libutp.dylib"))
else:
    utp = ctypes.cdll.LoadLibrary(os.path.join(basepath, "libutp.so"))
# Imported after 'utp' exists because inet_ntop falls back to the library.
from utp.inet_ntop import inet_ntop, inet_pton

# Friendly aliases for the UTP_STATE_* constants from utp_h.
CONNECT = UTP_STATE_CONNECT
WRITABLE = UTP_STATE_WRITABLE
EOF = UTP_STATE_EOF
DESTROYING = UTP_STATE_DESTROYING

# Must be called periodically to drive libutp's retransmit/keepalive timers.
CheckTimeouts = utp.UTP_CheckTimeouts
def to_sockaddr(ip, port):
    # Build a zeroed ctypes sockaddr_in (IPv4) or sockaddr_in6 (IPv6) from
    # an (ip, port) pair; the family is chosen by whether ip contains ':'.
    if ":" not in ip:
        sin = sockaddr_in()
        ctypes.memset(ctypes.byref(sin), 0, ctypes.sizeof(sin))
        sin.sin_family = socket.AF_INET
        # NOTE(review): inet_addr is not defined in this module; presumably
        # it comes from the sockaddr_types star-import — verify, otherwise
        # the IPv4 path raises NameError.
        sin.sin_addr.s_addr = inet_addr(ip)
        sin.sin_port = socket.htons(port)
        return sin
    else:
        sin6 = sockaddr_in6()
        ctypes.memset(ctypes.byref(sin6), 0, ctypes.sizeof(sin6))
        sin6.sin6_family = socket.AF_INET6
        d = inet_pton(socket.AF_INET6, ip)
        # it seems like there should be a better way to do this...
        ctypes.memmove(sin6.sin6_addr.Byte, d, ctypes.sizeof(sin6.sin6_addr.Byte))
        sin6.sin6_port = socket.htons(port)
        return sin6
def from_lpsockaddr(sa, salen):
    """Convert a ctypes pointer to a sockaddr (storage) into (ip, port).

    sa is a pointer whose pointee exposes an ss_family field (e.g.
    SOCKADDR_STORAGE); salen is the valid byte length. Raises ValueError
    for address families other than AF_INET/AF_INET6.
    """
    if sa.contents.ss_family == socket.AF_INET:
        assert salen >= ctypes.sizeof(sockaddr_in)
        sin = ctypes.cast(sa, psockaddr_in).contents
        # NOTE(review): this stringifies whatever sin_addr.s_addr holds;
        # confirm sockaddr_types renders it dotted-quad rather than as a
        # raw integer.
        ip = str(sin.sin_addr.s_addr)
        port = socket.ntohs(sin.sin_port)
    elif sa.contents.ss_family == socket.AF_INET6:
        assert salen >= ctypes.sizeof(sockaddr_in6)
        sin6 = ctypes.cast(sa, psockaddr_in6).contents
        ip = inet_ntop(socket.AF_INET6, sin6.sin6_addr.Byte)
        port = socket.ntohs(sin6.sin6_port)
    else:
        # Bug fix: the field is ss_family (see SOCKADDR_STORAGE); reading
        # 'family' raised AttributeError instead of the intended ValueError.
        raise ValueError("unknown address family " + str(sa.contents.ss_family))
    return (ip, port)
def wrap_send_to(f):
    """Adapt a Python send callback to libutp's SendToProc signature.

    The returned function drops the userdata argument, copies the raw
    packet bytes out of C memory, and passes (payload, (ip, port)) to f.
    """
    def unwrap_send_to(userdata, ptr, count, to, tolen):
        storage = ctypes.cast(to, LPSOCKADDR_STORAGE)
        payload = ctypes.string_at(ptr, count)
        f(payload, from_lpsockaddr(storage, tolen))
    return unwrap_send_to
def wrap_callback(f):
    """Return a shim that discards libutp's leading userdata argument
    before delegating to f, passing remaining arguments through."""
    def unwrap_callback(userdata, *args, **kwargs):
        return f(*args, **kwargs)
    return unwrap_callback
class Socket(object):
    """Python-side handle for one libutp UTPSocket."""
    def set_socket(self, utp_socket, send_to_proc):
        # Keep send_to_proc referenced so the ctypes callback object
        # outlives the pointer libutp holds to it.
        self.utp_socket = utp_socket
        self.send_to_proc = send_to_proc
    def init_outgoing(self, send_to, addr):
        # Create an outgoing libutp socket targeting addr (an (ip, port)
        # tuple); raw datagrams are emitted through the send_to callable.
        send_to_proc = SendToProc(wrap_send_to(send_to))
        sin = to_sockaddr(*addr)
        utp_socket = utp.UTP_Create(send_to_proc, ctypes.py_object(self),
                                    ctypes.byref(sin), ctypes.sizeof(sin))
        self.set_socket(utp_socket, send_to_proc)
    def set_callbacks(self, callbacks):
        # Register a Callbacks-style object with libutp. The function
        # table is stored on self so its ctypes thunks stay alive.
        self.callbacks = callbacks
        f = UTPFunctionTable(UTPOnReadProc(wrap_callback(self.on_read)),
                             UTPOnWriteProc(wrap_callback(self.on_write)),
                             UTPGetRBSize(wrap_callback(callbacks.get_rb_size)),
                             UTPOnStateChangeProc(wrap_callback(callbacks.on_state)),
                             UTPOnErrorProc(wrap_callback(callbacks.on_error)),
                             UTPOnOverheadProc(wrap_callback(callbacks.on_overhead)))
        self.functable = f
        utp.UTP_SetCallbacks(self.utp_socket,
                             ctypes.byref(f), ctypes.py_object(self))
    def on_read(self, bytes, count):
        # Copy received payload out of the C buffer before handing it up.
        self.callbacks.on_read(ctypes.string_at(bytes, count))
    def on_write(self, bytes, count):
        # Pull `count` bytes from the application and copy them into the
        # buffer libutp provided.
        d = self.callbacks.on_write(count)
        dst = ctypes.cast(bytes, ctypes.c_void_p).value
        ctypes.memmove(dst, d, count)
    def connect(self):
        # Callbacks are mandatory; libutp will invoke them immediately.
        if not hasattr(self, "callbacks"):
            raise ValueError("Callbacks must be set before connecting")
        utp.UTP_Connect(self.utp_socket)
    def getpeername(self):
        # Return the remote (ip, port) of this socket.
        sa = SOCKADDR_STORAGE()
        salen = socklen_t(ctypes.sizeof(sa))
        utp.UTP_GetPeerName(self.utp_socket, ctypes.byref(sa), ctypes.byref(salen))
        return from_lpsockaddr(ctypes.pointer(sa), salen.value)
    def rbdrained(self):
        # Inform libutp that the application's receive buffer is empty.
        utp.UTP_RBDrained(self.utp_socket)
    def write(self, to_write):
        # Ask libutp to send to_write bytes; data is pulled via on_write.
        return utp.UTP_Write(self.utp_socket, to_write)
    def close(self):
        utp.UTP_Close(self.utp_socket)
# Interface expected by Socket.set_callbacks. Subclassing is optional —
# any object providing these methods works.
class Callbacks(object):
    """No-op default implementation of every libutp callback."""
    def on_read(self, data):
        """Payload bytes arrived from the peer."""
        pass
    def on_write(self, count):
        """Supply the next `count` bytes to transmit."""
        pass
    def get_rb_size(self):
        """Report bytes currently held in the receive buffer."""
        return 0
    def on_state(self, state):
        """Socket state changed (CONNECT/WRITABLE/EOF/DESTROYING)."""
        pass
    def on_error(self, errcode):
        """The socket failed with the given errno."""
        pass
    def on_overhead(self, send, count, type):
        """Protocol overhead accounting notification."""
        pass
def wrap_incoming(f, send_to_proc):
    """Adapt an incoming-connection callback for libutp: wrap the raw
    UTPSocket handle in a Socket (sharing send_to_proc) before calling f."""
    def unwrap_incoming(userdata, utp_socket):
        wrapper = Socket()
        wrapper.set_socket(utp_socket, send_to_proc)
        f(wrapper)
    return unwrap_incoming
def IsIncomingUTP(incoming_connection, send_to, d, addr):
    # Feed one received UDP datagram `d` (from `addr`) to libutp.
    # incoming_connection, when not None, is invoked with a new Socket if
    # the datagram initiates a connection; send_to is used for replies.
    # Returns libutp's UTP_IsIncomingUTP result.
    send_to_proc = SendToProc(wrap_send_to(send_to))
    if incoming_connection:
        incoming_proc = UTPGotIncomingConnection(wrap_incoming(incoming_connection, send_to_proc))
    else:
        # Not accepting: libutp still processes packets for known sockets.
        incoming_proc = None
    sa = to_sockaddr(*addr)
    return utp.UTP_IsIncomingUTP(incoming_proc, send_to_proc, 1, d, len(d),
                                 ctypes.byref(sa), ctypes.sizeof(sa))
| Python |
# This module can go away when Python supports IPv6 (meaning inet_ntop and inet_pton on all platforms)
# http://bugs.python.org/issue7171
import socket
import ctypes
from utp.utp_socket import utp
# XXX: the exception types vary from socket.inet_ntop
def inet_ntop(address_family, packed_ip):
    """Format a packed binary address as text using libutp's inet_ntop."""
    if address_family == socket.AF_INET:
        # 15 chars for a dotted quad, plus the terminating null byte.
        buf_len, packed_len = 16, 4
    elif address_family == socket.AF_INET6:
        # 45 chars for an IPv6 address with embedded IPv4 address,
        # 11 for a scope id, plus the terminating null byte.
        buf_len, packed_len = 57, 16
    else:
        raise ValueError("unknown address family " + str(address_family))
    if len(packed_ip) != packed_len:
        raise ValueError("invalid length of packed IP address string")
    buf = ctypes.create_string_buffer(buf_len)
    if utp.inet_ntop(address_family, packed_ip, buf, buf_len) is None:
        raise ValueError
    return buf.value
# XXX: the exception types vary from socket.inet_pton
def inet_pton(address_family, ip_string):
    """Parse a textual address into packed bytes using libutp's inet_pton."""
    if address_family == socket.AF_INET:
        packed_len = 4
    elif address_family == socket.AF_INET6:
        packed_len = 16
    else:
        raise ValueError("unknown address family " + str(address_family))
    out = ctypes.create_string_buffer(packed_len)
    # inet_pton returns 1 on success.
    if utp.inet_pton(address_family, ip_string.encode(), out) != 1:
        raise ValueError("illegal IP address string passed to inet_pton")
    return out.raw
# Prefer the platform's native implementations when the socket module
# provides them; otherwise keep the libutp-backed versions defined above.
inet_ntop = getattr(socket, "inet_ntop", inet_ntop)
inet_pton = getattr(socket, "inet_pton", inet_pton)
| Python |
import ctypes
from utp.sockaddr_types import *

# hork
if not hasattr(ctypes, "c_bool"):
    # Python < 2.6 lacks c_bool; a single byte is ABI-compatible here.
    ctypes.c_bool = ctypes.c_byte

# Lots of stuff which has to be kept in sync with utp.h...
# I wish ctypes had a C header parser.

# Socket states delivered via UTPOnStateChangeProc.
UTP_STATE_CONNECT = 1
UTP_STATE_WRITABLE = 2
UTP_STATE_EOF = 3
UTP_STATE_DESTROYING = 4

# typedef void UTPOnReadProc(void *userdata, const byte *bytes, size_t count);
UTPOnReadProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_byte), ctypes.c_size_t)
# typedef void UTPOnWriteProc(void *userdata, byte *bytes, size_t count);
UTPOnWriteProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_byte), ctypes.c_size_t)
# typedef size_t UTPGetRBSize(void *userdata);
UTPGetRBSize = ctypes.CFUNCTYPE(ctypes.c_size_t, ctypes.c_void_p)
# typedef void UTPOnStateChangeProc(void *userdata, int state);
UTPOnStateChangeProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int)
# typedef void UTPOnErrorProc(void *userdata, int errcode);
UTPOnErrorProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int)
# typedef void UTPOnOverheadProc(void *userdata, bool send, size_t count, int type);
UTPOnOverheadProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_bool, ctypes.c_size_t, ctypes.c_int)

class UTPFunctionTable(ctypes.Structure):
    # Mirrors struct UTPFunctionTable in utp.h; field order is ABI —
    # do not reorder.
    _fields_ = (
        ("on_read", UTPOnReadProc),
        ("on_write", UTPOnWriteProc),
        ("get_rb_size", UTPGetRBSize),
        ("on_state", UTPOnStateChangeProc),
        ("on_error", UTPOnErrorProc),
        ("on_overhead", UTPOnOverheadProc),
    )

# typedef void UTPGotIncomingConnection(UTPSocket* s);
UTPGotIncomingConnection = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
# typedef void SendToProc(void *userdata, const byte *p, size_t len, const struct sockaddr *to, socklen_t tolen);
SendToProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.POINTER(ctypes.c_byte), ctypes.c_size_t, LPSOCKADDR, socklen_t)
| Python |
VERSION = '0.1'
| Python |
import sys
import utp.utp_socket as utp
import types
import socket
from cStringIO import StringIO
from zope.interface import implements
from twisted.python import failure, log
from twisted.python.util import unsignedID
from twisted.internet import abstract, main, interfaces, error, base, task
from twisted.internet import address, defer
from twisted.internet.tcp import ECONNRESET
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.protocol import DatagramProtocol
def makeAddr(addr):
    """Build a twisted IPv4Address ('UDP') from a (host, port) tuple."""
    host, port = addr
    return address.IPv4Address('UDP', host, port, 'INET')
# Note: the faildict default is a deliberate module-lifetime cache of the
# two common Failure objects (mirrors twisted's internal helper).
def _disconnectSelectable(selectable, why, isRead, faildict={
    error.ConnectionDone: failure.Failure(error.ConnectionDone()),
    error.ConnectionLost: failure.Failure(error.ConnectionLost())
    }):
    """
    Utility function for disconnecting a selectable.
    Supports half-close notification, isRead should be boolean indicating
    whether error resulted from doRead().
    """
    f = faildict.get(why.__class__)
    if f:
        # Clean shutdown on the read side of a half-closeable descriptor
        # only loses the read connection; everything else is a full close.
        if (isRead and why.__class__ == error.ConnectionDone
            and interfaces.IHalfCloseableDescriptor.providedBy(selectable)):
            selectable.readConnectionLost(f)
        else:
            selectable.connectionLost(f)
    else:
        selectable.connectionLost(failure.Failure(why))
class Connection(abstract.FileDescriptor, utp.Callbacks):
    """Twisted transport backed by one libutp socket.

    Serves double duty: it is the FileDescriptor-style transport handed to
    the protocol AND the callbacks object registered with libutp.
    """
    def __init__(self, adapter, utp_socket, reactor):
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        # Registers us with the reactor's libutp timeout loop.
        self.reactor.addUTPConnection(self)
        self.adapter = adapter
        self.protocol = None
        self.utp_socket = utp_socket
        self.utp_socket.set_callbacks(self)
        self.writeTriggered = False
        self.writing = False
        self.reading = False
    # Class-level default until a protocol is attached.
    logstr = "Uninitialized"
    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread.
        """
        return self.logstr
    def on_read(self, data):
        # libutp callback: stash the payload and deliver it through the
        # normal doRead path so logging/error handling behave as usual.
        if self.reading:
            assert not hasattr(self, "_readBuffer")
            self._readBuffer = data
            log.callWithLogger(self, self._doReadOrWrite, "doRead")
    def doRead(self):
        data = self._readBuffer
        del self._readBuffer
        self.protocol.dataReceived(data)
    def get_rb_size(self):
        # TODO: extend producer/consumer interfaces in Twisted to support
        # fetching the number of bytes before a pauseProducing would happen.
        # Then this number, x, would be used like this: rcvbuf - min(x, rcvbuf)
        # (so that: rcvbuf-(rcvbuf-x) == x)
        return 0
    def on_write(self, count):
        # libutp callback: hand back the next `count` bytes queued by
        # writeSomeData, consuming them from the pending buffer.
        d = buffer(self._writeBuffer, 0, count)
        self._writeBuffer = buffer(self._writeBuffer, count)
        return str(d)
    def writeSomeData(self, data):
        """
        Write as much as possible of the given data to this UTP connection.
        The number of bytes successfully written is returned.
        """
        if not hasattr(self, "utp_socket"):
            return main.CONNECTION_LOST
        assert not hasattr(self, "_writeBuffer")
        self._writeBuffer = data
        self.utp_socket.write(len(data))
        # on_write consumed from _writeBuffer; whatever remains was not sent.
        sent = len(data) - len(self._writeBuffer)
        del self._writeBuffer
        return sent
    def on_state(self, state):
        # libutp callback: react to socket state transitions.
        if state == utp.CONNECT:
            self._connectDone()
        elif state == utp.WRITABLE:
            if self.writing:
                self.triggerWrite()
        elif state == utp.EOF:
            self.loseConnection()
        elif state == utp.DESTROYING:
            # libutp is finished with the socket; complete the teardown
            # started in connectionLost.
            self.reactor.removeUTPConnection(self)
            df = maybeDeferred(self.adapter.removeSocket, self.dying_utp_socket)
            df.addCallback(self._finishConnectionLost)
            del self.dying_utp_socket
    def on_error(self, errcode):
        # libutp callback: map the errno onto a twisted failure.
        if errcode == ECONNRESET:
            err = main.CONNECTION_LOST
        else:
            err = error.errnoMapping.get(errcode, errcode)
        self.connectionLost(failure.Failure(err))
    def stopReading(self):
        self.reading = False
    def stopWriting(self):
        self.writing = False
    def startReading(self):
        self.reading = True
    def _doReadOrWrite(self, method):
        # Mimics the reactor's read/write dispatch, including translating
        # exceptions/return values into a disconnect.
        try:
            why = getattr(self, method)()
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            _disconnectSelectable(self, why, method=="doRead")
    def triggerWrite(self):
        self.writeTriggered = False
        log.callWithLogger(self, self._doReadOrWrite, "doWrite")
    def startWriting(self):
        self.writing = True
        # UTP socket write state is edge triggered, so we may or may not be
        # writable right now. So, just try it. We use reactor.callLater so
        # functions like abstract.FileDescriptor.loseConnection don't start
        # doWrite before setting self.disconnecting to True.
        if not self.writeTriggered:
            self.writeTriggered = True
            self.reactor.callLater(0, self.triggerWrite)
    # These are here because abstract.FileDescriptor claims to implement
    # IHalfCloseableDescriptor, but we can not support IHalfCloseableProtocol
    def writeConnectionLost(self, reason):
        self.connectionLost(reason)
    # These are here because abstract.FileDescriptor claims to implement
    # IHalfCloseableDescriptor, but we can not support IHalfCloseableProtocol
    def readConnectionLost(self, reason):
        self.connectionLost(reason)
    def connectionLost(self, reason):
        # Close the libutp socket; cleanup finishes asynchronously when
        # libutp reports DESTROYING (see on_state). Returns a Deferred
        # that fires when teardown completes, if a socket was open.
        abstract.FileDescriptor.connectionLost(self, reason)
        if hasattr(self, "utp_socket"):
            self.reason = reason
            self.utp_socket.close()
            self.dying_utp_socket = self.utp_socket
            del self.utp_socket
            self.closing_df = Deferred()
        if hasattr(self, "closing_df"):
            return self.closing_df
    def _finishConnectionLost(self, r):
        # Final teardown step: notify the protocol, then fire closing_df.
        protocol = self.protocol
        reason = self.reason
        df = self.closing_df
        del self.protocol
        del self.reason
        del self.closing_df
        if protocol:
            protocol.connectionLost(reason)
        return df.callback(r)
    def getPeer(self):
        return makeAddr(self.utp_socket.getpeername())
    def getHost(self):
        return self.adapter.getHost()
class Client(Connection):
    """Outgoing UTP connection: resolves the address, then connects."""
    def __init__(self, host, port, connector, adapter, reactor=None, soError=None):
        # Connection.__init__ is invoked later in doConnect
        self.connector = connector
        self.addr = (host, port)
        self.adapter = adapter
        self.reactor = reactor
        # A pre-set socket error makes the connect attempt fail immediately.
        self.soError = soError
        # ack, twisted. what the heck.
        self.reactor.callLater(0, self.resolveAddress)
    def __repr__(self):
        s = '<%s to %s at %x>' % (self.__class__, self.addr, unsignedID(self))
        return s
    def stopConnecting(self):
        """Stop attempt to connect."""
        return self.failIfNotConnected(error.UserError())
    def failIfNotConnected(self, err):
        """
        Generic method called when the attemps to connect failed. It basically
        cleans everything it can: call connectionFailed, stop read and write,
        delete socket related members.
        """
        if (self.connected or self.disconnected or
            not hasattr(self, "connector")):
            return
        # HM: maybe call loseConnection, maybe make a new function
        self.disconnecting = 1
        reason = failure.Failure(err)
        # we might not have an adapter if there was a bind error
        # but we need to notify the adapter if we failed before connecting
        stop = (self.adapter and not hasattr(self, "utp_socket"))
        df = maybeDeferred(Connection.connectionLost, self, reason)
        if stop:
            df.addCallback(lambda r: self.adapter.maybeStopUDPPort())
        def more(r):
            self.connector.connectionFailed(reason)
            del self.connector
            self.disconnecting = 0
        df.addCallback(more)
        return df
    def resolveAddress(self):
        # IP literals short-circuit; hostnames resolve asynchronously.
        if abstract.isIPAddress(self.addr[0]):
            self._setRealAddress(self.addr[0])
        else:
            d = self.reactor.resolve(self.addr[0])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)
    def _setRealAddress(self, address):
        self.realAddress = (address, self.addr[1])
        self.doConnect()
    def doConnect(self):
        """I connect the socket.
        Then, call the protocol's makeConnection, and start waiting for data.
        """
        if self.disconnecting or not hasattr(self, "connector"):
            # this happens when the connection was stopped but doConnect
            # was scheduled via the resolveAddress callLater
            return
        if self.soError:
            # The UDP bind already failed; report it as a connect failure.
            self.failIfNotConnected(self.soError)
            return
        utp_socket = utp.Socket()
        utp_socket.init_outgoing(self.adapter.udpPort.write, self.realAddress)
        self.adapter.addSocket(utp_socket)
        Connection.__init__(self, self.adapter, utp_socket, self.reactor)
        utp_socket.connect()
    def _connectDone(self):
        # Invoked by Connection.on_state when libutp reports CONNECT.
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.connected = 1
        self.logstr = self.protocol.__class__.__name__ + ",client"
        self.startReading()
        self.protocol.makeConnection(self)
    def connectionLost(self, reason):
        if not self.connected:
            self.failIfNotConnected(error.ConnectError(string=reason))
        else:
            df = maybeDeferred(Connection.connectionLost, self, reason)
            def more(r):
                self.connector.connectionLost(reason)
            df.addCallback(more)
class Server(Connection):
    """Server-side Connection created for an accepted incoming UTP socket."""
    def __init__(self, utp_socket, protocol, adapter, sessionno, reactor):
        Connection.__init__(self, adapter, utp_socket, reactor)
        self.protocol = protocol
        self.sessionno = sessionno
        self.hostname = self.getPeer().host
        # Precompute the log prefix and repr for this session.
        proto_name = self.protocol.__class__.__name__
        self.logstr = "%s,%s,%s" % (proto_name, sessionno, self.hostname)
        self.repstr = "<%s #%s on %s>" % (proto_name,
                                          self.sessionno,
                                          self.adapter.udpPort._realPortNumber)
        self.startReading()
        self.connected = 1
    def __repr__(self):
        """A string representation of this connection.
        """
        return self.repstr
class Connector(base.BaseConnector):
    """Connector that makes outgoing UTP connections through an Adapter."""
    def __init__(self, host, port, factory, adapter, timeout, reactor=None):
        self.host = host
        # Service names ("http") are accepted alongside numeric ports.
        if isinstance(port, types.StringTypes):
            try:
                port = socket.getservbyname(port, 'tcp')
            except socket.error:
                e = sys.exc_info()[1]
                raise error.ServiceNameUnknownError(string="%s (%r)" % (e, port))
        self.port = port
        self.adapter = adapter
        # When set to a socket error, the connect attempt fails immediately.
        self.soError = None
        base.BaseConnector.__init__(self, factory, timeout, reactor)
    def _makeTransport(self):
        """Create the Client transport for this connection attempt."""
        return Client(self.host, self.port, self, self.adapter,
                      self.reactor, self.soError)
    def getDestination(self):
        """Return the address this connector is aimed at."""
        return address.IPv4Address('UDP', self.host, self.port, 'INET')
class Protocol(DatagramProtocol):
    """DatagramProtocol that routes every received UDP datagram to libutp."""
    # Requested kernel socket buffer size for both send and receive.
    BUFFERSIZE = 2 * 1024 * 1024
    def startProtocol(self):
        # Enlarge the UDP socket buffers when the raw handle is reachable.
        if interfaces.ISystemHandle.providedBy(self.transport):
            handle = self.transport.getHandle()
            for opt in (socket.SO_RCVBUF, socket.SO_SNDBUF):
                handle.setsockopt(socket.SOL_SOCKET, opt, self.BUFFERSIZE)
    def datagramReceived(self, data, addr):
        # New connections are only accepted while the adapter is listening.
        if self.adapter.acceptIncoming and self.adapter.listening:
            on_incoming = self.adapter.connectionReceived
        else:
            on_incoming = None
        utp.IsIncomingUTP(on_incoming, self.transport.write, data, addr)
class Adapter:
    """Bridges one UDP port and the set of UTP sockets multiplexed on it."""
    def __init__(self, udpPort, acceptIncoming):
        self.udpPort = udpPort
        self.acceptIncoming = acceptIncoming
        # HORK: give the datagram protocol a way back to this adapter.
        udpPort.protocol.adapter = self
        self.sockets = set()
    def addSocket(self, utp_socket):
        """Track a UTP socket, lazily starting the UDP port if needed."""
        if not self.udpPort.connected:
            # Only outgoing-only adapters start listening lazily.
            assert not self.acceptIncoming
            self.udpPort.startListening()
        self.sockets.add(utp_socket)
    def removeSocket(self, utp_socket):
        """Forget a UTP socket; stop the UDP port once none remain."""
        self.sockets.remove(utp_socket)
        return self.maybeStopUDPPort()
    def maybeStopUDPPort(self):
        """Stop listening when no sockets remain (returns its Deferred)."""
        if not self.sockets:
            assert self.udpPort.connected
            return self.udpPort.stopListening()
    def getHost(self):
        """Return the address of the underlying UDP port."""
        return self.udpPort.getHost()
class Port(Adapter, base.BasePort):
    """Listening UTP port: an Adapter that also accepts incoming sockets."""
    implements(interfaces.IListeningPort)
    # Monotonically increasing id handed to each accepted Server.
    sessionno = 0
    def __init__(self, udpPort, factory, reactor):
        self.factory = factory
        self.reactor = reactor
        Adapter.__init__(self, udpPort, acceptIncoming=True)
        self.listening = False
    def __repr__(self):
        if self.udpPort._realPortNumber is not None:
            return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
                self.udpPort._realPortNumber)
        else:
            return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)
    def maybeStopUDPPort(self):
        # While still listening, keep the UDP port open even if no UTP
        # sockets remain.
        if not self.listening:
            return Adapter.maybeStopUDPPort(self)
    def startListening(self):
        if self.listening:
            return
        self.listening = True
        if not self.udpPort.connected:
            self.udpPort.startListening()
        self.factory.doStart()
    def stopListening(self):
        # Returns a Deferred that fires once the UDP port (if idle) has
        # stopped and the factory has been notified.
        if not self.listening:
            return
        self.listening = False
        df = maybeDeferred(self.maybeStopUDPPort)
        df.addCallback(lambda r: self._connectionLost())
        return df
    # this one is for stopListening
    # the listening port has closed
    def _connectionLost(self, reason=None):
        assert not self.listening
        base.BasePort.connectionLost(self, reason)
        self.factory.doStop()
    # this one is for calling directly
    # the listening port has closed
    def connectionLost(self, reason=None):
        self.listening = False
        self.udpPort.connectionLost(reason)
        self._connectionLost(reason)
    # a new incoming connection has arrived
    def connectionReceived(self, utp_socket):
        self.addSocket(utp_socket)
        protocol = self.factory.buildProtocol(makeAddr(utp_socket.getpeername()))
        if protocol is None:
            # XXX: untested path
            # Factory refused the connection; close the socket politely.
            Connection(self, utp_socket, self.reactor).loseConnection()
            return
        s = self.sessionno
        self.sessionno = s+1
        transport = Server(utp_socket, protocol, self, s, self.reactor)
        protocol.makeConnection(transport)
def listenUTP(self, port, factory, interface=''):
    """Reactor method: listen for incoming UTP connections over UDP."""
    underlying = self.listenUDP(port, Protocol(), interface=interface)
    listening_port = Port(underlying, factory, self)
    listening_port.startListening()
    return listening_port
def createUTPAdapter(self, port, protocol, interface=''):
    """Reactor method: bind a UDP port for outgoing-only UTP use."""
    underlying = self.listenUDP(port, protocol, interface=interface)
    return Adapter(underlying, acceptIncoming=False)
def connectUTP(self, host, port, factory, timeout=30, bindAddress=None):
    # Reactor method: make an outgoing UTP connection, binding a fresh
    # UDP port for it (at bindAddress when given). Returns the Connector.
    if bindAddress is None:
        bindAddress = ['', 0]
    adapter = None
    try:
        adapter = self.createUTPAdapter(bindAddress[1], Protocol(), interface=bindAddress[0])
    except error.CannotListenError:
        e = sys.exc_info()[1]
        c = Connector(host, port, factory, None, timeout, self)
        se = e.socketError
        # We have to call connect to trigger the factory start and connection
        # start events, but we already know the connection failed because the
        # UDP socket couldn't bind. So we set soError, which causes the connect
        # call to fail.
        c.soError = error.ConnectBindError(se[0], se[1])
        c.connect()
        return c
    try:
        return self.connectUTPUsingAdapter(host, port, factory, adapter, timeout=timeout)
    except:
        # Don't leak the freshly-bound UDP port on failure.
        adapter.maybeStopUDPPort()
        raise
def connectUTPUsingAdapter(self, host, port, factory, adapter, timeout=30):
    """Reactor method: connect over an already-bound UTP Adapter."""
    connector = Connector(host, port, factory, adapter, timeout, self)
    connector.connect()
    return connector
# like addReader/addWriter, sort of
def addUTPConnection(self, connection):
    """Register a live Connection; start the libutp timer loop lazily."""
    if not hasattr(self, "_utp_task"):
        # First connection: begin driving libutp's timers every 50 ms.
        self._utp_connections = set()
        self._utp_task = task.LoopingCall(utp.CheckTimeouts)
        self._utp_task.start(0.050)
    self._utp_connections.add(connection)
# like removeReader/removeWriter, sort of
def removeUTPConnection(self, connection):
    """Unregister a Connection; stop the timer loop when none remain."""
    self._utp_connections.remove(connection)
    if not self._utp_connections:
        # Last connection gone: tear the polling task down entirely.
        self._utp_task.stop()
        del self._utp_task
        del self._utp_connections
# Ouch.
# Graft a connectUTP helper onto Twisted's ClientCreator using its
# private _InstanceFactory, so UTP clients can be built like TCP ones.
from twisted.internet.protocol import ClientCreator, _InstanceFactory
def clientCreatorConnectUTP(self, host, port, timeout=30, bindAddress=None):
    """Connect to remote host, return Deferred of resulting protocol instance."""
    d = defer.Deferred()
    f = _InstanceFactory(self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
    self.reactor.connectUTP(host, port, f, timeout=timeout, bindAddress=bindAddress)
    return d
ClientCreator.connectUTP = clientCreatorConnectUTP
# Owwww.
# Monkey-patch the UTP entry points onto the running reactor's class so
# callers can use reactor.listenUTP()/connectUTP() like the TCP APIs.
from twisted.internet import reactor
reactor.__class__.listenUTP = listenUTP
reactor.__class__.connectUTP = connectUTP
reactor.__class__.createUTPAdapter = createUTPAdapter
reactor.__class__.connectUTPUsingAdapter = connectUTPUsingAdapter
reactor.__class__.addUTPConnection = addUTPConnection
reactor.__class__.removeUTPConnection = removeUTPConnection
del reactor
| Python |
import ctypes
import socket
import struct
class SOCKADDR(ctypes.Structure):
    """Generic ``struct sockaddr``: a 2-byte address family followed by
    14 bytes of protocol-specific address data (16 bytes total)."""
    _fields_ = [
        ('family', ctypes.c_ushort),
        ('data', ctypes.c_byte * 14),
    ]


# Pointer type used by APIs that take a LPSOCKADDR.
LPSOCKADDR = ctypes.POINTER(SOCKADDR)
class SOCKET_ADDRESS(ctypes.Structure):
    """Winsock SOCKET_ADDRESS: a pointer to a sockaddr plus its length."""
    _fields_ = [
        ('address', LPSOCKADDR),
        ('length', ctypes.c_int),
    ]
ADDRESS_FAMILY = ctypes.c_ushort
_SS_MAXSIZE = 128
_SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64)
_SS_PAD1SIZE = (_SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ushort))
_SS_PAD2SIZE = (_SS_MAXSIZE - (ctypes.sizeof(ctypes.c_ushort) + _SS_PAD1SIZE + _SS_ALIGNSIZE))
class SOCKADDR_STORAGE(ctypes.Structure):
_fields_ = (
('ss_family', ADDRESS_FAMILY),
('__ss_pad1', ctypes.c_char * _SS_PAD1SIZE),
('__ss_align', ctypes.c_int64),
('__ss_pad2', ctypes.c_char * _SS_PAD2SIZE),
)
LPSOCKADDR_STORAGE = ctypes.POINTER(SOCKADDR_STORAGE)
class IPAddr(ctypes.Structure):
    """Raw IPv4 address held as a C unsigned long, in network byte order
    (the in-memory layout of ``struct in_addr``)."""
    _fields_ = (
        ("S_addr", ctypes.c_ulong),
    )

    def __str__(self):
        """Render the address as a dotted quad.

        Uses the "=L" struct format: a fixed 4-byte unsigned long in
        native byte order.  The bare "L" format uses the platform's
        native long size, which is 8 bytes on LP64 systems, and
        inet_ntoa() rejects anything but a 4-byte packed address.
        """
        return socket.inet_ntoa(struct.pack("=L", self.S_addr))
class in_addr(ctypes.Structure):
    """``struct in_addr``: an IPv4 address, wrapped in IPAddr for its
    dotted-quad __str__."""
    _fields_ = (
        ("s_addr", IPAddr),
    )
class in6_addr(ctypes.Structure):
    """``struct in6_addr``: a raw 128-bit IPv6 address."""
    _fields_ = [
        ("Byte", ctypes.c_ubyte * 16),
    ]
class sockaddr_in(ctypes.Structure):
    """``struct sockaddr_in``: IPv4 socket address (family, port in
    network byte order, address).

    NOTE(review): the trailing szDescription field occupies the 8 bytes
    that are normally sin_zero padding; the name looks carried over from
    a Winsock sample -- confirm it is intentional.
    """
    _fields_ = (
        ("sin_family", ADDRESS_FAMILY),
        ("sin_port", ctypes.c_ushort),
        ("sin_addr", in_addr),
        ("szDescription", ctypes.c_char * 8),
    )
# Pointer type used by APIs that take a sockaddr_in*.
psockaddr_in = ctypes.POINTER(sockaddr_in)
class sockaddr_in6(ctypes.Structure):
    """``struct sockaddr_in6``: IPv6 socket address (family, port and
    flow info in network byte order, address, scope id)."""
    _fields_ = (
        ("sin6_family", ADDRESS_FAMILY),
        ("sin6_port", ctypes.c_ushort),
        ("sin6_flowinfo", ctypes.c_ulong),
        ("sin6_addr", in6_addr),
        ("sin6_scope_id", ctypes.c_ulong),
    )
# Pointer type used by APIs that take a sockaddr_in6*.
psockaddr_in6 = ctypes.POINTER(sockaddr_in6)
# C socklen_t as used by the socket APIs.
socklen_t = ctypes.c_int
def inet_addr(ip):
    """Convert a dotted-quad IPv4 string to an IPAddr (network order).

    Uses the "=L" struct format: a fixed 4-byte unsigned long in native
    byte order.  The bare "L" format uses the platform's native long
    size, which is 8 bytes on LP64 systems, so unpacking the 4-byte
    result of inet_aton() would raise struct.error there.
    """
    return IPAddr(struct.unpack("=L", socket.inet_aton(ip))[0])
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with
  a list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if "hg annotate" produces a line we cannot parse
  '''
  # Take output of hg annotate to get the revision of each line.
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # Raising a bare string is a TypeError in Python >= 2.6; raise a
      # real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))
  # (renamed the loop variable from 'str', which shadowed the builtin)
  for string_info in parsed_file.itervalues():
    # Get the lines that correspond to each string
    start_line = string_info['startLine']
    end_line = string_info['endLine']
    # Get the revisions that touched those lines
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]
    # Merge with any revisions that were already there
    # (for explicit revision specification)
    if 'revs' in string_info:
      revs += string_info['revs']
    # Assign the revisions to the string
    string_info['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.

  This essentially means that the older revision is an ancestor of the newer
  one.
  This also returns True if the two revisions are the same.

  @param filename: the file whose history is consulted
  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True
  # Ask hg for every ancestor of rev1 that touched this file and check
  # whether rev2 is among them.
  args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines
def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.

  If none of them is the ancestor of the other, then we return either one.

  @param filename: the file whose history is consulted
  @param rev1: the first revision
  @param rev2: the second revision
  '''
  return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
  '''
  Prints command-line usage information and exits with status 1.
  '''
  print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
  print 'Commands are:'
  print ' cleanup'
  print ' translate'
  print ' validate'
  sys.exit(1)
def Translate(languages):
  '''
  Asks the user to interactively translate any missing or outdated strings from
  the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()
  for lang in languages:
    # The validator only stores entries for languages that actually have
    # problems (so plain indexing raised KeyError for clean languages),
    # and the missing entries are frozensets, which don't support '+'.
    # Normalize both sides to lists before concatenating.
    untranslated = list(missing.get(lang, ())) + list(outdated.get(lang, ()))
    if len(untranslated) == 0:
      continue
    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)
def Validate(languages):
  '''
  Computes and displays errors in the string files for the given languages.

  @param languages: the languages to compute for
  @return: the total number of errors found
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  error_count = 0
  if (validator.valid()):
    print 'All files OK'
  else:
    # Report each category of problem and tally the total error count.
    for lang, missing in validator.missing_in_master().iteritems():
      print 'Missing in master, present in %s: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
      print 'Missing in %s, present in master: %s:' % (lang, str(missing))
      error_count = error_count + len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
      print 'Outdated in %s: %s:' % (lang, str(outdated))
      error_count = error_count + len(outdated)
  return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
  '''
  Compares each language's strings.xml against the master ('en') file,
  recording keys missing on either side and keys whose master entry was
  modified more recently than the translation.
  '''
  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Parses strings.xml for every language up front and annotates each
    string with the Mercurial revisions that touched it.

    Params:
    @param languages: a dictionary mapping each language to its corresponding directory
    '''
    self._langs = {}
    self._master = None
    self._language_paths = languages
    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      mytracks.history.FillMercurialRevisions(filename, parsed_file)
      if lang == 'en':
        # 'en' lives in the plain values/ directory and acts as the master.
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file
    self._Reset()
  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is valid.

    Results are exposed via valid(), missing_in_master(),
    missing_in_lang() and outdated_in_lang().
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()
  def valid(self):
    # True when the last Validate() run found no problems at all.
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)
  def missing_in_master(self):
    # Maps language -> keys present in that language but not in master.
    return self._missing_in_master
  def missing_in_lang(self):
    # Maps language -> keys present in master but not in that language.
    return self._missing_in_lang
  def outdated_in_lang(self):
    # Maps language -> keys whose master entry is newer than the translation.
    return self._outdated_in_lang
  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}
  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang
  def _ValidateOutdatedKeys(self):
    '''
    Computes whether any of the language keys are outdated with relation to the
    master keys.

    NOTE(review): DoesRevisionSuperceed also returns True for equal
    revisions, so a string last touched by the same revision in both
    files is flagged as outdated -- confirm that is intended.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue
        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)
        # If the master version is newer than the lang version
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
          outdated.append(key)
      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Location of the Android resources, relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Master (English) values directory, and the glob for translated ones.
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'


def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks up from the current working directory until a directory
  containing MyTracks/res is found.

  @raise RuntimeError: if no enclosing My Tracks project is found
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    parent = os.path.split(path)[0]
    if parent == path:
      # Reached the filesystem root.  This check recognizes any root
      # (the old 'path == "/"' test looped forever on Windows drive
      # roots), and raising a bare string is a TypeError in Python >= 2.6.
      raise RuntimeError('Not in My Tracks project')
    # Go up one level
    path = parent
  return path
def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  The master ('values') directory is returned under the key 'en'.

  @raise RuntimeError: if no language directories or no master directory
      can be found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
  if len(language_dirs) == 0:
    # Raising a bare string is a TypeError in Python >= 2.6.
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    raise RuntimeError('Couldn\'t find master file')
  # Map each values-XX directory to its language qualifier XX.
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0], dir)
                     for dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
  '''
  Interactively prompts the user for translations in one language.

  TODO: implement; Translate currently just prints the string names.
  '''
  def __init__(self, language):
    '''
    Constructor

    @param language: the language code this translator works on
    '''
    self._language = language
  def Translate(self, string_names):
    # Placeholder: show which strings still need translation.
    print string_names
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file at
  a time, only.
  '''
  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and explicit
    revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
    <!-- KEEP_PARENT name="bla" -->
    <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is compatible
    with the master file up to that revision).

    @param file: the name of the file to parse
    '''
    self._Reset()
    # Unfortunately expat is the only parser that will give us line numbers
    self._xml_parser = ParserCreate()
    self._xml_parser.StartElementHandler = self._StartElementHandler
    self._xml_parser.EndElementHandler = self._EndElementHandler
    self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
    self._xml_parser.CommentHandler = self._CommentHandler
    file_obj = open(file)
    self._xml_parser.ParseFile(file_obj)
    file_obj.close()
    return self._all_strings
  def _Reset(self):
    # Parsing state for the <string> element currently open, if any.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    # Maps string name -> dict with value/startLine/endLine/revs keys.
    self._all_strings = {}
  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are of interest.
    if name != 'string':
      return
    if 'name' not in attrs:
      return
    assert not self._currentString
    assert not self._currentStringName
    self._currentString = {
      'startLine' : self._xml_parser.CurrentLineNumber,
    }
    # An explicit rev attribute seeds the revision list.
    if 'rev' in attrs:
      self._currentString['revs'] = [attrs['rev']]
    self._currentStringName = attrs['name']
    self._currentStringValue = ''
  def _EndElementHandler(self, name):
    # Close the open <string>, record it, and clear the parsing state.
    if name != 'string':
      return
    assert self._currentString
    assert self._currentStringName
    self._currentString['value'] = self._currentStringValue
    self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = self._currentString
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
  def _CharacterDataHandler(self, data):
    # Expat may deliver text in several chunks; accumulate them.
    if not self._currentString:
      return
    self._currentStringValue += data
  # Matches the KEEP_PARENT annotation (with optional 12-hex-digit rev).
  _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                  r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                  r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                  re.MULTILINE | re.DOTALL)
  def _CommentHandler(self, data):
    # A KEEP_PARENT comment registers a synthetic string entry that
    # explicitly inherits its value from the master file.
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match:
      return
    name = keep_parent_match.group('name')
    self._all_strings[name] = {
      'keepParent' : True,
      'startLine' : self._xml_parser.CurrentLineNumber,
      'endLine' : self._xml_parser.CurrentLineNumber
    }
    rev = keep_parent_match.group('rev')
    if rev:
      self._all_strings[name]['revs'] = [rev]
#!/usr/bin/python
#Developed by Florin Nicusor Coada for 216 CR
#Based on the udp_chat_server2.py tutorial from professor Christopher Peters
import socket
# Server configuration: clients send to IPORT; replies go out on OPORT.
HOST = '192.168.1.2' #Defaults to "this machine"
IPORT = 50007 #In
OPORT = 50008 #Out
# Bound socket on which client datagrams arrive.
in_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
in_socket.bind((HOST, IPORT))
# Unbound socket used to push messages out to clients.
out_socket= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#Who's who
#address to name dict
users={}
##send message to all users
def sendAll(data):
    """Broadcast data to every connected user (users is keyed by IP)."""
    print "sending", data,"to",
    for i in users:
        out_socket.sendto(data,(i,OPORT))
        print i
##kick user
def kick(user,address):
    """Remove a user by name, notifying them; tell the admin if absent.

    @param user: display name of the user to kick
    @param address: IP of the admin issuing the command (for error replies)
    """
    global users
    temp={}
    ##reacreate a list of users without the user that is going to be kicked
    for i in users:
        if users[i]!=user:
            temp[i]=users[i]
        else:
            ##if the user is online, send a message telling him that
            ##he has been kicked
            out_socket.sendto("You have been kicked",(i,OPORT))
    if temp==users:
        ##if user not found return the message to admin
        out_socket.sendto("User {0} not found".format(user),(address,OPORT))
    else :
        ##recreate the list without the user that was kicked
        users=temp
##ban user (works identical with kick, just different messages returned)
def ban(user,address):
    """Remove a user by name with a 'banned' notification; tell the
    admin at address if the user is not online."""
    global users
    temp={}
    for i in users:
        if users[i]!=user:
            temp[i]=users[i]
        else:
            out_socket.sendto("You have been banned",(i,OPORT))
    if temp==users:
        out_socket.sendto("User {0} not found".format(user),(address,OPORT))
    else :
        users=temp
##remove function used to remove users from the list when they quit
def remove(address):
    """Drop the user at the given address from the online list."""
    global users
    # Rebuild the mapping without the departing address and swap it in.
    users = dict((addr, name) for addr, name in users.items()
                 if addr != address)
##return a list of users using a special format
##used to identify the users online
def getUsers():
    """Send the 'ulist:'-formatted roster of online user names.

    NOTE(review): the sendto below uses the loop variable, so each user
    receives the list as built so far rather than the complete roster --
    confirm the intended indentation of the sendto call.
    """
    userList="ulist:"
    for i in users:
        userList+=" "+users[i]
        userList+=" "
        out_socket.sendto(userList,(i,OPORT))
##send message to a certain user
##used by the private message
def sendTo(user,message,address):
    """Deliver a private message to the named user; report failure back
    to the sender at address if the user is not online."""
    data="From "+user+": "+message
    # k flags whether the recipient was found.
    k=0
    ##look for the recieving user
    for i in users:
        if users[i]==user:
            ##if found send message
            out_socket.sendto(data,(i,OPORT))
            k=1
    ##if user not found return message to sender
    if(k==0):
        out_socket.sendto("User {0} not found".format(user),(address,OPORT))
# Main dispatch loop: receive one datagram at a time and route it by its
# leading command token.
while 1:
    #step 1 - Get any data
    data,address = in_socket.recvfrom(1024)
    address=address[0] #Strip to just IP address
    ##step 2 - check for special messages
    if data=="quit":
        remove(address) #Make sure clients quit
        #break #For debugging only
    ##kick case
    elif data.startswith("/kick"):
        # Command format: /kick -<name>
        user=data[data.find("-")+1:]
        kick(user,address)
    ##ban case
    elif data.startswith("/ban"):
        # Command format: /ban -<name>
        user=data[data.find("-")+1:]
        ban(user,address)
    ##update case
    elif data.startswith("/update"):
        getUsers()
    ##private message case
    elif data.startswith("/pm"):
        # Command format: /pm -<name>:<message>
        user=data[data.find("-")+1:data.find(":")]
        message=data[data.find(":")+1:]
        print user," ",message
        sendTo(user,message,address)
    ##user case
    elif data.startswith("user"):
        #New user /change of name
        # e.g user:james
        username=data[data.find(":")+1:]
        ##change of name
        if users.has_key(address):
            sendAll("%s is now called %s"%(users[address],username))
        ##new users joining
        else:
            sendAll("%s(%s) has joined"%(username,address))
        users[address]=username
    ## general message
    else:
        sendAll("%s: %s"%(users[address],data))
# NOTE(review): unreachable -- the loop above never breaks.
out_socket.close()
in_socket.close()
| Python |
##Developed by Florin Nicusor Coada for 216CR
##Based on the tutorial offered by Christopher Peters
import sqlite3 as sqlite
import SimpleXMLRPCServer
##connect to the database and create a simple users table
# In-memory database: all accounts vanish when the server stops.
con = sqlite.connect(":memory:")
con.execute("create table users(name varchar primary key, pass varchar, ban integer, admin integer)")
cur=con.cursor()
# Seed accounts: (name, password, banned?, admin?).
cur.execute("insert into users values ('Florin','cocouta',0,1)")
cur.execute("insert into users values('Dudu','cainerosu',0,0)")
cur.execute("insert into users values('Coco','spammer',0,0)")
cur.execute("insert into users values('Fraguta','cocotat',0,0)")
cur.execute("insert into users values('q','q',1,0)")
class User:
    """XML-RPC facade over the users table.

    All queries are parameterized: the previous string-formatted SQL let
    any client inject arbitrary SQL through names and passwords.  LIKE is
    kept (rather than =) to preserve the original case-insensitive name
    matching, and the module-level cursor ``cur`` is still used.
    """
    def __init__(self):
        self.users = {}

    ##check to see if login details are correct
    def checkUser(self, name, passw):
        """Return 0 for bad credentials, 1 for a valid login, 2 if banned."""
        cur.execute("select * from users where name like ?", (name,))
        for item in cur:
            if item[0] == name and item[1] == passw:
                if item[2] == 1:
                    return 2
                return 1
        return 0

    ##add another user to the database
    def addUser(self, name, passw):
        """Create an account; 0 if the name is taken (case-insensitively), 1 on success."""
        cur.execute("select name from users where name like ?", (name,))
        for item in cur:
            if str(item[0]).upper() == name.upper():
                return 0
        cur.execute("insert into users values (?, ?, 0, 0)", (name, passw))
        return 1

    ##check to see if user has admin privileges
    def userType(self, name):
        """Return 1 if the user is an admin, 0 otherwise.

        NOTE(review): falls through (returns None) for unknown names,
        matching the original behavior.
        """
        cur.execute("select admin,name from users where name like ?", (name,))
        for item in cur:
            if item[0] == 1:
                return 1
            else:
                return 0

    ##change the name of an existing user
    def newName(self, name, newname):
        """Rename an account; 0 if the new name is already taken, 1 on success."""
        cur.execute("select name from users where name like ?", (newname,))
        for item in cur:
            if str(item[0]).upper() == newname.upper():
                return 0
        cur.execute("update users set name=? where name like ?", (newname, name))
        return 1

    ##change the password of an existing user
    def newPassword(self, oldpass, newpass, name):
        """Change a password after verifying the old one; 1 on success, 0 otherwise."""
        cur.execute("select pass from users where name like ?", (name,))
        for item in cur:
            if item[0] == oldpass:
                cur.execute("update users set pass=? where name like ?", (newpass, name))
                return 1
        return 0

    ##ban a user
    def banUser(self, name):
        """Mark the named account as banned; always returns 1."""
        cur.execute("update users set ban=1 where name like ?", (name,))
        return 1
user_obj=User()
# Expose every public method of User over XML-RPC on port 8888.
server=SimpleXMLRPCServer.SimpleXMLRPCServer(("192.168.1.2", 8888))
server.register_instance(user_obj)
#Go into the main listener loop
server.serve_forever()
| Python |
#!/usr/bin/python
#Developed by Florin Nicusor Coada for 216 CR
#Based on the udp_chat_client2.py tutorial from professor Christopher Peters
from Tkinter import *
import socket,select
from login import *
import sys
HOST = '192.168.1.2' #Server
OPORT = 50007 # The same port as used by the server
IPORT=50008 #Listening port
# Outgoing datagrams go straight to the server; incoming ones arrive on
# our own bound listening port.
out_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
in_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
in_socket.bind(('', IPORT))
# Block until the user has logged in, then announce ourselves.
user=log()
out_socket.sendto("user:%s"%user,(HOST,OPORT))
##GUI interface
class MyMenu:
    """Main chat window: message list, input box, online-user roster and
    options/exit buttons.  kickState is 0 while connected, 1 after a
    kick and 2 after a ban."""
    def __init__(self,parent):
        self.mainframe=Frame(parent)
        frame=Frame(self.mainframe)
        frame.grid(row=0,column=0)
        self.user=user
        ##Area above chat
        self.titleLabel=Label(frame,text="Battle Botz",bg="LightGoldenrod2",font=("Helvetica",24))
        self.titleLabel.grid(row=1,column=0,columnspan=5,sticky=NSEW)
        self.meLabel=Label(frame,text="by Florin N. Coada",bg="LightGoldenrod2")
        self.meLabel.grid(row=1,column=5,columnspan=2,sticky=NSEW)
        self.createButton=Button(frame,text="Create")
        self.createButton.grid(column=2,row=2,sticky=NSEW)
        self.joinButton=Button(frame,text="Join",width=5)
        self.joinButton.grid(column=3,columnspan=2,row=2,sticky=NSEW)
        self.arangeLabel=Label(frame,width=50)
        self.arangeLabel.grid(row=2,column=0,columnspan=2)
        ##Chat related area
        self.textArea=Listbox(frame)
        self.typeArea=Entry(frame)
        self.sendButton=Button(frame,text="Send",command=self.addToChat,background = 'LightGoldenrod2')
        self.users=Label(frame,text="Users:",background = 'LightGoldenrod')
        self.userList=Listbox(frame,width=20)
        self.scrollbar=Scrollbar(frame)
        self.option=Button(frame,text="Options",command=self.newDetails)
        self.exit=Button(frame,text="Exit",command=self.exitChat)
        self.textArea.grid(column=0,columnspan=4,row=3,rowspan=2,sticky=NSEW)
        self.typeArea.grid(column=0,columnspan=3,row=5,rowspan=2,sticky=NSEW)
        self.sendButton.grid(column=3,columnspan=2,row=5,sticky=NSEW)
        self.userList.grid(column=5,row=4,columnspan=2,sticky=NSEW)
        self.users.grid(column=5,row=2,rowspan=2,columnspan=2,sticky=NSEW)
        self.scrollbar.grid(column=4,row=3,rowspan=2,sticky=NSEW)
        self.option.grid(column=5,row=5,sticky=NSEW)
        self.exit.grid(column=6,row=5,sticky=NSEW)
        self.scrollbar.configure(command=self.textArea.yview)
        self.kickState=0;
        ## set up the main window
        self.mainframe.grid()
        ## set the title
        self.mainframe.master.title("Chat box")
    def addToChat(self,*ignore):
        """Validate and send the typed message; refuse when kicked/banned."""
        message=self.typeArea.get()
        if(self.kickState==0 and self.checkMsg(message)==1):
            out_socket.sendto(message,(HOST,OPORT))
        elif (self.kickState==1):
            self.textArea.insert(END,"You have been kicked")
            self.textArea.yview(END)
        elif (self.kickState==2):
            self.textArea.insert(END,"You have been banned")
            self.textArea.yview(END)
        if self.typeArea.get()=="quit":
            self.exitChat()
        self.typeArea.delete(0,END)
    def updateChat(self):
        """Poll the inbound socket (1 s select timeout), display any chat
        line, track kick/ban notices, and reschedule itself."""
        rlist,wlist,elist=select.select([in_socket],[],[],1)
        if len(rlist)!=0:
            data=in_socket.recv(1024)
            # Roster datagrams ('ulist:...') are not shown in the chat.
            if len(data)!=0 and data.startswith("ulist:")==False:
                self.textArea.insert(END,data)
                # Private messages arrive as 'From ...' and get a color.
                if data.startswith("From"):
                    self.textArea.itemconfig(END,fg="DarkSeaGreen")
                if (data=="You have been kicked"):
                    self.kickState=1;
                elif (data=="You have been banned"):
                    self.kickState=2;
                self.textArea.yview(END)
        if(self.kickState==0):
            self.userUpdate()
        main_win.after(1000,self.updateChat)
    def userUpdate(self):
        """Request the roster from the server and rebuild the user list,
        tagging admins in red."""
        out_socket.sendto("/update",(HOST,OPORT))
        k=in_socket.recv(1024)
        self.userList.delete(0,END)
        tag="Admin"
        if (k.startswith("ulist:")):
            # Roster format: 'ulist: name1  name2  ...'.
            k=k[k.find(":")+1:]
            while k!="":
                k=k[k.find(" ")+1:]
                user=k[:k.find(" ")]
                if k!="":
                    if(userType(user)==1):
                        tag="Admin"
                    else:
                        tag="User"
                    self.userList.insert(END,user+" - "+tag)
                    if (tag=="Admin"):
                        self.userList.itemconfig(END,fg="RED")
    def newDetails(self):
        """Open the change-details dialog; announce a name change."""
        data=details(self.user)
        if data[1]!="":
            out_socket.sendto("user:"+data[1],(HOST,OPORT))
            self.user=data[1]
    def exitChat(self):
        """Announce departure, remove ourselves server-side, and exit."""
        out_socket.sendto("{0} has left".format(self.user),(HOST,OPORT))
        out_socket.sendto("/kick -{0}".format(self.user),(HOST,OPORT))
        sys.exit()
    def checkMsg(self,message):
        """Return 1 if the message may be sent to the chat server, 0 if
        it was rejected (empty, insufficient privileges, name taken)."""
        if message=="":
            return 0
        elif message.startswith("/kick"):
            if(userType(self.user)==1):
                return 1
            else:
                self.textArea.insert(END,"You are not an admin!")
                return 0
        elif message.startswith("/ban"):
            if(userType(self.user)==1):
                # Ban in the account database too, then let the chat
                # server disconnect the user.
                name=message[message.find("-")+1:]
                banUser(name)
                return 1
            else:
                self.textArea.insert(END,"You are not an admin!")
                return 0
        elif message.startswith("user:"):
            # Rename: reserve the name via XML-RPC before announcing it.
            name=message[message.find(":")+1:]
            if(newName(self.user,name)==1):
                out_socket.sendto(message,(HOST,OPORT))
                self.user=name
                return 1
            else :
                self.textArea.insert(END,"Name already taken")
                return 0
        return 1
##new tkinter root
main_win=Tk()
app = MyMenu(main_win)
# Enter sends the typed message; poll the network every second.
main_win.bind('<Return>',app.addToChat)
main_win.after(1000,app.updateChat)
main_win.mainloop()
#Close the socket
out_socket.close()
in_socket.close()
| Python |
from Tkinter import *
import xmlrpclib
from time import clock, time
# Single XML-RPC connection to the account server, shared module-wide.
server = xmlrpclib.ServerProxy("http://192.168.1.2:8888")
# Thin module-level wrappers over the account server's methods.
def userType(user):
    """Return 1 if the user is an admin, 0 otherwise."""
    return server.userType(user)
def newName(oldUname,newUname):
    """Rename an account; returns 1 on success, 0 if the name is taken."""
    return server.newName(oldUname,newUname)
def banUser(name):
    """Mark the named account as banned."""
    server.banUser(name)
class MyDetails:
    """Dialog for changing the current user's name and/or password.

    Results are exposed through self.data: data[1] holds the new name and
    data[2] the new password ('' when unchanged); self.done flips to 1
    when the dialog may be closed.
    """
    def __init__(self,parent,uname):
        self.mainframe=Frame(parent)
        frame=Frame(self.mainframe)
        frame.grid(row=0,column=0)
        self.data={} ##data[1] - new name, [2] - new password
        self.data[1]=""
        self.data[2]=""
        self.done=0
        self.uname=uname ##used to search the database fo the old pass
        ##Title labels
        self.changeName=Label(frame,text="Change your name",background = 'LightGoldenrod1')
        self.changePassword=Label(frame,text="Change your password",background = 'LightGoldenrod1')
        self.changeName.grid(row=0,column=0,columnspan=4,sticky=NSEW)
        self.changePassword.grid(row=2,column=0,columnspan=4,sticky=NSEW)
        ##entries and labels
        self.newName=Label(frame,text="New name:")
        self.oldPass=Label(frame,text="Old Password:")
        self.newPass=Label(frame,text="New Password:")
        self.newNameEntry=Entry(frame,width=35)
        self.oldPassEntry=Entry(frame,width=35,show="*")
        self.newPassEntry=Entry(frame,width=35,show="*")
        self.newName.grid(row=1,column=0)
        self.newNameEntry.grid(row=1,column=1,columnspan=3)
        self.oldPass.grid(row=3,column=0)
        self.oldPassEntry.grid(row=3,column=1,columnspan=3)
        self.newPass.grid(row=4,column=0)
        self.newPassEntry.grid(row=4,column=1,columnspan=3)
        self.mainframe.grid()
        self.mainframe.master.title("Change details")
        ##button
        self.changeName=Button(frame,text="Change Name",command=self.chName)
        self.changePass=Button(frame,text="Change Password",command=self.chPass)
        self.changeAll=Button(frame,text="Change all",command=self.chAll)
        self.exitBtn=Button(frame,text="Exit",command=self.exitDet)
        self.changeName.grid(row=5,column=0,sticky=NSEW)
        self.changePass.grid(row=5,column=1,sticky=NSEW)
        self.changeAll.grid(row=5,column=2,sticky=NSEW)
        self.exitBtn.grid(row=5,column=3,sticky=NSEW)
        ##warnings label
        self.warningLabel=Label(frame,text="")
        self.warningLabel.grid(row=6,column=0,columnspan=4,sticky=NSEW)
    def exitDet(self):
        """Close the dialog without changing anything."""
        self.done=1
    def chName(self):
        """Change only the user name (after checking availability)."""
        if self.newNameEntry.get()!="":
            if newName(self.uname,self.newNameEntry.get())==1:
                self.data[1]=self.newNameEntry.get()
                self.done=1
            else:
                self.warningLabel.configure(text="Name already in use",background = 'DarkRed')
                self.warningLabel.update_idletasks()
                self.done=0
    def chPass(self):
        """Change only the password (requires the old password)."""
        if self.newPassEntry.get()!="":
            # Send the *new* password from the entry field.  Previously
            # this passed self.data[2], which was still "" at this point,
            # so a "successful" change silently set an empty password.
            if server.newPassword(self.oldPassEntry.get(),self.newPassEntry.get(),self.uname)==1:
                self.data[2]=self.newPassEntry.get()
                self.done= 1
            else:
                self.warningLabel.configure(text="Password incorect",background = 'DarkRed')
                self.warningLabel.update_idletasks()
                self.done=0
    def chAll(self,*ignore):
        """Change name and password together.

        NOTE(review): unlike chName/chPass, the server return codes are
        not checked here -- the dialog closes even if either call fails.
        """
        if self.newNameEntry.get()!="" and self.newPassEntry.get()!="":
            self.data[2]=self.newPassEntry.get()
            server.newPassword(self.oldPassEntry.get(),self.data[2],self.uname)
            self.data[1]=self.newNameEntry.get()
            server.newName(self.uname,self.data[1])
            self.done=1
        else:
            self.warningLabel.configure(text="Name or password incorect/not inserted",background = 'DarkRed')
            self.warningLabel.update_idletasks()
            self.done=0
##GUI login interface
class MyLogin:
    """Login/registration window; self.confirm turns True after a
    successful login."""
    def __init__(self,parent):
        self.mainframe=Frame(parent)
        frame=Frame(self.mainframe)
        frame.grid(row=0,column=0)
        self.UserLabel=Label(frame,text="User:")
        self.PassLabel=Label(frame,text="Password:")
        self.TextLabel=Label(frame,text="")
        self.UserEntry=Entry(frame,width=30)
        self.PassEntry=Entry(frame,show="*",width=30)
        self.LogIn=Button(frame,text="LogIn",command=self.DoLogIn)
        self.Reg=Button(frame,text="Register",command=self.Register)
        ##Grid set up
        self.UserLabel.grid(row=0,column=0,columnspan=2,sticky=NSEW)
        self.PassLabel.grid(row=1,column=0,columnspan=2,sticky=NSEW)
        self.TextLabel.grid(row=3,column=0,columnspan=3,sticky=NSEW)
        self.UserEntry.grid(row=0,column=2,sticky=NSEW)
        self.PassEntry.grid(row=1,column=2,sticky=NSEW)
        self.LogIn.grid(row=2,column=1,sticky=NSEW)
        self.Reg.grid(row=2,column=2,sticky=W)
        self.mainframe.grid()
        self.mainframe.master.title("LonIn or create account")
        ##Set confirm state
        self.confirm=False
    ##activate on selecting the log in option
    def DoLogIn(self,*ignore):
        """Check credentials over XML-RPC and set self.confirm on success."""
        ##check if the details are correct or is banned
        ##status - {0 - wrong details, 1 - correct details, 2- banned user}
        status=server.checkUser(self.UserEntry.get(),self.PassEntry.get())
        ##if stauts - 1 start the count down
        if status==1:
            self.logInTimer()
            self.confirm=True
            return 0
        ##if status 2 print that the user is banned
        if status==2:
            self.TextLabel.configure(text="Login failed: You are banned",background = 'DarkRed')
            self.TextLabel.update_idletasks()
            self.UserEntry.delete(0,END)
            self.PassEntry.delete(0,END)
            return 0
        ##if status 0 print that the details are wrong
        self.TextLabel.configure(text="Login failed: Wrong user/password",background = 'DarkRed')
        self.TextLabel.update_idletasks()
        self.UserEntry.delete(0,END)
        self.PassEntry.delete(0,END)
        self.confirm=False
    ##register an user
    def Register(self):
        """Create a new account from the entry fields; returns True on success."""
        ##check to see if the user exists
        ##the check is case sensitive so the user CoCo will not be allowed if Coco exists
        if (server.addUser(self.UserEntry.get(),self.PassEntry.get())==0):
            self.TextLabel.configure(text="Please select a different name",background = 'DarkRed')
            self.TextLabel.update_idletasks()
            self.confirm=False
            return False
        ##if the username is free print the the account has been created
        self.TextLabel.configure(text="Account created, you can now log in",background="LightSeaGreen")
        self.TextLabel.update_idletasks()
        self.UserEntry.delete(0,END)
        self.PassEntry.delete(0,END)
        return True
    ##timer activated if the login details are correct
    def logInTimer(self):
        """Show a 3-second 'Loging in' countdown.

        NOTE(review): this busy-waits on time(), blocking the UI thread
        for the whole countdown.
        """
        now=time()
        while int(time()-now)!=3:
            self.TextLabel.configure(text="Loging in {0}".format(3-int(time()-now)),background="LightSeaGreen")
            self.TextLabel.update_idletasks()
def log():
    """Show the login window and block until a successful login.

    Returns the logged-in user name.

    NOTE(review): the update() loop busy-spins while waiting for the
    login confirmation flag.
    """
    log_win=Tk()
    login=MyLogin(log_win)
    log_win.bind("<Return>",login.DoLogIn)
    while login.confirm==False:
        log_win.update()
    user=login.UserEntry.get()
    log_win.destroy()
    server.newName(user,user)
    return (user)
def details(uname):
    """Show the change-details dialog for the given user and block until
    it is done.

    Returns the dialog's data dict: data[1] is the new name and data[2]
    the new password ('' when unchanged).
    """
    det_win=Tk()
    details=MyDetails(det_win,uname)
    det_win.bind("<Return>",details.chAll)
    while details.done==0:
        det_win.update()
    data=details.data
    det_win.destroy()
    return data
| Python |
# This is Python example on how to use Mongoose embeddable web server,
# http://code.google.com/p/mongoose
#
# Before using the mongoose module, make sure that Mongoose shared library is
# built and present in the current (or system library) directory
import mongoose
import sys
# Handle /show and /form URIs.
# Handle /show and /form URIs.
def EventHandler(event, conn, info):
    """Mongoose event callback demonstrating the Python binding.

    Returns True when the event was fully handled here, False to let
    Mongoose fall back to its default processing.
    """
    if event == mongoose.HTTP_ERROR:
        # Turn every HTTP error into a plain-text 200 page that names it.
        conn.printf('%s', 'HTTP/1.0 200 OK\r\n')
        conn.printf('%s', 'Content-Type: text/plain\r\n\r\n')
        conn.printf('HTTP error: %d\n', info.status_code)
        return True
    if event != mongoose.NEW_REQUEST:
        return False
    if info.uri == '/show':
        conn.printf('%s', 'HTTP/1.0 200 OK\r\n')
        conn.printf('%s', 'Content-Type: text/plain\r\n\r\n')
        conn.printf('%s %s\n', info.request_method, info.uri)
        # POST carries my_var in the body; GET carries it in the query string.
        if info.request_method == 'POST':
            content_len = conn.get_header('Content-Length')
            post_data = conn.read(int(content_len))
            my_var = conn.get_var(post_data, 'my_var')
        else:
            my_var = conn.get_var(info.query_string, 'my_var')
        conn.printf('my_var: %s\n', my_var or '<not set>')
        conn.printf('HEADERS: \n')
        for header in info.http_headers[:info.num_headers]:
            conn.printf(' %s: %s\n', header.name, header.value)
        return True
    if info.uri == '/form':
        conn.write('HTTP/1.0 200 OK\r\n'
                   'Content-Type: text/html\r\n\r\n'
                   'Use GET: <a href="/show?my_var=hello">link</a>'
                   '<form action="/show" method="POST">'
                   'Use POST: type text and submit: '
                   '<input type="text" name="my_var"/>'
                   '<input type="submit"/>'
                   '</form>')
        return True
    if info.uri == '/secret':
        # SECURITY: deliberately serves /etc/passwd -- demo code only,
        # never deploy this handler.
        conn.send_file('/etc/passwd')
        return True
    return False
# Create mongoose object, and register the event handler.
# List of options may be specified in the constructor.
server = mongoose.Mongoose(EventHandler,
                           document_root='/tmp',
                           listening_ports='8080')
print ('Mongoose started on port %s, press enter to quit'
       % server.get_option('listening_ports'))
# Block here; the server keeps serving on its own threads meanwhile.
sys.stdin.read(1)
# Deleting server object stops all serving threads
print 'Stopping server.'
del server
| Python |
# Copyright (c) 2004-2009 Sergey Lyubka
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# $Id: mongoose.py 471 2009-08-30 14:30:21Z valenok $
"""
This module provides python binding for the Mongoose web server.
There are two classes defined:
Connection: - wraps all functions that accept struct mg_connection pointer
as first argument.
Mongoose: wraps all functions that accept struct mg_context pointer as
first argument.
Creating Mongoose object automatically starts server, deleting object
automatically stops it. There is no need to call mg_start() or mg_stop().
"""
import ctypes
import os
# Event codes passed as the first argument to the user callback
# (values mirror the mg_event enum of the C library).
NEW_REQUEST = 0
HTTP_ERROR = 1
EVENT_LOG = 2
INIT_SSL = 3
class mg_header(ctypes.Structure):
    """A wrapper for struct mg_header (one HTTP header name/value pair)."""
    # Field names, order and types must match the C struct exactly.
    _fields_ = [
        ('name', ctypes.c_char_p),
        ('value', ctypes.c_char_p),
    ]
class mg_request_info(ctypes.Structure):
    """A wrapper for struct mg_request_info.

    Field order and types must mirror the C struct byte-for-byte; only
    the first `num_headers` entries of `http_headers` are meaningful.
    """
    _fields_ = [
        ('user_data', ctypes.c_char_p),
        ('request_method', ctypes.c_char_p),
        ('uri', ctypes.c_char_p),
        ('http_version', ctypes.c_char_p),
        ('query_string', ctypes.c_char_p),
        ('remote_user', ctypes.c_char_p),
        ('log_message', ctypes.c_char_p),
        ('remote_ip', ctypes.c_long),
        ('remote_port', ctypes.c_int),
        ('status_code', ctypes.c_int),
        ('is_ssl', ctypes.c_int),
        ('num_headers', ctypes.c_int),
        # Fixed-size array; the C side caps requests at 64 headers.
        ('http_headers', mg_header * 64),
    ]
# C callback signature: void *(*)(int event, struct mg_connection *,
# const struct mg_request_info *).  Used to wrap the Python handler.
mg_callback_t = ctypes.CFUNCTYPE(ctypes.c_void_p,
                                 ctypes.c_int,
                                 ctypes.c_void_p,
                                 ctypes.POINTER(mg_request_info))
class Connection(object):
    """A wrapper class for all functions that take
    struct mg_connection * as the first argument."""
    def __init__(self, mongoose, connection):
        # Keep the Mongoose wrapper (for access to .dll) and box the raw
        # struct mg_connection pointer for ctypes calls.
        self.m = mongoose
        self.conn = ctypes.c_void_p(connection)
    def get_header(self, name):
        """Return the value of request header `name`, or None if absent."""
        val = self.m.dll.mg_get_header(self.conn, name)
        return ctypes.c_char_p(val).value
    def get_var(self, data, name):
        """Look up form variable `name` in `data` (query string or body).

        NOTE(review): on success this returns the raw ctypes buffer, not
        its .value string -- confirm callers expect that.
        """
        size = data and len(data) or 0
        buf = ctypes.create_string_buffer(size)
        n = self.m.dll.mg_get_var(data, size, name, buf, size)
        return n >= 0 and buf or None
    def printf(self, fmt, *args):
        """printf-style formatted write to the client; returns mg_printf's int."""
        val = self.m.dll.mg_printf(self.conn, fmt, *args)
        return ctypes.c_int(val).value
    def write(self, data):
        """Write raw `data` to the client; returns mg_write's int result."""
        val = self.m.dll.mg_write(self.conn, data, len(data))
        return ctypes.c_int(val).value
    def read(self, size):
        """Read up to `size` bytes of request body; None when nothing read."""
        buf = ctypes.create_string_buffer(size)
        n = self.m.dll.mg_read(self.conn, buf, size)
        return n <= 0 and None or buf[:n]
    def send_file(self, path):
        """Stream the file at `path` to the client."""
        self.m.dll.mg_send_file(self.conn, path)
class Mongoose(object):
    """A wrapper class for Mongoose shared library.

    Constructing an instance starts the server; deleting it stops the
    server (mg_start/mg_stop are never called explicitly by users).
    """
    def __init__(self, callback, **kwargs):
        # Load _mongoose.dll on Windows, _mongoose.so elsewhere.
        dll_extension = os.name == 'nt' and 'dll' or 'so'
        self.dll = ctypes.CDLL('_mongoose.%s' % dll_extension)
        # Declare return types so ctypes does not truncate pointers/ints.
        self.dll.mg_start.restype = ctypes.c_void_p
        self.dll.mg_modify_passwords_file.restype = ctypes.c_int
        self.dll.mg_read.restype = ctypes.c_int
        self.dll.mg_write.restype = ctypes.c_int
        self.dll.mg_printf.restype = ctypes.c_int
        self.dll.mg_get_header.restype = ctypes.c_char_p
        self.dll.mg_get_var.restype = ctypes.c_int
        self.dll.mg_get_cookie.restype = ctypes.c_int
        self.dll.mg_get_option.restype = ctypes.c_char_p
        if callback:
            # Create a closure that will be called by the shared library.
            def func(event, connection, request_info):
                # Wrap connection pointer into the connection
                # object and call Python callback
                conn = Connection(self, connection)
                return callback(event, conn, request_info.contents) and 1 or 0
            # Convert the closure into C callable object; keep a reference
            # on self so it is not garbage collected while serving.
            self.callback = mg_callback_t(func)
            self.callback.restype = ctypes.c_char_p
        else:
            self.callback = ctypes.c_void_p(0)
        # Flatten kwargs into a NULL-terminated [key, value, ...] option list.
        args = [y for x in kwargs.items() for y in x] + [None]
        options = (ctypes.c_char_p * len(args))(*args)
        ret = self.dll.mg_start(self.callback, 0, options)
        self.ctx = ctypes.c_void_p(ret)
    def __del__(self):
        """Destructor, stop Mongoose instance."""
        self.dll.mg_stop(self.ctx)
    def get_option(self, name):
        """Return the value of configuration option `name`."""
        return self.dll.mg_get_option(self.ctx, name)
| Python |
'''
Created on 07/04/2011
@author: Eran_Z
IMDB functions, based on Alex's implementations
'''
import sys
try:
    import imdb
except ImportError:
    # IMDbPY is a hard requirement; fail fast with a readable hint.
    print 'You need to install the IMDbPY package!'
    sys.exit(1)
# A single shared handler/gateway for all IMDB API calls below.
imdbObj = imdb.IMDb()
def getLinkToMovie(title):
    """Search IMDb for `title` and print each match with its IMDb URL.

    NOTE(review): despite the name, this prints the URLs rather than
    returning a link -- confirm against callers.
    """
    try:
        # Do the search, and get the results (a list of Movie objects).
        results = imdbObj.search_movie(title)
    except imdb.IMDbError, e:
        # Network failures surface as IMDbError; exit with code 3.
        print "Probably you're not connected to Internet. Complete error report:"
        print e
        sys.exit(3)
    for movie in results:
        print '%s\t: %s : %s' % (movie.movieID, imdbObj.get_imdbID(movie),
                                 movie['long imdb title'])
        imdbURL = imdbObj.get_imdbURL(movie)
        if imdbURL:
            print 'IMDb URL: %s' % imdbURL
def searchMovie(title):
    """Search IMDb for `title`; print all matches, then exact-title matches."""
    try:
        # Do the search, and get the results (a list of Movie objects).
        results = imdbObj.search_movie(title)
    except imdb.IMDbError, e:
        # Network failures surface as IMDbError; exit with code 3.
        print "Probably you're not connected to Internet. Complete error report:"
        print e
        sys.exit(3)
    # Print the results.
    print ' %s result%s for "%s":' % (len(results),
                                      ('', 's')[len(results) != 1],
                                      title)
    print 'movieID\t: imdbID : title'
    # Print the long imdb title for every movie.
    for movie in results:
        print '%s\t: %s : %s' % (movie.movieID, imdbObj.get_imdbID(movie),
                                 movie['long imdb title'])
    ####################################################################################
    # Second pass: only movies whose title matches `title` exactly
    # (comparison is case sensitive).
    print "here all movies that have exact such title as 'title':"
    for movie in results:
        if (movie['title']==title):
            print '%s\t: %s : %s' % (movie.movieID, imdbObj.get_imdbID(movie),
                                     movie['long imdb title'])
def searchPerson(name):
try:
# Do the search, and get the results (a list of Person objects).
results = imdbObj.search_person(name)
except imdb.IMDbError, e:
print "Probably you're not connected to Internet. Complete error report:"
print e
sys.exit(3)
# Print the results.
print ' %s result%s for "%s":' % (len(results),
('', 's')[len(results) != 1],
name)
print 'personID\t: imdbID : name'
# Print the long imdb name for every person.
for person in results:
print '%s\t: %s : %s' % (person.personID, imdbObj.get_imdbID(person),
person['long imdb name'])
def topNMovies(n):
top250 = imdbObj.get_top250_movies()
ml = top250[:n]
print ''
print 'top 10 movies'
print 'rating\tvotes\ttitle'
for movie in ml:
print '%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
movie['long imdb title'])
| Python |
'''
Created on 30/03/2011
@author: Eran_Z
Utilities
'''
def sum(x, y):
    """Return ``x + y``.

    NOTE: intentionally kept under this name (other modules import it as
    a reduce() combiner), even though it shadows the builtin ``sum``.
    """
    return x + y
| Python |
'''
Created on 08/04/2011
@author: Eran_Z
Google search (num results), based on Dima's implementation
currently uses deprecated API
'''
import json
import urllib
#N = 25270000000L #25.27 billion, roughly google's index size. Should be reduced for other engines.
# Normalising constant: estimated total number of pages indexed by the
# (deprecated) AJAX search API.
N = 1870000000L #roughly the index of the deprecated API
def showsome(searchfor):
    """Return Google's estimated hit count for `searchfor` as a long.

    Uses the deprecated AJAX web-search endpoint (see module docstring);
    performs one blocking HTTP request per call.
    """
    query = urllib.urlencode({'q': searchfor})
    url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s'%query
    search_response = urllib.urlopen(url)
    search_results = search_response.read()
    results = json.loads(search_results)
    data = results['responseData']
    # estimatedResultCount arrives as a decimal string; convert to long.
    ret = data['cursor']['estimatedResultCount']
    return long(ret)
| Python |
'''
Created on 29/03/2011
@author: Eran_Z
Weighting
'''
import search_m
from util_m import sum
from math import log
#Helper functions
def __singleNGDWeight(term1, term2):
    """Pairwise NGD weight: 0 for identical terms, otherwise the
    similarity 1 - NGD(term1, term2) clipped below at 0."""
    if term1 == term2:
        return 0
    return max(0, 1 - search_m.NGD(term1, term2))
def __singleMutualInformationWeight(term1, term2):
    """Pairwise weight: joint hit count over the product of the single
    hit counts; 0 for identical terms."""
    if term1 == term2:
        return 0
    joint = search_m.searchTogether(term1, term2)
    return joint * 1.0 / (search_m.searchSingle(term1) * search_m.searchSingle(term2))
def __pij(ci, wj, hi):
    """Estimated probability of context term `ci` co-occurring with
    world term `wj`, normalised by `ci`'s total co-occurrence count `hi`."""
    joint = search_m.searchTogether(ci, wj)
    return joint * 1.0 / hi
def __plogp(ci, wj, hi):
    """Return p * log2(p) for p = __pij(ci, wj, hi).

    Fix: returns 0.0 when p == 0 (the limit of p*log p as p -> 0);
    previously math.log(0, 2) raised ValueError whenever a pair had no
    co-occurrences, crashing entropyWeighter.
    """
    p = __pij(ci, wj, hi)
    if p == 0:
        return 0.0
    return p * log(p, 2)
#Main functions
def uniformWeighter(context, world):
    """Give every context term the same weight, 1 (`world` is ignored)."""
    return [1 for _ in context]
def NGDWeighter(context, world):
    """Weight each context term by the sum of its pairwise NGD weights
    against every context term (`world` is unused)."""
    #TODO: test
    return [reduce(sum, [__singleNGDWeight(ci, cj) for cj in context])
            for ci in context]
def mutualInformationWeighter(context, world):
    """Weight each context term by the sum of its pairwise mutual
    information weights against every context term (`world` is unused)."""
    #TODO: test
    return [reduce(sum, [__singleMutualInformationWeight(ci, cj) for cj in context])
            for ci in context]
def entropyWeighter(context, world):
    """Weight context terms by one minus the normalised entropy of their
    co-occurrence distribution over the world terms: low-entropy
    (more discriminative) terms receive weights closer to 1."""
    #h[i] = sigma(j=1..n) #(ci, wj)
    h = map(lambda c: reduce(sum, map(lambda w: search_m.searchTogether(c, w), world)), context)
    # H[i] = -sum_j p_ij * log2(p_ij): entropy of ci over the world terms.
    H = map(lambda i: -reduce(sum, map(lambda w: __plogp(context[i], w, h[i]), world)), range(len(context)))
    sigma_H = reduce(sum, H)
    # Normalise by the total entropy so the weights are comparable.
    return map(lambda i: 1-(H[i]*1.0/sigma_H), range(len(context)))
def regularSupervisedWeighter(context, world):
    """Supervised weighting scheme -- not implemented yet (returns None)."""
    #TODO: stub
    pass
def normalizedSupervisedWeighter(context, world):
    """Normalised supervised weighting scheme -- not implemented yet
    (returns None)."""
    #TODO: stub
    pass
# Registry mapping display names to weighter callables; each callable
# takes (context, world) and returns a list of per-term weights.
weightingAlgorithms = {"Uniform": uniformWeighter, "NGD": NGDWeighter, "Mutual Information": mutualInformationWeighter,
                       "Entropy": entropyWeighter, "Regular Supervised": regularSupervisedWeighter,
                       "Normalized Supervised": normalizedSupervisedWeighter }
| Python |
import json
import urllib
def showsome(searchfor):
    """Return Google's estimated hit count for `searchfor`.

    NOTE(review): unlike the search_m variant, this returns the raw
    string from the API (not a long) -- confirm callers cope with that.
    Uses the deprecated AJAX web-search endpoint.
    """
    query = urllib.urlencode({'q': searchfor})
    url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s'%query
    search_response = urllib.urlopen(url)
    search_results = search_response.read()
    results = json.loads(search_results)
    data = results['responseData']
    ret = data['cursor']['estimatedResultCount']
    return ret
# Smoke test: query the API once and print the estimated result count.
val = showsome('google')
print val
| Python |
from apiclient.discovery import build
def search(searchfor):
    """Return totalResults for `searchfor` via the Custom Search API.

    The count comes back as a string from the API response.
    SECURITY: the developerKey below is committed to source control;
    it should be revoked and loaded from configuration instead.
    """
    service = build("customsearch", "v1",
                    developerKey="AIzaSyB1KoWaQxP9_o--plv19-JYDevfdhKFzjs")
    res = service.cse().list(
        q=searchfor,
        # Fixed custom search engine ID used for all queries.
        cx='017576662512468239146:omuauf_lfve',
    ).execute()
    ret = res['queries']['request'][0]['totalResults']
    return ret
# Replace 'google' with any query string you want to count hits for.
val = search('google')
# val holds the totalResults value returned by search().
print val
| Python |
#!/usr/bin/env python
import glob
import logging
import os
import sys
import unittest
from trace import fullmodname
# Conditional import of cleanup function
try:
    from tests.utils import cleanup
except ImportError:
    # tests.utils is optional: fall back to a no-op so run() can call
    # cleanup() unconditionally.  Fix: was a bare `except:`, which also
    # hid genuine errors raised while importing tests.utils.
    def cleanup():
        pass
# Ensure current working directory is in path
sys.path.insert(0, os.getcwd())
def build_suite(folder, verbosity):
# find all of the test modules
top_level_modules = map(fullmodname, glob.glob(os.path.join(folder, 'test_*.py')))
# TODO(ade) Verify that this works on Windows. If it doesn't then switch to os.walk instead
lower_level_modules = map(fullmodname, glob.glob(os.path.join(folder, '*/test_*.py')))
modules = top_level_modules + lower_level_modules
if verbosity > 0:
print "Running the tests found in the following modules:"
print modules
# load all of the tests into a suite
try:
return unittest.TestLoader().loadTestsFromNames(modules)
except Exception, exception:
# attempt to produce a more specific message
for module in modules:
__import__(module)
raise
def run(test_folder_name, verbosity, exit_on_failure):
    """Build and run the tests in `test_folder_name`.

    Exits the process with status 1 on failure when `exit_on_failure`
    is set; always calls cleanup() after a completed run.
    """
    suite = build_suite(test_folder_name, verbosity)
    outcome = unittest.TextTestRunner(verbosity=verbosity).run(suite)
    if exit_on_failure and not outcome.wasSuccessful():
        sys.exit(1)
    cleanup()
def main():
if '--help' in sys.argv:
print 'Usage: python runtests.py [-q|--quiet|-v|--verbose] [--exit_on_failure] [tests|functional_tests|contrib_tests]'
return
verbosity = 1
exit_on_failure = '--exit_on_failure' in sys.argv
if '-q' in sys.argv or '--quiet' in sys.argv:
verbosity = 0
if "-v" in sys.argv or '--verbose' in sys.argv:
verbosity = 2
if verbosity == 0:
logging.disable(logging.CRITICAL)
elif verbosity == 1:
logging.disable(logging.ERROR)
elif verbosity == 2:
logging.basicConfig(level=logging.DEBUG)
# Allow user to run a specific folder of tests
if 'tests' in sys.argv:
run('tests', verbosity, exit_on_failure)
elif 'functional_tests' in sys.argv:
run('functional_tests', verbosity, exit_on_failure)
elif 'contrib_tests' in sys.argv:
run('contrib_tests', verbosity, exit_on_failure)
else:
run('tests', verbosity, exit_on_failure)
run('functional_tests', verbosity, exit_on_failure)
run('contrib_tests', verbosity, exit_on_failure)
# Script entry point.
if __name__ == '__main__':
    main()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google API Python client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
import setup_utils
# Prefer setuptools (enables install_requires); fall back to distutils.
has_setuptools = False
try:
    from setuptools import setup
    has_setuptools = True
except ImportError:
    from distutils.core import setup
# Packages shipped unconditionally.
packages = [
    'apiclient',
    'oauth2client',
    'apiclient.ext',
    'apiclient.contrib',
    'apiclient.contrib.buzz',
    'apiclient.contrib.latitude',
    'apiclient.contrib.moderator',
    'uritemplate',
]
install_requires = []
py_modules = []
# (module to test for, install_requires to add if missing, packages to add if missing, py_modules to add if missing)
REQUIREMENTS = [
    ('httplib2', 'httplib2', 'httplib2', None),
    ('oauth2', 'oauth2', 'oauth2', None),
    ('gflags', 'python-gflags', None, ['gflags', 'gflags_validators']),
    (['json', 'simplejson', 'django.utils'], 'simplejson', 'simplejson', None)
]
# With setuptools, missing dependencies are declared in install_requires;
# without it, the bundled copies are installed via packages/py_modules.
for import_name, requires, package, modules in REQUIREMENTS:
    if setup_utils.is_missing(import_name):
        if has_setuptools:
            install_requires.append(requires)
        else:
            if package is not None:
                packages.append(package)
            else:
                py_modules.extend(modules)
long_desc = """The Google API Client for Python is a client library for
accessing the Buzz, Moderator, and Latitude APIs."""
# Declare the distribution; install_requires/packages/py_modules were
# computed above from which dependencies are already importable.
setup(name="google-api-python-client",
      version="1.0alpha11",
      description="Google API Client Library for Python",
      long_description=long_desc,
      author="Joe Gregorio",
      author_email="jcgregorio@google.com",
      url="http://code.google.com/p/google-api-python-client/",
      install_requires=install_requires,
      packages=packages,
      py_modules=py_modules,
      package_data={
          'apiclient': ['contrib/*/*.json']
      },
      license="Apache 2.0",
      keywords="google api client",
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: POSIX',
                   'Topic :: Internet :: WWW/HTTP'])
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
# Protocol-wide defaults used throughout this module.
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
    """Generic exception class for OAuth failures.

    The message is stored privately and exposed through the ``message``
    property to avoid the BaseException.message deprecation warnings
    that Python 2.6 emits.
    """
    def __init__(self, message='OAuth error occurred.'):
        self._message = message

    @property
    def message(self):
        """The human-readable error text."""
        return self._message

    def __str__(self):
        return self.message
class MissingSignature(Error):
    # Error subclass used for requests that lack an oauth_signature.
    pass
def build_authenticate_header(realm=''):
    """Return the optional WWW-Authenticate header dict for a 401 reply."""
    challenge = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': challenge}
def build_xoauth_string(url, consumer, token=None):
    """Build an XOAUTH string for use in SMTP/IMAP authentication.

    Signs a GET request for `url` with HMAC-SHA1 and serializes the
    sorted, escaped parameters into the XOAUTH wire format.
    """
    request = Request.from_consumer_and_token(consumer, token, "GET", url)
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
    params = ['%s="%s"' % (key, escape(value))
              for key, value in sorted(request.iteritems())
              if value is not None]
    return "%s %s %s" % ("GET", url, ','.join(params))
def escape(s):
    """Percent-encode `s` per OAuth rules: everything except '~' is
    escaped, including '/'."""
    encoded = urllib.quote(s, safe='~')
    return encoded
def generate_timestamp():
    """Return the current time as whole seconds since the epoch (UTC)."""
    now = time.time()
    return int(now)
def generate_nonce(length=8):
    """Return a pseudorandom nonce of `length` decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Return a pseudorandom verifier of `length` decimal digits."""
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class Consumer(object):
    """The OAuth client: a third-party application registered with a
    service provider.

    The provider issues the consumer a *key*, sent with every request to
    identify it, and a *secret*, used only when signing requests to prove
    the request really came from this consumer.  With these credentials
    the consumer can begin the authorization flow by requesting a
    request token.
    """

    key = None
    secret = None

    def __init__(self, key, secret):
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret

    def __str__(self):
        payload = {'oauth_consumer_key': self.key,
                   'oauth_consumer_secret': self.secret}
        return urllib.urlencode(payload)
class Token(object):
    """An OAuth token: a key/secret pair used to request authorization
    or access a protected resource.

    The *key* is sent with requests to identify the token; the *secret*
    is used only when computing signatures.  During authorization the
    consumer first obtains a *request token* (which may carry a callback
    URL and, under OAuth 1.0a, a verifier) and later exchanges it for an
    *access token*.
    """

    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None

    def __init__(self, key, secret):
        if key is None or secret is None:
            raise ValueError("Key and secret must be set.")
        self.key = key
        self.secret = secret

    def set_callback(self, callback):
        # Recording a callback marks it as confirmed (OAuth 1.0a).
        self.callback = callback
        self.callback_confirmed = 'true'

    def set_verifier(self, verifier=None):
        # A missing verifier is replaced by a freshly generated one.
        self.verifier = verifier if verifier is not None else generate_verifier()

    def get_callback_url(self):
        """Return the callback URL with oauth_verifier merged into its query."""
        if not (self.callback and self.verifier):
            return self.callback
        pieces = urlparse.urlparse(self.callback)
        scheme, netloc, path, params, query, fragment = pieces[:6]
        if query:
            query = '%s&oauth_verifier=%s' % (query, self.verifier)
        else:
            query = 'oauth_verifier=%s' % self.verifier
        return urlparse.urlunparse((scheme, netloc, path, params,
                                    query, fragment))

    def to_string(self):
        """Serialize to a form-urlencoded string suitable for storage.

        The result contains the token secret -- never send or store it
        where a third party can read it.
        """
        payload = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            payload['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(payload)

    @staticmethod
    def from_string(s):
        """Inverse of to_string(); raises ValueError on malformed input."""
        if not len(s):
            raise ValueError("Invalid parameter string.")
        fields = parse_qs(s, keep_blank_values=False)
        if not len(fields):
            raise ValueError("Invalid parameter string.")
        try:
            key = fields['oauth_token'][0]
        except Exception:
            raise ValueError("'oauth_token' not found in OAuth request.")
        try:
            secret = fields['oauth_token_secret'][0]
        except Exception:
            raise ValueError("'oauth_token_secret' not found in "
                             "OAuth request.")
        token = Token(key, secret)
        try:
            token.callback_confirmed = fields['oauth_callback_confirmed'][0]
        except KeyError:
            pass  # 1.0, no callback confirmed.
        return token

    def __str__(self):
        return self.to_string()
def setter(attr):
    """Property-factory decorator: turn method `attr` into the setter of
    a property whose getter and deleter operate on the instance
    __dict__ entry named after `attr`."""
    name = attr.__name__

    def read(self):
        try:
            return self.__dict__[name]
        except KeyError:
            # Mirror normal attribute semantics for unset values.
            raise AttributeError(name)

    def remove(self):
        del self.__dict__[name]

    return property(read, attr, remove)
class Request(dict):

    """The parameters and information for an HTTP request, suitable for
    authorizing with OAuth credentials.

    When a consumer wants to access a service's protected resources, it does
    so using a signed HTTP request identifying itself (the consumer) with its
    key, and providing an access token authorized by the end user to access
    those resources.

    The dict contents are the request parameters (oauth_* and otherwise);
    `method` and `url` are managed attributes (see the `setter` decorator).
    """

    version = VERSION

    def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
        self.method = method
        self.url = url
        if parameters is not None:
            self.update(parameters)

    @setter
    def url(self, value):
        # Assigning .url also computes .normalized_url (scheme://host/path
        # with default ports stripped), used for the signature base string.
        self.__dict__['url'] = value
        if value is not None:
            scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)

            # Exclude default port numbers.
            if scheme == 'http' and netloc[-3:] == ':80':
                netloc = netloc[:-3]
            elif scheme == 'https' and netloc[-4:] == ':443':
                netloc = netloc[:-4]
            if scheme not in ('http', 'https'):
                raise ValueError("Unsupported URL %s (%s)." % (value, scheme))

            # Normalized URL excludes params, query, and fragment.
            self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
        else:
            self.normalized_url = None
            self.__dict__['url'] = None

    @setter
    def method(self, value):
        # HTTP methods are stored upper-cased.
        self.__dict__['method'] = value.upper()

    def _get_timestamp_nonce(self):
        # Both keys must already be present in the dict.
        return self['oauth_timestamp'], self['oauth_nonce']

    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        return dict([(k, v) for k, v in self.iteritems()
                    if not k.startswith('oauth_')])

    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        # Only oauth_* parameters belong in the Authorization header.
        oauth_params = ((k, v) for k, v in self.items()
                            if k.startswith('oauth_'))
        stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)

        auth_header = 'OAuth realm="%s"' % realm
        if params_header:
            auth_header = "%s, %s" % (auth_header, params_header)

        return {'Authorization': auth_header}

    def to_postdata(self):
        """Serialize as post data for a POST request."""
        # tell urlencode to deal with sequence values and map them correctly
        # to resulting querystring. for example self["k"] = ["v1", "v2"] will
        # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
        return urllib.urlencode(self, True)

    def to_url(self):
        """Serialize as a URL for a GET request."""
        # Merge this request's parameters into the URL's existing query.
        base_url = urlparse.urlparse(self.url)
        query = parse_qs(base_url.query)
        for k, v in self.items():
            query.setdefault(k, []).append(v)
        url = (base_url.scheme, base_url.netloc, base_url.path, base_url.params,
               urllib.urlencode(query, True), base_url.fragment)
        return urlparse.urlunparse(url)

    def get_parameter(self, parameter):
        """Return a parameter's value, raising Error when it is absent."""
        ret = self.get(parameter)
        if ret is None:
            raise Error('Parameter not found: %s' % parameter)

        return ret

    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        items = []
        for key, value in self.iteritems():
            if key == 'oauth_signature':
                continue
            # 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
            # so we unpack sequence values into multiple items for sorting.
            if hasattr(value, '__iter__'):
                items.extend((key, item) for item in value)
            else:
                items.append((key, value))

        # Include any query string parameters from the provided URL
        query = urlparse.urlparse(self.url)[4]
        items.extend(self._split_url_string(query).items())

        encoded_str = urllib.urlencode(sorted(items))
        # Encode signature parameters per Oauth Core 1.0 protocol
        # spec draft 7, section 3.6
        # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
        # Spaces must be encoded with "%20" instead of "+"
        return encoded_str.replace('+', '%20')

    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of sign."""
        # Fill in identity parameters before signing, so they are covered
        # by the signature.
        if 'oauth_consumer_key' not in self:
            self['oauth_consumer_key'] = consumer.key

        if token and 'oauth_token' not in self:
            self['oauth_token'] = token.key

        self['oauth_signature_method'] = signature_method.name
        self['oauth_signature'] = signature_method.sign(self, consumer, token)

    @classmethod
    def make_timestamp(cls):
        """Get seconds since epoch (UTC)."""
        return str(int(time.time()))

    @classmethod
    def make_nonce(cls):
        """Generate pseudorandom number."""
        return str(random.randint(0, 100000000))

    @classmethod
    def from_request(cls, http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources.

        Precedence (later wins): explicit `parameters`, then the
        Authorization header, then `query_string`, then the URL's query.
        Returns None when no parameters were found at all.
        """
        if parameters is None:
            parameters = {}

        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = cls._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise Error('Unable to parse OAuth parameters from '
                        'Authorization header.')

        # GET or POST query string.
        if query_string:
            query_params = cls._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = cls._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return cls(http_method, http_url, parameters)

        return None

    @classmethod
    def from_consumer_and_token(cls, consumer, token=None,
            http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a Request pre-populated with the standard oauth_* defaults
        (consumer key, timestamp, nonce, version, and token/verifier)."""
        if not parameters:
            parameters = {}

        defaults = {
            'oauth_consumer_key': consumer.key,
            'oauth_timestamp': cls.make_timestamp(),
            'oauth_nonce': cls.make_nonce(),
            'oauth_version': cls.version,
        }

        # Caller-provided parameters override the defaults.
        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key
            if token.verifier:
                parameters['oauth_verifier'] = token.verifier

        return Request(http_method, http_url, parameters)

    @classmethod
    def from_token_and_callback(cls, token, callback=None,
        http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a Request carrying the token key and optional callback."""
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = callback

        return cls(http_method, http_url, parameters)

    @staticmethod
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params

    @staticmethod
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = parse_qs(param_str, keep_blank_values=False)
        # Flatten each value list to its (unescaped) first element.
        for k, v in parameters.iteritems():
            parameters[k] = urllib.unquote(v[0])
        return parameters
class Client(httplib2.Http):
    """OAuthClient is a worker to attempt to execute a request.

    Subclasses httplib2.Http so every request() call is transparently
    OAuth-signed with the configured consumer/token before dispatch.
    """

    def __init__(self, consumer, token=None, cache=None, timeout=None,
            proxy_info=None):
        # Validate credential types up front; signing would fail later
        # in confusing ways otherwise.
        if consumer is not None and not isinstance(consumer, Consumer):
            raise ValueError("Invalid consumer.")

        if token is not None and not isinstance(token, Token):
            raise ValueError("Invalid token.")

        self.consumer = consumer
        self.token = token
        # Default signing scheme; see set_signature_method().
        self.method = SignatureMethod_HMAC_SHA1()

        httplib2.Http.__init__(self, cache=cache, timeout=timeout,
            proxy_info=proxy_info)

    def set_signature_method(self, method):
        """Replace the signature scheme (must be a SignatureMethod)."""
        if not isinstance(method, SignatureMethod):
            raise ValueError("Invalid signature method.")

        self.method = method

    def request(self, uri, method="GET", body=None, headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
        """Sign the request and delegate to httplib2.Http.request().

        Form-encoded POST bodies are re-serialized with the oauth_*
        parameters included; GETs get them appended to the URL; anything
        else carries them in the Authorization header.
        """
        DEFAULT_CONTENT_TYPE = 'application/x-www-form-urlencoded'

        if not isinstance(headers, dict):
            headers = {}

        # A POST whose content type is not form-encoded is treated as
        # multipart: its body is left alone and signed via headers.
        is_multipart = method == 'POST' and headers.get('Content-Type',
            DEFAULT_CONTENT_TYPE) != DEFAULT_CONTENT_TYPE

        if body and method == "POST" and not is_multipart:
            parameters = dict(parse_qsl(body))
        else:
            parameters = None

        req = Request.from_consumer_and_token(self.consumer,
            token=self.token, http_method=method, http_url=uri,
            parameters=parameters)

        req.sign_request(self.method, self.consumer, self.token)

        if method == "POST":
            headers['Content-Type'] = headers.get('Content-Type',
                DEFAULT_CONTENT_TYPE)
            if is_multipart:
                headers.update(req.to_header())
            else:
                body = req.to_postdata()
        elif method == "GET":
            uri = req.to_url()
        else:
            headers.update(req.to_header())

        return httplib2.Http.request(self, uri, method=method, body=body,
            headers=headers, redirections=redirections,
            connection_type=connection_type)
class Server(object):
    """A skeletal implementation of a service provider, providing protected
    resources to requests from authorized consumers.

    This class implements the logic to check requests for authorization. You
    can use it with your web server or web framework to protect certain
    resources with OAuth.
    """

    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None

    def __init__(self, signature_methods=None):
        # Map of signature method name -> SignatureMethod instance.
        self.signature_methods = signature_methods or {}

    def add_signature_method(self, signature_method):
        """Register a SignatureMethod under its canonical name."""
        self.signature_methods[signature_method.name] = signature_method
        return self.signature_methods

    def verify_request(self, request, consumer, token):
        """Verifies an api call and checks all the parameters.

        Raises Error on an unsupported version or an invalid signature;
        returns the non-oauth parameters of the request on success.
        """
        # Called for its validation side effect (raises on bad version);
        # the returned value itself is not needed here.
        self._get_version(request)
        self._check_signature(request, consumer, token)
        parameters = request.get_nonoauth_parameters()
        return parameters

    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    def _get_version(self, request):
        """Verify the correct version request for this server."""
        try:
            version = request.get_parameter('oauth_version')
        except:
            # Absent oauth_version defaults to the protocol version.
            version = VERSION
        if version and version != self.version:
            raise Error('OAuth version %s not supported.' % str(version))
        return version

    def _get_signature_method(self, request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = request.get_parameter('oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))
        return signature_method

    def _get_verifier(self, request):
        return request.get_parameter('oauth_verifier')

    def _check_signature(self, request, consumer, token):
        """Validate timestamp and signature; raise Error/MissingSignature."""
        timestamp, nonce = request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        signature_method = self._get_signature_method(request)
        try:
            signature = request.get_parameter('oauth_signature')
        except:
            raise MissingSignature('Missing oauth_signature.')
        # Validate the signature.
        valid = signature_method.check(request, consumer, token, signature)
        if not valid:
            key, base = signature_method.signing_base(request, consumer, token)
            raise Error('Invalid signature. Expected signature base '
                'string: %s' % base)
        # Fix: removed a dead trailing `signature_method.sign(...)` call
        # whose result was discarded — it only re-signed the request for
        # no effect after validation already succeeded.

    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise Error('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' % (timestamp, now,
                    self.timestamp_threshold))
class SignatureMethod(object):
    """A way of signing requests.

    The OAuth protocol lets consumers and service providers pick a way to
    sign requests. This interface lists the methods the other `oauth`
    modules expect for signing. Subclass it and implement the methods to
    provide a new signing scheme.
    """

    def signing_base(self, request, consumer, token):
        """Calculate the signing key and the message to be signed.

        Returns a 2-tuple (key, message); the message component is also
        handy in error output when clients debug signature mismatches.
        """
        raise NotImplementedError

    def sign(self, request, consumer, token):
        """Produce the signature for request using consumer and token.

        Implementations should derive the message via signing_base() so
        debugging output stays meaningful.
        """
        raise NotImplementedError

    def check(self, request, consumer, token, signature):
        """Return whether signature matches what sign() would produce."""
        return self.sign(request, consumer, token) == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 signing per the OAuth 1.0 spec."""

    name = 'HMAC-SHA1'

    def signing_base(self, request, consumer, token):
        """Build the (key, base string) pair for HMAC-SHA1 signing."""
        if request.normalized_url is None:
            raise ValueError("Base URL for request is not set.")
        # Key is consumer secret and token secret, escaped and joined by '&';
        # the token part is empty for two-legged requests.
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        base_parts = [
            escape(request.method),
            escape(request.normalized_url),
            escape(request.get_normalized_parameters()),
        ]
        return key, '&'.join(base_parts)

    def sign(self, request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.signing_base(request, consumer, token)
        # HMAC object; fall back to the deprecated sha module on old Pythons.
        try:
            from hashlib import sha1 as sha
        except ImportError:
            import sha # Deprecated
        hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64, dropping the trailing newline.
        return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
    """PLAINTEXT signing: the signature is just the escaped secrets."""

    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        """Concatenates the consumer key and secret with the token's
        secret."""
        secret_pair = '%s&' % escape(consumer.secret)
        if token:
            secret_pair += escape(token.secret)
        # For PLAINTEXT the key and the message are the same string.
        return secret_pair, secret_pair

    def sign(self, request, consumer, token):
        """The signature is the signing base itself."""
        _, raw = self.signing_base(request, consumer, token)
        return raw
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import imaplib
class IMAP4_SSL(imaplib.IMAP4_SSL):
    """IMAP wrapper for imaplib.IMAP4_SSL that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate to the IMAP server using the XOAUTH mechanism.

        Args:
          url: string, the request URL the XOAUTH string is built for.
          consumer: oauth2.Consumer, the OAuth consumer.
          token: oauth2.Token, the OAuth access token.

        Raises:
          ValueError: if consumer or token has the wrong type.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        # The lambda ignores the server challenge and always responds with
        # the same freshly built XOAUTH string.
        imaplib.IMAP4_SSL.authenticate(self, 'XOAUTH',
            lambda x: oauth2.build_xoauth_string(url, consumer, token))
| Python |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        """Authenticate to the SMTP server using the XOAUTH mechanism.

        Args:
          url: string, the request URL the XOAUTH string is built for.
          consumer: oauth2.Consumer, the OAuth consumer.
          token: oauth2.Token, the OAuth access token.

        Raises:
          ValueError: if consumer or token has the wrong type.
        """
        if consumer is not None and not isinstance(consumer, oauth2.Consumer):
            raise ValueError("Invalid consumer.")
        if token is not None and not isinstance(token, oauth2.Token):
            raise ValueError("Invalid token.")
        # XOAUTH string is sent base64-encoded as the AUTH argument.
        self.docmd('AUTH', 'XOAUTH %s' % \
            base64.b64encode(oauth2.build_xoauth_string(url, consumer, token)))
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['run']
import BaseHTTPServer
import gflags
import logging
import socket
import sys
from optparse import OptionParser
from client import FlowExchangeError
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
FLAGS = gflags.FLAGS

# Whether to spin up a local web server to receive the OAuth redirect.
# run() flips this to False if no candidate port can be bound.
gflags.DEFINE_boolean('auth_local_webserver', True,
                      ('Run a local web server to handle redirects during '
                       'OAuth authorization.'))

# Host name the local redirect server binds to and advertises in the
# redirect URI.
gflags.DEFINE_string('auth_host_name', 'localhost',
                     ('Host name to use when running a local web server to '
                      'handle redirects during OAuth authorization.'))

# Candidate ports, tried in order until one binds successfully.
gflags.DEFINE_multi_int('auth_host_port', [8080, 8090],
                        ('Port to use when running a local web server to '
                         'handle redirects during OAuth authorization.'))
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.

    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Filled in by ClientRedirectHandler.do_GET with the parsed query
    # string of the single redirect request.
    # NOTE(review): defined at class level, so the dict is shared until a
    # handler assigns a fresh one — fine for the single-server usage here.
    query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.

    Handles exactly one GET: stores the parsed query string on the owning
    server as query_params and replies with a small confirmation page.
    """

    def do_GET(self):
        """Handle a GET request.

        Records the query parameters on the server and responds with a
        static completion page. Note that an error in the OAuth flow
        cannot be detected here.
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        raw_query = self.path.split('?', 1)[-1]
        self.server.query_params = dict(parse_qsl(raw_query))
        for fragment in (
                "<html><head><title>Authentication Status</title></head>",
                "<body><p>The authentication flow has completed.</p>",
                "</body></html>"):
            self.wfile.write(fragment)

    def log_message(self, format, *args):
        """Do not log messages to stdout while running as command line program."""
        pass
def run(flow, storage):
    """Core code for a command-line application.

    Steps the user through the OAuth 2.0 authorization flow, either via a
    local redirect web server or by manual copy/paste of a verification
    code, exchanges the code for credentials, and stores them.

    Args:
      flow: Flow, an OAuth 2.0 Flow to step through.
      storage: Storage, a Storage to store the credential in.

    Returns:
      Credentials, the obtained credential.
    """
    if FLAGS.auth_local_webserver:
        success = False
        port_number = 0
        # Try each configured port until one can be bound.
        for port in FLAGS.auth_host_port:
            port_number = port
            try:
                httpd = BaseHTTPServer.HTTPServer((FLAGS.auth_host_name, port),
                    ClientRedirectHandler)
            except socket.error, e:
                pass
            else:
                success = True
                break
        # If no port could be bound, fall back to out-of-band mode below.
        FLAGS.auth_local_webserver = success
    if FLAGS.auth_local_webserver:
        oauth_callback = 'http://%s:%s/' % (FLAGS.auth_host_name, port_number)
    else:
        oauth_callback = 'oob'
    authorize_url = flow.step1_get_authorize_url(oauth_callback)
    print 'Go to the following link in your browser:'
    print authorize_url
    print
    if FLAGS.auth_local_webserver:
        # Blocks until the browser hits the local redirect URI once.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        # NOTE(review): if the redirect carries neither 'error' nor 'code',
        # 'code' is unbound below and this raises NameError — confirm
        # whether that path can occur with the targeted provider.
    else:
        accepted = 'n'
        while accepted.lower() == 'n':
            accepted = raw_input('Have you authorized me? (y/n) ')
        code = raw_input('What is the verification code? ').strip()
    try:
        credentials = flow.step2_exchange(code)
    except FlowExchangeError:
        sys.exit('The authentication has failed.')
    storage.put(credentials)
    # Let the credential write itself back to storage after refreshes.
    credentials.set_store(storage.put)
    print "You have successfully authenticated."
    return credentials
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup

# Distribution metadata for the oauth2client package.
setup(name='oauth2client',
      version='1.0beta1',
      # Fix: corrected typo 'Libary' -> 'Library' in the description.
      description='Google OAuth 2.0 Client Library for Python',
      license='Apache 2.0',
      author='Google Inc.',
      packages = ['oauth2client'],
      package_dir = {'': '..'},
      author_email='jcgregorio@google.com',
      url='http://code.google.com/p/google-api-python-client',
      )
| Python |
# Copyright 2010 Google Inc. All Rights Reserved.
"""An OAuth 2.0 client
Tools for interacting with OAuth 2.0 protected
resources.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import datetime
import httplib2
import logging
import urllib
import urlparse
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
    """Base error for this module."""


class FlowExchangeError(Error):
    """Error trying to exchange an authorization grant for an access token."""


class AccessTokenRefreshError(Error):
    """Error trying to refresh an expired access token."""


class AccessTokenCredentialsError(Error):
    """Having only the access_token means no refresh is possible."""
def _abstract():
raise NotImplementedError('You need to override this function')
class Credentials(object):
    """Base class for all Credentials objects.

    Subclasses must define an authorize() method
    that applies the credentials to an HTTP transport.
    """

    def authorize(self, http):
        """Take an httplib2.Http instance (or equivalent) and
        authorizes it for the set of credentials, usually by
        replacing http.request() with a method that adds in
        the appropriate headers and then delegates to the original
        Http.request() method.
        """
        # Abstract: concrete subclasses must override.
        _abstract()
class Flow(object):
    """Abstract marker base class for all OAuth 2.0 Flow objects."""
class Storage(object):
    """Base class for all Storage objects.

    Store and retrieve a single credential.
    """

    def get(self):
        """Retrieve credential.

        Returns:
          oauth2client.client.Credentials
        """
        # Abstract: concrete Storage subclasses must override.
        _abstract()

    def put(self, credentials):
        """Write a credential.

        Args:
          credentials: Credentials, the credentials to store.
        """
        # Abstract: concrete Storage subclasses must override.
        _abstract()
class OAuth2Credentials(Credentials):
    """Credentials object for OAuth 2.0

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then signs each request from that object
    with the OAuth 2.0 access token.

    OAuth2Credentials objects may be safely pickled and unpickled.
    """

    def __init__(self, access_token, client_id, client_secret, refresh_token,
            token_expiry, token_uri, user_agent):
        """Create an instance of OAuth2Credentials

        This constructor is not usually called by the user, instead
        OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.

        Args:
          access_token: string, access token.
          client_id: string, client identifier.
          client_secret: string, client secret.
          refresh_token: string, refresh token.
          token_expiry: datetime, when the access_token expires.
          token_uri: string, URI of token endpoint.
          user_agent: string, The HTTP User-Agent to provide for this
              application.

        Notes:
          store: callable, a callable that when passed a Credential
            will store the credential back to where it came from.
            This is needed to store the latest access_token if it
            has expired and been refreshed.
        """
        self.access_token = access_token
        self.client_id = client_id
        self.client_secret = client_secret
        self.refresh_token = refresh_token
        self.store = None
        self.token_expiry = token_expiry
        self.token_uri = token_uri
        self.user_agent = user_agent

        # True if the credentials have been revoked or expired and can't be
        # refreshed.
        self._invalid = False

    @property
    def invalid(self):
        """True if the credentials are invalid, such as being revoked."""
        # getattr handles instances unpickled from before _invalid existed.
        return getattr(self, '_invalid', False)

    def set_store(self, store):
        """Set the storage for the credential.

        Args:
          store: callable, a callable that when passed a Credential
            will store the credential back to where it came from.
            This is needed to store the latest access_token if it
            has expired and been refreshed.
        """
        self.store = store

    def __getstate__(self):
        """Trim the state down to something that can be pickled.

        The store callable is dropped; set_store() must be called again
        after unpickling.
        """
        d = copy.copy(self.__dict__)
        del d['store']
        return d

    def __setstate__(self, state):
        """Reconstitute the state of the object from being pickled.
        """
        self.__dict__.update(state)
        self.store = None

    def _refresh(self, http_request):
        """Refresh the access_token using the refresh_token.

        Args:
          http_request: callable matching httplib2.Http.request's
              signature, used to POST to the token endpoint.

        Raises:
          AccessTokenRefreshError: if the refresh fails; the credentials
              are also flagged invalid when the server reports an error.
        """
        body = urllib.urlencode({
            'grant_type': 'refresh_token',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token' : self.refresh_token
            })
        headers = {
            'user-agent': self.user_agent,
            'content-type': 'application/x-www-form-urlencoded'
            }
        # Fix: corrected typo in log message ('Refresing').
        logging.info("Refreshing access_token")
        resp, content = http_request(
            self.token_uri, method='POST', body=body, headers=headers)
        if resp.status == 200:
            # TODO(jcgregorio) Raise an error if loads fails?
            d = simplejson.loads(content)
            self.access_token = d['access_token']
            self.refresh_token = d.get('refresh_token', self.refresh_token)
            if 'expires_in' in d:
                # NOTE(review): datetime.now() is timezone-naive — expiry
                # comparisons elsewhere must use the same convention.
                self.token_expiry = datetime.timedelta(
                    seconds = int(d['expires_in'])) + datetime.datetime.now()
            else:
                self.token_expiry = None
            # Persist the refreshed token if a store is attached.
            if self.store is not None:
                self.store(self)
        else:
            # An {'error':...} response body means the token is expired or
            # revoked, so we flag the credentials as such.
            logging.error('Failed to retrieve access token: %s' % content)
            error_msg = 'Invalid response %s.' % resp['status']
            try:
                d = simplejson.loads(content)
                if 'error' in d:
                    error_msg = d['error']
                    self._invalid = True
                    if self.store is not None:
                        self.store(self)
                    else:
                        logging.warning("Unable to store refreshed credentials, no Storage provided.")
            except:
                # Non-JSON error body: keep the generic error message.
                pass
            raise AccessTokenRefreshError(error_msg)

    def authorize(self, http):
        """Authorize an httplib2.Http instance with these credentials.

        Args:
          http: An instance of httplib2.Http
            or something that acts like it.

        Returns:
          A modified instance of http that was passed in.

        Example:
          h = httplib2.Http()
          h = credentials.authorize(h)

        You can't create a new OAuth subclass of httplib2.Authentication
        because it never gets passed the absolute URI, which is needed for
        signing. So instead we have to overload 'request' with a closure
        that adds in the Authorization header and then calls the original
        version of 'request()'.
        """
        request_orig = http.request

        # The closure that will replace 'httplib2.Http.request'.
        def new_request(uri, method='GET', body=None, headers=None,
                redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                connection_type=None):
            """Modify the request headers to add the appropriate
            Authorization header."""
            # Fix: use identity comparison for None (was 'headers == None').
            if headers is None:
                headers = {}
            headers['authorization'] = 'OAuth ' + self.access_token
            if 'user-agent' in headers:
                headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
            else:
                headers['user-agent'] = self.user_agent
            resp, content = request_orig(uri, method, body, headers,
                redirections, connection_type)
            if resp.status == 401:
                # A 401 means the token likely expired: refresh once and
                # retry the request with the new token.
                logging.info("Refreshing because we got a 401")
                self._refresh(request_orig)
                headers['authorization'] = 'OAuth ' + self.access_token
                return request_orig(uri, method, body, headers,
                    redirections, connection_type)
            else:
                return (resp, content)

        http.request = new_request
        return http
class AccessTokenCredentials(OAuth2Credentials):
    """Credentials object for OAuth 2.0

    Credentials can be applied to an httplib2.Http object using the
    authorize() method, which then signs each request from that object
    with the OAuth 2.0 access token. This set of credentials is for the
    use case where you have acquired an OAuth 2.0 access_token from
    another place such as a JavaScript client or another web application,
    and wish to use it from Python. Because only the access_token is
    present it can not be refreshed and will in time expire.

    AccessTokenCredentials objects may be safely pickled and unpickled.

    Usage:
      credentials = AccessTokenCredentials('<an access token>',
        'my-user-agent/1.0')
      http = httplib2.Http()
      http = credentials.authorize(http)

    Exceptions:
      AccessTokenCredentialsError: raised when the access_token expires or
          is revoked.
    """

    def __init__(self, access_token, user_agent):
        """Create an instance of AccessTokenCredentials.

        This is one of the few types of Credentials that you should
        construct directly; Credentials objects are usually instantiated
        by a Flow.

        Args:
          access_token: string, access token.
          user_agent: string, The HTTP User-Agent to provide for this
              application.

        Notes:
          store: callable, a callable that when passed a Credential
            will store the credential back to where it came from.
        """
        # All refresh-related fields are None: this credential can never
        # be refreshed.
        super(AccessTokenCredentials, self).__init__(
            access_token,
            None,
            None,
            None,
            None,
            None,
            user_agent)

    def _refresh(self, http_request):
        # No refresh_token exists, so any refresh attempt is an error.
        raise AccessTokenCredentialsError(
            "The access_token is expired or invalid and can't be refreshed.")
class OAuth2WebServerFlow(Flow):
    """Does the Web Server Flow for OAuth 2.0.

    OAuth2Credentials objects may be safely pickled and unpickled.
    """

    def __init__(self, client_id, client_secret, scope, user_agent,
            auth_uri='https://accounts.google.com/o/oauth2/auth',
            token_uri='https://accounts.google.com/o/oauth2/token',
            **kwargs):
        """Constructor for OAuth2WebServerFlow

        Args:
          client_id: string, client identifier.
          client_secret: string client secret.
          scope: string, scope of the credentials being requested.
          user_agent: string, HTTP User-Agent to provide for this application.
          auth_uri: string, URI for authorization endpoint. For convenience
            defaults to Google's endpoints but any OAuth 2.0 provider can be used.
          token_uri: string, URI for token endpoint. For convenience
            defaults to Google's endpoints but any OAuth 2.0 provider can be used.
          **kwargs: dict, The keyword arguments are all optional and required
                        parameters for the OAuth calls.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.user_agent = user_agent
        self.auth_uri = auth_uri
        self.token_uri = token_uri
        # Extra provider-specific query parameters for the authorize URL.
        self.params = kwargs
        # Set by step1_get_authorize_url; reused in step2_exchange.
        self.redirect_uri = None

    def step1_get_authorize_url(self, redirect_uri='oob'):
        """Returns a URI to redirect to the provider.

        Args:
          redirect_uri: string, Either the string 'oob' for a non-web-based
                        application, or a URI that handles the callback from
                        the authorization server.

        If redirect_uri is 'oob' then pass in the
        generated verification code to step2_exchange,
        otherwise pass in the query parameters received
        at the callback uri to step2_exchange.
        """
        self.redirect_uri = redirect_uri
        query = {
            'response_type': 'code',
            'client_id': self.client_id,
            'redirect_uri': redirect_uri,
            'scope': self.scope,
            }
        query.update(self.params)
        # Merge our query parameters into any already present on auth_uri.
        parts = list(urlparse.urlparse(self.auth_uri))
        query.update(dict(parse_qsl(parts[4]))) # 4 is the index of the query part
        parts[4] = urllib.urlencode(query)
        return urlparse.urlunparse(parts)

    def step2_exchange(self, code, http=None):
        """Exhanges a code for OAuth2Credentials.

        Args:
          code: string or dict, either the code as a string, or a dictionary
            of the query parameters to the redirect_uri, which contains
            the code.
          http: httplib2.Http, optional http instance to use to do the fetch

        Returns:
          OAuth2Credentials on success.

        Raises:
          FlowExchangeError: if the token endpoint rejects the exchange.
        """
        # Accept either the bare code or the full redirect query dict.
        if not (isinstance(code, str) or isinstance(code, unicode)):
            code = code['code']

        body = urllib.urlencode({
            'grant_type': 'authorization_code',
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': code,
            'redirect_uri': self.redirect_uri,
            'scope': self.scope
            })
        headers = {
            'user-agent': self.user_agent,
            'content-type': 'application/x-www-form-urlencoded'
            }
        if http is None:
            http = httplib2.Http()
        resp, content = http.request(self.token_uri, method='POST', body=body, headers=headers)
        if resp.status == 200:
            # TODO(jcgregorio) Raise an error if simplejson.loads fails?
            d = simplejson.loads(content)
            access_token = d['access_token']
            refresh_token = d.get('refresh_token', None)
            token_expiry = None
            if 'expires_in' in d:
                # NOTE(review): naive datetime — matches _refresh's
                # convention in OAuth2Credentials.
                token_expiry = datetime.datetime.now() + datetime.timedelta(seconds = int(d['expires_in']))

            logging.info('Successfully retrieved access token: %s' % content)
            return OAuth2Credentials(access_token, self.client_id, self.client_secret,
                refresh_token, token_expiry, self.token_uri,
                self.user_agent)
        else:
            logging.error('Failed to retrieve access token: %s' % content)
            error_msg = 'Invalid response %s.' % resp['status']
            try:
                # Prefer the server-provided error message when parseable.
                d = simplejson.loads(content)
                if 'error' in d:
                    error_msg = d['error']
            except:
                pass
            raise FlowExchangeError(error_msg)
| Python |
# Copyright 2010 Google Inc. All Rights Reserved.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 2.0
credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from client import Storage as BaseStorage
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from a file.

    A pickled Credentials object is kept in self._filename; a lock
    serializes concurrent access from multiple threads.
    """

    def __init__(self, filename):
        # Path of the pickle file holding the single credential.
        self._filename = filename
        # Guards all reads and writes of the file.
        self._lock = threading.Lock()

    def get(self):
        """Retrieve Credential from file.

        Returns:
          oauth2client.client.Credentials, or None if the file is missing
          or unreadable (any failure is treated as "no stored credential").
        """
        self._lock.acquire()
        try:
            f = open(self._filename, 'r')
            credentials = pickle.loads(f.read())
            f.close()
            credentials.set_store(self.put)
        except:
            # Best-effort read: missing/corrupt file yields no credential.
            credentials = None
        self._lock.release()
        return credentials

    def put(self, credentials):
        """Write a pickled Credentials to file.

        Args:
          credentials: Credentials, the credentials to store.
        """
        self._lock.acquire()
        try:
            # Fix: release the lock even if open/write raises; previously
            # an exception here left self._lock held forever, deadlocking
            # every subsequent get()/put().
            f = open(self._filename, 'w')
            f.write(pickle.dumps(credentials))
            f.close()
        finally:
            self._lock.release()
| Python |
# Copyright 2010 Google Inc. All Rights Reserved.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
    """Django model field that stores a pickled, base64-encoded
    oauth2client Credentials object in a VARCHAR column."""

    __metaclass__ = models.SubfieldBase

    def db_type(self):
        # Stored as base64 text in the database.
        return 'VARCHAR'

    def to_python(self, value):
        """Convert a DB value back to a Credentials (or pass one through)."""
        if not value:
            return None
        if isinstance(value, oauth2client.client.Credentials):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value):
        """Serialize a Credentials for storage: pickle then base64."""
        return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
    """Django model field that stores a pickled, base64-encoded
    oauth2client Flow object in a VARCHAR column."""

    __metaclass__ = models.SubfieldBase

    def db_type(self):
        # Stored as base64 text in the database.
        return 'VARCHAR'

    def to_python(self, value):
        """Convert a DB value back to a Flow (or pass one through)."""
        # NOTE(review): CredentialsField uses `if not value` here; the two
        # fields treat empty strings slightly differently — confirm intent.
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Flow):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value):
        """Serialize a Flow for storage: pickle then base64."""
        return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from
    the datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredenialsField
    on a db model class.
    """

    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.

        Args:
          model: db.Model, model class
          key_name: string, key name for the entity that has the credentials
          key_value: string, key value for the entity that has the credentials
          property_name: string, name of the property that is an CredentialsProperty
        """
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def get(self):
        """Retrieve Credential from datastore.

        Returns:
          oauth2client.Credentials
        """
        credential = None
        # Look up the first entity whose key field matches key_value.
        query = {self.key_name: self.key_value}
        entities = self.model_class.objects.filter(**query)
        if len(entities) > 0:
            credential = getattr(entities[0], self.property_name)
            if credential and hasattr(credential, 'set_store'):
                # Let the credential write itself back after refreshes.
                credential.set_store(self.put)
        return credential

    def put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
          credentials: Credentials, the credentials to store.

        NOTE(review): this constructs a brand-new model instance on every
        call; unless key_name is the primary key, each put() may insert a
        new row rather than update the existing one — verify against the
        model definition.
        """
        args = {self.key_name: self.key_value}
        entity = self.model_class(**args)
        setattr(entity, self.property_name, credentials)
        entity.save()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from client import Credentials
from client import Flow
from client import Storage
class FlowProperty(db.Property):
    """App Engine datastore Property for Flow.

    Utility property that allows easy storage and retreival of an
    oauth2client.Flow"""

    # Tell what the user type is.
    data_type = Flow

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        """Pickle the Flow into a Blob for storage."""
        flow = super(FlowProperty,
                     self).get_value_for_datastore(model_instance)
        return db.Blob(pickle.dumps(flow))

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        """Unpickle the stored Blob back into a Flow; None stays None."""
        if value is None:
            return None
        return pickle.loads(value)

    def validate(self, value):
        """Reject values that are neither None nor a Flow instance."""
        if value is not None and not isinstance(value, Flow):
            # Fix: BadValueError was referenced unqualified but is never
            # imported, so this raise path itself raised NameError; it
            # lives in the db module.
            raise db.BadValueError('Property %s must be convertible '
                                   'to a FlowThreeLegged instance (%s)' %
                                   (self.name, value))
        return super(FlowProperty, self).validate(value)

    def empty(self, value):
        """Treat any falsy value as empty for the datastore."""
        return not value
class CredentialsProperty(db.Property):
    """App Engine datastore Property for Credentials.

    Utility property that allows easy storage and retrieval of
    oath2client.Credentials
    """

    # Tell what the user type is.
    data_type = Credentials

    # For writing to datastore.
    def get_value_for_datastore(self, model_instance):
        """Pickle the Credentials into a Blob for storage."""
        cred = super(CredentialsProperty,
                     self).get_value_for_datastore(model_instance)
        return db.Blob(pickle.dumps(cred))

    # For reading from datastore.
    def make_value_from_datastore(self, value):
        """Unpickle the stored Blob back into Credentials; None stays None."""
        if value is None:
            return None
        return pickle.loads(value)

    def validate(self, value):
        """Reject values that are neither None nor a Credentials instance."""
        if value is not None and not isinstance(value, Credentials):
            # Fix: BadValueError was referenced unqualified but is never
            # imported, so this raise path itself raised NameError; it
            # lives in the db module.
            raise db.BadValueError('Property %s must be convertible '
                                   'to an Credentials instance (%s)' %
                                   (self.name, value))
        return super(CredentialsProperty, self).validate(value)

    def empty(self, value):
        """Treat any falsy value as empty for the datastore."""
        return not value
class StorageByKeyName(Storage):
    """Store and retrieve a single credential to and from
    the App Engine datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredenialsProperty
    on a datastore model class, and that entities
    are stored by key_name.
    """

    def __init__(self, model, key_name, property_name):
        """Constructor for Storage.

        Args:
          model: db.Model, model class
          key_name: string, key name for the entity that has the credentials
          property_name: string, name of the property that is an CredentialsProperty
        """
        self._model = model
        self._key_name = key_name
        self._property_name = property_name

    def get(self):
        """Retrieve Credential from datastore.

        Returns:
          oauth2client.Credentials
        """
        # get_or_insert creates an empty entity on first access, so the
        # property may legitimately be None.
        entity = self._model.get_or_insert(self._key_name)
        credential = getattr(entity, self._property_name)
        if credential and hasattr(credential, 'set_store'):
            # Let the credential persist itself back after refreshes.
            credential.set_store(self.put)
        return credential

    def put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
          credentials: Credentials, the credentials to store.
        """
        entity = self._model.get_or_insert(self._key_name)
        setattr(entity, self._property_name, credentials)
        entity.put()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for setup.py file(s)."""
__author__ = 'tom.h.miller@gmail.com (Tom Miller)'
import sys
def is_missing(packages):
  """Return True if any of the given packages can't be imported.

  The current directory is removed from sys.path for the duration of the
  check so that a local source tree doesn't mask a missing installed
  package; the original sys.path is always restored afterwards.

  Args:
    packages: module name (string) or list of module names.

  Returns:
    True if at least one module fails to import, False otherwise.
  """
  retval = True
  sys_path_original = sys.path[:]
  # Remove the current directory from the list of paths to check when
  # importing modules.
  try:
    # Sometimes it's represented by an empty string?
    sys.path.remove('')
  except ValueError:
    import os.path
    try:
      sys.path.remove(os.path.abspath(os.path.curdir))
    except ValueError:
      pass
  if not isinstance(packages, list):
    packages = [packages]
  try:
    for name in packages:
      try:
        __import__(name)
        retval = False
      except ImportError:
        # BUG FIX: the old code broke out of the loop after the FIRST
        # successful import, so later missing packages were never
        # detected. One missing package is enough to answer True.
        retval = True
        break
  finally:
    # Restore the caller's import path even if something unexpected raises.
    sys.path = sys_path_original
  return retval
| Python |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON Model tests
Unit tests for the JSON model.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import gflags
import os
import unittest
import httplib2
import apiclient.model
from apiclient.anyjson import simplejson
from apiclient.errors import HttpError
from apiclient.model import JsonModel
from apiclient.model import LoggingJsonModel
FLAGS = gflags.FLAGS
# Python 2.5 requires different modules
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
class Model(unittest.TestCase):
  """Request/response serialization behavior of apiclient JsonModel."""

  def test_json_no_body(self):
    # With no request body, no content-type header is set and the body
    # stays None.
    model = JsonModel(data_wrapper=False)
    headers = {}
    path_params = {}
    query_params = {}
    body = None
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['accept'], 'application/json')
    self.assertTrue('content-type' not in headers)
    self.assertNotEqual(query, '')
    self.assertEqual(body, None)

  def test_json_body(self):
    # An empty dict body is serialized to '{}' and tagged as JSON.
    model = JsonModel(data_wrapper=False)
    headers = {}
    path_params = {}
    query_params = {}
    body = {}
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['accept'], 'application/json')
    self.assertEqual(headers['content-type'], 'application/json')
    self.assertNotEqual(query, '')
    self.assertEqual(body, '{}')

  def test_json_body_data_wrapper(self):
    # data_wrapper=True nests the payload under a top-level 'data' key.
    model = JsonModel(data_wrapper=True)
    headers = {}
    path_params = {}
    query_params = {}
    body = {}
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['accept'], 'application/json')
    self.assertEqual(headers['content-type'], 'application/json')
    self.assertNotEqual(query, '')
    self.assertEqual(body, '{"data": {}}')

  def test_json_body_default_data(self):
    """Test that a 'data' wrapper doesn't get added if one is already present."""
    model = JsonModel(data_wrapper=True)
    headers = {}
    path_params = {}
    query_params = {}
    body = {'data': 'foo'}
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['accept'], 'application/json')
    self.assertEqual(headers['content-type'], 'application/json')
    self.assertNotEqual(query, '')
    self.assertEqual(body, '{"data": "foo"}')

  def test_json_build_query(self):
    # Query values are coerced to strings: unicode is UTF-8 encoded,
    # list values repeat the parameter, and empty lists are dropped.
    model = JsonModel(data_wrapper=False)
    headers = {}
    path_params = {}
    query_params = {'foo': 1, 'bar': u'\N{COMET}',
        'baz': ['fe', 'fi', 'fo', 'fum'], # Repeated parameters
        'qux': []}
    body = {}
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['accept'], 'application/json')
    self.assertEqual(headers['content-type'], 'application/json')
    query_dict = parse_qs(query[1:])
    self.assertEqual(query_dict['foo'], ['1'])
    self.assertEqual(query_dict['bar'], [u'\N{COMET}'.encode('utf-8')])
    self.assertEqual(query_dict['baz'], ['fe', 'fi', 'fo', 'fum'])
    self.assertTrue('qux' not in query_dict)
    self.assertEqual(body, '{}')

  def test_user_agent(self):
    # The library appends its own identifier to a caller-supplied UA.
    model = JsonModel(data_wrapper=False)
    headers = {'user-agent': 'my-test-app/1.23.4'}
    path_params = {}
    query_params = {}
    body = {}
    headers, params, query, body = model.request(headers, path_params, query_params, body)
    self.assertEqual(headers['user-agent'], 'my-test-app/1.23.4 google-api-python-client/1.0')

  def test_bad_response(self):
    # Non-2xx responses raise HttpError. Without a JSON content-type the
    # error text comes from the HTTP reason; once the response is marked
    # application/json, the parsed body message is used instead.
    model = JsonModel(data_wrapper=False)
    resp = httplib2.Response({'status': '401'})
    resp.reason = 'Unauthorized'
    content = '{"error": {"message": "not authorized"}}'
    try:
      content = model.response(resp, content)
      self.fail('Should have thrown an exception')
    except HttpError, e:
      self.assertTrue('Unauthorized' in str(e))
    resp['content-type'] = 'application/json'
    try:
      content = model.response(resp, content)
      self.fail('Should have thrown an exception')
    except HttpError, e:
      self.assertTrue('not authorized' in str(e))

  def test_good_response(self):
    # With the data wrapper enabled, only the 'data' envelope is returned.
    model = JsonModel(data_wrapper=True)
    resp = httplib2.Response({'status': '200'})
    resp.reason = 'OK'
    content = '{"data": "is good"}'
    content = model.response(resp, content)
    self.assertEqual(content, 'is good')

  def test_good_response_wo_data(self):
    # Without the wrapper, the whole parsed object is returned as-is.
    model = JsonModel(data_wrapper=False)
    resp = httplib2.Response({'status': '200'})
    resp.reason = 'OK'
    content = '{"foo": "is good"}'
    content = model.response(resp, content)
    self.assertEqual(content, {'foo': 'is good'})

  def test_good_response_wo_data_str(self):
    # A bare JSON string body deserializes to a plain string.
    model = JsonModel(data_wrapper=False)
    resp = httplib2.Response({'status': '200'})
    resp.reason = 'OK'
    content = '"data goes here"'
    content = model.response(resp, content)
    self.assertEqual(content, 'data goes here')
class LoggingModel(unittest.TestCase):
  """LoggingJsonModel should dump request/response when the flag is set."""

  def test_logging_json_model(self):
    class MockLogging(object):
      # Captures log lines in lists instead of emitting them.
      def __init__(self):
        self.info_record = []
        self.debug_record = []
      def info(self, message, *args):
        self.info_record.append(message % args)
      def debug(self, message, *args):
        self.debug_record.append(message % args)
    class MockResponse(dict):
      # Minimal stand-in for httplib2.Response: a dict with a .status.
      def __init__(self, items):
        super(MockResponse, self).__init__()
        self.status = items['status']
        for key, value in items.iteritems():
          self[key] = value
    # Monkey-patch the module's logging and flags so that output is
    # captured and request/response dumping is switched on.
    apiclient.model.logging = MockLogging()
    apiclient.model.FLAGS = copy.deepcopy(FLAGS)
    apiclient.model.FLAGS.dump_request_response = True
    model = LoggingJsonModel()
    request_body = {
        'field1': 'value1',
        'field2': 'value2'
        }
    # request() returns (headers, params, query, body); [-1] is the body.
    body_string = model.request({}, {}, {}, request_body)[-1]
    json_body = simplejson.loads(body_string)
    self.assertEqual(request_body, json_body)
    response = {'status': 200,
                'response_field_1': 'response_value_1',
                'response_field_2': 'response_value_2'}
    response_body = model.response(MockResponse(response), body_string)
    self.assertEqual(request_body, response_body)
    # The captured log must bracket the request and include each
    # response header plus the JSON body.
    self.assertEqual(apiclient.model.logging.info_record[:2],
                     ['--request-start--',
                      '-headers-start-'])
    self.assertTrue('response_field_1: response_value_1' in
                    apiclient.model.logging.info_record)
    self.assertTrue('response_field_2: response_value_2' in
                    apiclient.model.logging.info_record)
    self.assertEqual(simplejson.loads(apiclient.model.logging.info_record[-2]),
                     request_body)
    self.assertEqual(apiclient.model.logging.info_record[-1],
                     '--response-end--')
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock tests
Unit tests for the Mocks.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import os
import unittest
from apiclient.errors import HttpError
from apiclient.discovery import build
from apiclient.http import RequestMockBuilder
from apiclient.http import HttpMock
# Directory holding the canned test fixtures, relative to this module.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')


def datafile(filename):
  """Return the path of the named fixture file inside DATA_DIR."""
  path = os.path.join(DATA_DIR, filename)
  return path
class Mocks(unittest.TestCase):
def setUp(self):
self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
def test_default_response(self):
requestBuilder = RequestMockBuilder({})
buzz = build('buzz', 'v1', http=self.http, requestBuilder=requestBuilder)
activity = buzz.activities().get(postId='tag:blah', userId='@me').execute()
self.assertEqual({}, activity)
def test_simple_response(self):
requestBuilder = RequestMockBuilder({
'chili.activities.get': (None, '{"data": {"foo": "bar"}}')
})
buzz = build('buzz', 'v1', http=self.http, requestBuilder=requestBuilder)
activity = buzz.activities().get(postId='tag:blah', userId='@me').execute()
self.assertEqual({"foo": "bar"}, activity)
def test_errors(self):
errorResponse = httplib2.Response({'status': 500, 'reason': 'Server Error'})
requestBuilder = RequestMockBuilder({
'chili.activities.list': (errorResponse, '{}')
})
buzz = build('buzz', 'v1', http=self.http, requestBuilder=requestBuilder)
try:
activity = buzz.activities().list(scope='@self', userId='@me').execute()
self.fail('An exception should have been thrown')
except HttpError, e:
self.assertEqual('{}', e.content)
self.assertEqual(500, e.resp.status)
self.assertEqual('Server Error', e.resp.reason)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovery document tests
Unit tests for objects created from discovery documents.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import unittest
import urlparse
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from apiclient.http import HttpMockSequence
from oauth2client.client import AccessTokenCredentials
from oauth2client.client import AccessTokenCredentialsError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import FlowExchangeError
from oauth2client.client import OAuth2Credentials
from oauth2client.client import OAuth2WebServerFlow
class OAuth2CredentialsTests(unittest.TestCase):
  """Refresh behavior of full (three-legged) OAuth2 credentials."""

  def setUp(self):
    access_token = "foo"
    client_id = "some_client_id"
    client_secret = "cOuDdkfjxxnv+"
    refresh_token = "1/0/a.df219fjls0"
    token_expiry = "ignored"  # NOTE(review): expiry is not exercised by these tests
    token_uri = "https://www.google.com/accounts/o8/oauth2/token"
    user_agent = "refresh_checker/1.0"
    self.credentials = OAuth2Credentials(
        access_token, client_id, client_secret,
        refresh_token, token_expiry, token_uri,
        user_agent)

  def test_token_refresh_success(self):
    # A 401 triggers a token refresh; the retried request must carry the
    # newly minted access token in its authorization header.
    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    http = self.credentials.authorize(http)
    resp, content = http.request("http://example.com")
    self.assertEqual(content['authorization'], 'OAuth 1/3w')

  def test_token_refresh_failure(self):
    # A failed refresh (non-200 from the token endpoint) surfaces as
    # AccessTokenRefreshError.
    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '400'}, '{"error":"access_denied"}'),
      ])
    http = self.credentials.authorize(http)
    try:
      http.request("http://example.com")
      self.fail("should raise AccessTokenRefreshError exception")
    except AccessTokenRefreshError:
      pass

  def test_non_401_error_response(self):
    # Only 401 triggers a refresh; other errors pass through unchanged.
    http = HttpMockSequence([
      ({'status': '400'}, ''),
      ])
    http = self.credentials.authorize(http)
    resp, content = http.request("http://example.com")
    self.assertEqual(400, resp.status)
class AccessTokenCredentialsTests(unittest.TestCase):
  """Behavior of credentials built from a bare access token."""

  def setUp(self):
    # Token-only credentials: there is no refresh token to fall back on.
    self.credentials = AccessTokenCredentials("foo", "refresh_checker/1.0")

  def test_token_refresh_success(self):
    # A 401 cannot be recovered from without a refresh token, so the
    # wrapper must raise AccessTokenCredentialsError (and nothing else).
    http = self.credentials.authorize(HttpMockSequence([
        ({'status': '401'}, ''),
        ]))
    try:
      http.request("http://example.com")
      self.fail("should throw exception if token expires")
    except AccessTokenCredentialsError:
      pass
    except Exception:
      self.fail("should only throw AccessTokenCredentialsError")

  def test_non_401_error_response(self):
    # Other HTTP errors pass straight through to the caller.
    http = self.credentials.authorize(HttpMockSequence([
        ({'status': '400'}, ''),
        ]))
    resp, content = http.request("http://example.com")
    self.assertEqual(400, resp.status)
class OAuth2WebServerFlowTest(unittest.TestCase):
  """Authorize-URL construction and code exchange for the web-server flow."""

  def setUp(self):
    self.flow = OAuth2WebServerFlow(
        client_id='client_id+1',
        client_secret='secret+1',
        scope='foo',
        user_agent='unittest-sample/1.0',
        )

  def test_construct_authorize_url(self):
    # Step 1: the authorize URL must echo client id, scope and redirect
    # URI, and always request an authorization code.
    authorize_url = self.flow.step1_get_authorize_url('oob')
    parsed = urlparse.urlparse(authorize_url)
    q = parse_qs(parsed[4])  # parsed[4] is the query-string component
    self.assertEqual(q['client_id'][0], 'client_id+1')
    self.assertEqual(q['response_type'][0], 'code')
    self.assertEqual(q['scope'][0], 'foo')
    self.assertEqual(q['redirect_uri'][0], 'oob')

  def test_exchange_failure(self):
    # Step 2: a non-200 token response raises FlowExchangeError.
    http = HttpMockSequence([
      ({'status': '400'}, '{"error":"invalid_request"}')
      ])
    try:
      credentials = self.flow.step2_exchange('some random code', http)
      self.fail("should raise exception if exchange doesn't get 200")
    except FlowExchangeError:
      pass

  def test_exchange_success(self):
    # A successful exchange populates token, expiry and refresh token.
    http = HttpMockSequence([
      ({'status': '200'},
      """{ "access_token":"SlAV32hkKG",
  "expires_in":3600,
  "refresh_token":"8xLOxBtZp8" }"""),
      ])
    credentials = self.flow.step2_exchange('some random code', http)
    self.assertEqual(credentials.access_token, 'SlAV32hkKG')
    self.assertNotEqual(credentials.token_expiry, None)
    self.assertEqual(credentials.refresh_token, '8xLOxBtZp8')

  def test_exchange_no_expires_in(self):
    # Without expires_in the resulting credentials carry no expiry time.
    http = HttpMockSequence([
      ({'status': '200'}, """{ "access_token":"SlAV32hkKG",
  "refresh_token":"8xLOxBtZp8" }"""),
      ])
    credentials = self.flow.step2_exchange('some random code', http)
    self.assertEqual(credentials.token_expiry, None)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for errors handling
"""
__author__ = 'afshar@google.com (Ali Afshar)'
import unittest
import httplib2
from apiclient.errors import HttpError
JSON_ERROR_CONTENT = """
{
"error": {
"errors": [
{
"domain": "global",
"reason": "required",
"message": "country is required",
"locationType": "parameter",
"location": "country"
}
],
"code": 400,
"message": "country is required"
}
}
"""
def fake_response(data, headers):
  """Pair an httplib2.Response built from headers with the raw body text."""
  resp = httplib2.Response(headers)
  return resp, data
class Error(unittest.TestCase):
  """Test handling of error bodies."""

  def _make_error(self, data, headers, uri=None):
    # Build an HttpError directly from raw body text and a header dict.
    resp, content = fake_response(data, headers)
    if uri is None:
      return HttpError(resp, content)
    return HttpError(resp, content, uri)

  def test_json_body(self):
    """Test a nicely formed, expected error response."""
    error = self._make_error(JSON_ERROR_CONTENT,
                             {'status': '400',
                              'content-type': 'application/json'})
    self.assertEqual(str(error), '<HttpError 400 "country is required">')

  def test_bad_json_body(self):
    """Test handling of bodies with invalid json."""
    error = self._make_error('{', {'status': '400',
                                   'content-type': 'application/json'})
    self.assertEqual(str(error), '<HttpError 400 "{">')

  def test_with_uri(self):
    """Test handling of passing in the request uri."""
    error = self._make_error('{', {'status': '400',
                                   'content-type': 'application/json'},
                             'http://example.org')
    self.assertEqual(str(error), '<HttpError 400 when requesting http://example.org returned "{">')

  def test_missing_message_json_body(self):
    """Test handling of bodies with missing expected 'message' element."""
    error = self._make_error('{}', {'status': '400',
                                    'content-type': 'application/json'})
    self.assertEqual(str(error), '<HttpError 400 "{}">')

  def test_non_json(self):
    """Test handling of non-JSON bodies"""
    # "Ok" is presumably httplib2.Response's default reason when none is
    # supplied — TODO(review): confirm against httplib2.
    error = self._make_error('NOT OK', {'status': '400'})
    self.assertEqual(str(error), '<HttpError 400 "Ok">')
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovery document tests
Unit tests for objects created from discovery documents.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import os
import unittest
import urlparse
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
from apiclient.discovery import build, key2param
from apiclient.http import HttpMock
from apiclient.http import tunnel_patch
from apiclient.http import HttpMockSequence
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
# Directory holding the canned discovery documents used as fixtures.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')


def datafile(filename):
  """Return the path of the named fixture file inside DATA_DIR."""
  path = os.path.join(DATA_DIR, filename)
  return path
class Utilities(unittest.TestCase):
  """Sanity checks for discovery helper functions."""

  def test_key2param(self):
    # Dashes become underscores; a leading digit is prefixed with 'x'.
    for wire_name, param in [('max-results', 'max_results'),
                             ('007-bond', 'x007_bond')]:
      self.assertEqual(param, key2param(wire_name))
class DiscoveryErrors(unittest.TestCase):
  """Error paths while parsing discovery documents."""

  def test_failed_to_parse_discovery_json(self):
    # build() must surface malformed discovery JSON as InvalidJsonError.
    self.http = HttpMock(datafile('malformed.json'), {'status': '200'})
    try:
      build('buzz', 'v1', self.http)
      self.fail("should have raised an exception over malformed JSON.")
    except InvalidJsonError:
      pass
class Discovery(unittest.TestCase):
  """End-to-end behavior of services built from discovery documents."""

  def test_method_error_checking(self):
    # Argument validation happens client-side, before any HTTP request.
    self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
    buzz = build('buzz', 'v1', self.http)
    # Missing required parameters
    try:
      buzz.activities().list()
      self.fail()
    except TypeError, e:
      self.assertTrue('Missing' in str(e))
    # Parameter doesn't match regex
    try:
      buzz.activities().list(scope='@myself', userId='me')
      self.fail()
    except TypeError, e:
      self.assertTrue('not an allowed value' in str(e))
    # Parameter doesn't match regex
    try:
      buzz.activities().list(scope='not@', userId='foo')
      self.fail()
    except TypeError, e:
      self.assertTrue('not an allowed value' in str(e))
    # Unexpected parameter
    try:
      buzz.activities().list(flubber=12)
      self.fail()
    except TypeError, e:
      self.assertTrue('unexpected' in str(e))

  def _check_query_types(self, request):
    # Helper: every parameter must have been coerced to its wire string.
    parsed = urlparse.urlparse(request.uri)
    q = parse_qs(parsed[4])
    self.assertEqual(q['q'], ['foo'])
    self.assertEqual(q['i'], ['1'])
    self.assertEqual(q['n'], ['1.0'])
    self.assertEqual(q['b'], ['false'])
    self.assertEqual(q['a'], ['[1, 2, 3]'])
    self.assertEqual(q['o'], ['{\'a\': 1}'])
    self.assertEqual(q['e'], ['bar'])

  def test_type_coercion(self):
    # The same query must result whether arguments arrive as floats,
    # ints/bools, or strings.
    http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', http)
    request = zoo.query(q="foo", i=1.0, n=1.0, b=0, a=[1,2,3], o={'a':1}, e='bar')
    self._check_query_types(request)
    request = zoo.query(q="foo", i=1, n=1, b=False, a=[1,2,3], o={'a':1}, e='bar')
    self._check_query_types(request)
    request = zoo.query(q="foo", i="1", n="1", b="", a=[1,2,3], o={'a':1}, e='bar')
    self._check_query_types(request)

  def test_optional_stack_query_parameters(self):
    # 'trace' and 'fields' are global parameters accepted by any method.
    http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', http)
    request = zoo.query(trace='html', fields='description')
    parsed = urlparse.urlparse(request.uri)
    q = parse_qs(parsed[4])
    self.assertEqual(q['trace'], ['html'])
    self.assertEqual(q['fields'], ['description'])

  def test_patch(self):
    http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', http)
    request = zoo.animals().patch(name='lion', body='{"description": "foo"}')
    self.assertEqual(request.method, 'PATCH')

  def test_tunnel_patch(self):
    # tunnel_patch rewrites PATCH requests so they are sent with an
    # x-http-method-override header (for proxies that block PATCH).
    http = HttpMockSequence([
      ({'status': '200'}, file(datafile('zoo.json'), 'r').read()),
      ({'status': '200'}, 'echo_request_headers_as_json'),
      ])
    http = tunnel_patch(http)
    zoo = build('zoo', 'v1', http)
    resp = zoo.animals().patch(name='lion', body='{"description": "foo"}').execute()
    self.assertTrue('x-http-method-override' in resp)

  def test_buzz_resources(self):
    # Each top-level resource in the discovery doc becomes an attribute.
    self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
    buzz = build('buzz', 'v1', self.http)
    self.assertTrue(getattr(buzz, 'activities'))
    self.assertTrue(getattr(buzz, 'photos'))
    self.assertTrue(getattr(buzz, 'people'))
    self.assertTrue(getattr(buzz, 'groups'))
    self.assertTrue(getattr(buzz, 'comments'))
    self.assertTrue(getattr(buzz, 'related'))

  def test_auth(self):
    self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
    buzz = build('buzz', 'v1', self.http)
    auth = buzz.auth_discovery()
    self.assertTrue('request' in auth)

  def test_full_featured(self):
    # Zoo should exercise all discovery facets
    # and should also have no future.json file.
    self.http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', self.http)
    self.assertTrue(getattr(zoo, 'animals'))
    request = zoo.animals().list(name='bat', projection="full")
    parsed = urlparse.urlparse(request.uri)
    q = parse_qs(parsed[4])
    self.assertEqual(q['name'], ['bat'])
    self.assertEqual(q['projection'], ['full'])

  def test_nested_resources(self):
    # Sub-resources chain off their parent: zoo.my().favorites().
    self.http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', self.http)
    self.assertTrue(getattr(zoo, 'animals'))
    request = zoo.my().favorites().list(max_results="5")
    parsed = urlparse.urlparse(request.uri)
    q = parse_qs(parsed[4])
    self.assertEqual(q['max-results'], ['5'])

  def test_top_level_functions(self):
    # Methods may live directly on the service, not only on resources.
    self.http = HttpMock(datafile('zoo.json'), {'status': '200'})
    zoo = build('zoo', 'v1', self.http)
    self.assertTrue(getattr(zoo, 'query'))
    request = zoo.query(q="foo")
    parsed = urlparse.urlparse(request.uri)
    q = parse_qs(parsed[4])
    self.assertEqual(q['q'], ['foo'])
class Next(unittest.TestCase):
  """Pagination: *_next methods should follow the item's 'next' link."""

  def test_next_for_people_liked(self):
    self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
    buzz = build('buzz', 'v1', self.http)
    next_link = 'http://www.googleapis.com/next-link'
    people = {'links': {'next': [{'href': next_link}]}}
    request = buzz.people().liked_next(people)
    self.assertEqual(request.uri, next_link)
class DeveloperKey(unittest.TestCase):
  """The developerKey given to build() must ride along on every request."""

  def _next_request(self):
    # Build the service with a developer key and page past a canned
    # 'next' link; returns the resulting request object.
    self.http = HttpMock(datafile('buzz.json'), {'status': '200'})
    buzz = build('buzz', 'v1', self.http, developerKey='foobie_bletch')
    activities = {'links':
                  {'next':
                   [{'href': 'http://www.googleapis.com/next-link'}]}}
    return buzz.activities().list_next(activities)

  def test_param(self):
    # The key appears as the 'key' query parameter.
    request = self._next_request()
    q = parse_qs(urlparse.urlparse(request.uri)[4])
    self.assertEqual(q['key'], ['foobie_bletch'])

  def test_next_for_activities_list(self):
    # The full next-page URI carries the key appended to the link.
    request = self._next_request()
    self.assertEqual(request.uri,
                     'http://www.googleapis.com/next-link?key=foobie_bletch')
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.