text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python3
#
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
import socket
import struct
import math
import sys
import string
import pprint
def hexDump(bytes):
    """Useful utility; prints the string in hexadecimal.

    Accepts either ``str`` or ``bytes``.  Eight byte values are printed per
    row, each row followed by the repr() of that slice; a shorter final row
    is padded out so the reprs line up.

    Fixes vs. the original: ``string.rjust`` no longer exists in Python 3,
    ``ord()`` fails on the ints produced by indexing a bytes object, and the
    trailing repr sliced one byte too many.
    """
    for i in range(len(bytes)):
        item = bytes[i]
        # Indexing bytes yields ints on Python 3; str yields 1-char strings.
        code = item if isinstance(item, int) else ord(item)
        sys.stdout.write("%2x " % code)
        if (i + 1) % 8 == 0:
            print(repr(bytes[i - 7:i + 1]))
    if len(bytes) % 8 != 0:
        # Show only the not-yet-repr'd tail (the original used i - len%8,
        # which repeated one byte from the previous row).
        print("".rjust(11), repr(bytes[i + 1 - len(bytes) % 8:i + 1]))
class OSCMessage:
    """Builds typetagged OSC messages.

    An instance accumulates an address pattern, a typetag string (always
    starting with ','), and a packed argument payload; getBinary() glues the
    three together into one wire-format message.
    """

    def __init__(self):
        self.address = ""
        self.typetags = ","
        self.message = ""

    def setAddress(self, address):
        """Set the OSC address pattern (e.g. "/foo/play")."""
        self.address = address

    def setMessage(self, message):
        """Replace the packed argument payload wholesale."""
        self.message = message

    def setTypetags(self, typetags):
        """Replace the typetag string wholesale."""
        self.typetags = typetags

    def clear(self):
        """Reset the address and drop every appended argument."""
        self.address = ""
        self.clearData()

    def clearData(self):
        """Drop the payload and restart the typetags at ','."""
        self.typetags = ","
        self.message = ""

    def append(self, argument, typehint=None):
        """Append one argument, updating the typetags from its type.

        Pass typehint='b' to encode the argument as an OSC blob
        (counted string); anything else is auto-detected by OSCArgument.
        """
        if typehint == 'b':
            tag, packed = OSCBlob(argument)
        else:
            tag, packed = OSCArgument(argument)
        self.typetags += tag
        self.rawAppend(packed)

    def rawAppend(self, data):
        """Concatenate pre-packed data onto the payload; prefer append()."""
        self.message += data

    def getBinary(self):
        """Return address + typetags + payload as one packed message."""
        return (OSCArgument(self.address)[1]
                + OSCArgument(self.typetags)[1]
                + self.message)

    def __repr__(self):
        return self.getBinary()
def readString(data):
    """Pop the leading OSC string off *data*.

    OSC strings are NUL-terminated and padded to a 4-byte boundary.
    Returns (decoded_text, remaining_bytes).
    """
    terminator = data.find(0)
    consumed = int(math.ceil((terminator + 1) / 4.0) * 4)
    text = data[:terminator].decode('utf_8')
    return (text, data[consumed:])
def readBlob(data):
    """Pop the leading OSC blob off *data*.

    A blob is a big-endian int32 byte count followed by that many bytes,
    padded to a 4-byte boundary.  Returns (blob_bytes, remaining_bytes).
    """
    size = struct.unpack(">i", data[0:4])[0]
    skip = int(math.ceil(size / 4.0) * 4) + 4
    return (data[4:4 + size], data[skip:])
def readInt(data):
    """Pop a big-endian int32 off *data*; returns (value, remaining_bytes).

    If fewer than 4 bytes remain, an error is printed and (0, data) is
    returned, leaving the input untouched.
    """
    if len(data) < 4:
        print("Error: too few bytes for int", data, len(data))
        return (0, data)
    value = struct.unpack(">i", data[0:4])[0]
    return (value, data[4:])
def readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.

    Returns (value, remaining_bytes).

    Fixes vs. the original: ``long`` does not exist on Python 3
    (NameError), and reassembling from two signed 32-bit halves with
    ``(high << 32) + low`` was wrong whenever the low word was negative.
    Unpacking ">q" handles both issues in one step.
    """
    big = struct.unpack(">q", data[0:8])[0]
    return (big, data[8:])
def readDouble(data):
    """Pop a big-endian 64-bit float off *data*.

    Returns (value, remaining_bytes).
    """
    value = struct.unpack(">d", data[0:8])[0]
    return (float(value), data[8:])
def readFloat(data):
    """Pop a big-endian 32-bit float off *data*; returns (value, rest).

    If fewer than 4 bytes remain, an error is printed and (0, data) is
    returned, leaving the input untouched.

    Fix vs. the original: the local result no longer shadows the builtin
    ``float``.
    """
    if len(data) < 4:
        print("Error: too few bytes for float", data, len(data))
        return (0, data)
    value = struct.unpack(">f", data[0:4])[0]
    return (value, data[4:])
def OSCBlob(next):
    """Convert a string or bytes object into an OSC Blob,
    returning a (typetag, data) tuple.

    The blob is a big-endian int32 byte count followed by the payload,
    zero-padded to a 4-byte boundary.  Non string/bytes input yields
    ('', b'').

    Fix vs. the original: ``struct.pack`` with an 's' format requires
    bytes on Python 3, so str input is UTF-8 encoded first, and the
    empty fallback is bytes rather than str.
    """
    if isinstance(next, str):
        next = next.encode('utf_8')
    if isinstance(next, bytes):
        length = len(next)
        # Round the payload size up to the next multiple of 4.
        padded = int(math.ceil(length / 4.0) * 4)
        binary = struct.pack(">i%ds" % padded, length, next)
        tag = 'b'
    else:
        tag = ''
        binary = b''
    return (tag, binary)
def OSCArgument(next):
    """Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.

    Strings/bytes pack as NUL-terminated, 4-byte padded OSC strings
    (tag 's'); floats pack big-endian 32-bit (tag 'f'); ints pack
    big-endian int32 (tag 'i').  Anything else — including bool, which
    the original's exact type() check also excluded — yields ("", b"").

    Fix vs. the original: ``struct.pack`` with an 's' format requires
    bytes on Python 3, so str input is UTF-8 encoded first.
    """
    if isinstance(next, str):
        next = next.encode('utf_8')
    if isinstance(next, bytes):
        # +1 guarantees at least one NUL terminator before padding.
        paddedLength = int(math.ceil((len(next) + 1) / 4.0) * 4)
        binary = struct.pack(">%ds" % paddedLength, next)
        tag = "s"
    elif isinstance(next, float):
        binary = struct.pack(">f", next)
        tag = "f"
    elif isinstance(next, int) and not isinstance(next, bool):
        binary = struct.pack(">i", next)
        tag = "i"
    else:
        binary = b""
        tag = ""
    return (tag, binary)
def parseArgs(args):
    """Given a list of strings, produces a list
    where those strings have been parsed (where
    possible) as floats or integers.

    Each entry is stripped first; entries that parse as a number but
    contain no '.' are returned as ints, numeric entries with a '.' as
    floats, everything else as the stripped string.

    Fix vs. the original: ``string.find`` does not exist in the Python 3
    string module; the membership test replaces it.  The bare ``except``
    is narrowed to the errors float() actually raises.
    """
    parsed = []
    for arg in args:
        print(arg)  # debug echo, kept from the original
        arg = arg.strip()
        try:
            interpretation = float(arg)
            if "." not in arg:
                interpretation = int(interpretation)
        except (ValueError, TypeError):
            # Oh - it was a string.
            interpretation = arg
        parsed.append(interpretation)
    return parsed
# Hand-built reference packets (raw byte values) for eyeballing decodeOSC.
# 47 == '/', 44 == ','; each commented line below a vector shows the list
# decodeOSC is expected to produce for it.
l = [47, 116, 101, 115, 116, 0, 0, 0, 44, 105, 0, 0, 0, 0, 0, 4]
b = bytes(l)
# decodeISC(b)
# ['/test', ',i', 4]
ll = [47, 116, 101, 115, 116, 0, 0, 0, 44, 105, 0, 0, 0, 0, 1, 23]
#decode(bytes(ll)
# ['/test', ',i', 279]
bbf = [47, 116, 101, 115, 116, 47, 48, 0, 44, 98, 98, 102, 0, 0, 0, 0, 0, 0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 4, 0, 8, 8, 8, 66, 242, 0, 0]
# decode(bytes(bbf)
# ['/test/0', ',bbf', b'\x15', b'\x00\x08\x08\x08', 121.0]
iif =[47, 116, 101, 115, 116, 47, 48, 0, 44, 105, 105, 102, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 250, 66, 200, 61, 116]
# defcode(bytes(iif)
# ['/test/0', ',iif', 12, 250, 100.12002563476562]
class BundleNotSupported(Exception):
    """Raised by decodeOSC when the packet is an OSC "#bundle"."""
    pass
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list.

    *data* is a bytes object holding one complete message; the result is
    [address, typetags, arg1, arg2, ...].  Bundles are not supported: a
    "#bundle" address raises BundleNotSupported.
    """
    # Dispatch table: typetag character -> reader for that payload type.
    table = { "i" : readInt, "f" : readFloat, "s" : readString, "b" : readBlob, "d" : readDouble }
    decoded = []
    address, rest = readString(data)
    typetags = ""
    #print('address',address, 'rest', repr(rest))
    #typetags, rest1 = readString(rest)
    #print('typetags',typetags, 'rest', repr(rest1))
    #for tag in typetags[1:]:
    #    value, rest2 = table[tag](rest)
    #    print(value,repr(rest2))
    if address == "#bundle":
        print('BUNDLE not Supported!')
        raise BundleNotSupported
        # NOTE(review): everything below in this branch is dead code (it sits
        # after the raise) — the remains of the old bundle-walking loop.
        time, rest = readLong(rest)
        # decoded.append(address)
        # decoded.append(time)
        while len(rest)>0:
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest) > 0:
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        # A well-formed typetag string starts with ','; each following
        # character selects the reader that consumes its argument bytes.
        if typetags[0] == ",":
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print( "Oops, typetag lacks the magic ,")
    return decoded
class CallbackManager:
    """This utility class maps OSC addresses to callables.
    The CallbackManager calls its callbacks with a list
    of decoded OSC arguments, including the address and
    the typetags as the first two arguments.

    Fixes vs. the original: ``except None as e:`` is not a valid handler
    (it raised TypeError whenever any exception other than KeyError /
    IndexError occurred) and is now ``except Exception``; unbundler now
    accepts the (message, source) pair dispatch passes to every callback,
    so bundle dispatch no longer dies with a TypeError; ``address`` is
    pre-bound so the generic handler can always print it.
    """
    def __init__(self):
        self.callbacks = {}
        self.add(self.unbundler, "#bundle")

    def handle(self, data, source = None):
        """Given OSC data, tries to call the callback with the
        right address."""
        decoded = decodeOSC(data)
        self.dispatch(decoded, source)

    def dispatch(self, message, source = None):
        """Sends decoded OSC data to an appropriate callback."""
        address = None  # defined even if message[0] itself raises
        try:
            if type(message[0]) == str:
                # got a single message
                address = message[0]
                self.callbacks[address](message, source)
            elif type(message[0]) == list:
                # smells like nested messages
                for msg in message:
                    self.dispatch(msg, source)
        except KeyError:
            # address not found
            print('address %s not found ' % address)
            pprint.pprint(message)
        except IndexError:
            print('got malformed OSC message')
        except Exception as e:
            print("Exception in", address, "callback :", e)
        return

    def add(self, callback, name):
        """Adds a callback to our set of callbacks,
        or removes the callback with name if callback
        is None."""
        if callback == None:
            del self.callbacks[name]
        else:
            self.callbacks[name] = callback

    def unbundler(self, messages, source = None):
        """Dispatch the messages in a decoded bundle."""
        # first two elements are #bundle and the time tag, rest are messages.
        for message in messages[2:]:
            self.dispatch(message, source)
if __name__ == "__main__":
    # Self-test / demo.  NOTE(review): this section predates the Python 3
    # port — several steps mix str and bytes (e.g. getBinary concatenates a
    # str payload with struct.pack output), so parts will fail under py3.
    # Indentation reconstructed; whitespace was lost in extraction.
    hexDump("Welcome to the OSC testing program.")
    print()
    message = OSCMessage()
    message.setAddress("/foo/play")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    hexDump(message.getBinary())
    print( "Making and unmaking a message..")
    strings = OSCMessage()
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)
    raw = strings.getBinary()
    hexDump(raw)
    print( "Retrieving arguments...")
    data = raw
    # Walk the packed payload by hand: address + typetags + 4 strings,
    # then two floats and one int.
    for i in range(6):
        text, data = readString(data)
        print( text)
    number, data = readFloat(data)
    print( number)
    number, data = readFloat(data)
    print( number)
    number, data = readInt(data)
    print( number)
    hexDump(raw)
    print( decodeOSC(raw))
    print( decodeOSC(message.getBinary()))
    print( "Testing Blob types.")
    blob = OSCMessage()
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)
    hexDump(blob.getBinary())
    print( decodeOSC(blob.getBinary()))
    def printingCallback(*stuff):
        # Demo callback: echo whatever arguments arrive on one line.
        sys.stdout.write("Got: ")
        for i in stuff:
            sys.stdout.write(str(i) + " ")
        sys.stdout.write("\n")
    print( "Testing the callback manager.")
    c = CallbackManager()
    c.add(printingCallback, "/print")
    c.handle(message.getBinary())
    message.setAddress("/print")
    c.handle(message.getBinary())
    print1 = OSCMessage()
    print1.setAddress("/print")
    print1.append("Hey man, that's cool.")
    print1.append(42)
    print1.append(3.1415926)
    c.handle(print1.getBinary())
    # Hand-build a fake bundle; decodeOSC raises BundleNotSupported on it.
    bundle = OSCMessage()
    bundle.setAddress("")
    bundle.append("#bundle")
    bundle.append(0)
    bundle.append(0)
    bundle.append(print1.getBinary(), 'b')
    bundle.append(print1.getBinary(), 'b')
    bundlebinary = bundle.message
    print( "sending a bundle to the callback manager")
    c.handle(bundlebinary)
| gratefulfrog/SPI | RPI/Python/Tests/SERIAL_TESTS/OSC_00/PythonSlip/OSC.py | Python | gpl-3.0 | 12,002 | [
"VisIt"
] | 50e1e51c592d83a5d200f687d33e094d4eae447ecb5f273cfe6f51a5fc56037d |
#!/usr/bin/env python
#
#
# This example shows how to add an observer to a Python program. It extends
# the Step1/Python/Cone.py Python example (see that example for information on
# the basic setup).
#
# VTK uses a command/observer design pattern. That is, observers watch for
# particular events that any vtkObject (or subclass) may invoke on
# itself. For example, the vtkRenderer invokes a "StartEvent" as it begins
# to render. Here we add an observer that invokes a command when this event
# is observed.
#
import vtk
import time
#
# define the callback
#
def myCallback(obj, string):
    """Observer callback: print a notice each time the watched event fires.

    VTK invokes observers with (calling object, event name); both are
    ignored here.  The print is written in call form so the file runs
    under Python 3 as well as the Python 2 it was written for.
    """
    print("Starting a render")
#
# create the basic pipeline as in Step1
#
# Source: a 10-sided cone of height 3 and base radius 1.
cone = vtk.vtkConeSource()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
# Mapper + actor wire the cone's polydata into the rendering pipeline.
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection( cone.GetOutputPort() )
coneActor = vtk.vtkActor()
coneActor.SetMapper( coneMapper )
# Renderer with a dark blue background.
ren1= vtk.vtkRenderer()
ren1.AddActor( coneActor )
ren1.SetBackground( 0.1, 0.2, 0.4 )
#
# Add the observer here
#
# myCallback will be invoked each time ren1 begins a render pass.
ren1.AddObserver("StartEvent", myCallback)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer( ren1 )
renWin.SetSize( 300, 300 )
#
# now we loop over 360 degreeees and render the cone each time
# (left disabled; uncomment to animate one full rotation)
#
# for i in range(0,360):
#     time.sleep(0.03)
#     renWin.Render()
#     ren1.GetActiveCamera().Azimuth( 1 )
| CMUSV-VisTrails/WorkflowRecommendation | examples/vtk_examples/Tutorial/Step2/Cone2.py | Python | bsd-3-clause | 1,349 | [
"VTK"
] | c1b3de99a162b31767b75073e0a16010324677e53257965b71d46747a0b00085 |
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import os
import sys
import numpy as np
def introMessage():
    """Print the author/contact banner for the prediction tool.

    Uses call-form print so the function works under Python 2 (where the
    rest of this script was written) and Python 3 alike.  Returns None.
    """
    print('==============================================================================================')
    print(' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender')
    print(' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW')
    print('==============================================================================================\n')
    return
#import user query
def importQuery():
    """Read query molecules and fingerprint them.

    Reads SMILES strings (one per line) from the module-global file_name
    and returns their 2048-bit Morgan fingerprints stacked into a numpy
    matrix of dtype uint8 (one row per molecule).
    """
    smiles_lines = open(file_name).read().splitlines()
    fingerprints = [calcFingerprints(smiles) for smiles in smiles_lines]
    return np.array(fingerprints, dtype=np.uint8)
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
    """Compute the 2048-bit, radius-2 Morgan fingerprint (ECFP4) for one
    SMILES string, returned as a list of '0'/'1' characters."""
    mol = Chem.MolFromSmiles(smiles)
    bitvect = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
    return list(bitvect.ToBitString())
#get names of uniprots
def getName():
    """Load target names for the model's classes.

    Parses classes_in_model.txt (tab-separated: name, uniprot; first line
    is a header), fills the module-global u_name dict mapping
    uniprot -> target name, and returns the uniprot ids in file order.
    """
    global u_name
    rows = open('classes_in_model.txt').read().splitlines()
    rows.pop(0)  # drop the header line
    uniprots = []
    for row in rows:
        fields = row.split('\t')
        u_name[fields[1]] = fields[0]
        uniprots.append(fields[1])
    return uniprots
#main
# Driver: banner, load query SMILES (path from argv[1]), load the pickled
# multi-target Bernoulli NB model, then write one line per target with the
# per-molecule rank of that target plus the average rank.
# NOTE(review): Python 2 script (print statements, cPickle); indentation
# reconstructed — whitespace was lost in extraction.
introMessage()
file_name = sys.argv[1]
output_name = 'out_results_singlemodel_ranked_number.txt'
file = open(output_name, 'w')
querymatrix = importQuery()
u_name = dict()
uniprots = getName()
print 'Importing Model'
with open('onemodel.pkl', 'rb') as fid:
    bnb = cPickle.load(fid)
print 'Total Number of Query Molecules : ' + str(len(querymatrix))
print 'Number of Targets in Model : ' + str(len(bnb.class_count_))
probs = bnb.predict_proba(querymatrix)
for i in range(len(uniprots)):
    row = [u_name[uniprots[i]],uniprots[i]]
    # Rank of target i within each molecule's probability vector (1 = best).
    for prob in probs:
        order = prob.argsort()
        ranks = order[::-1].argsort()
        row.append(ranks[i]+1)
    row.append("%.2f" % np.average(row[2:]))
    file.write('\t'.join(map(str,row)) + '\n')
    #update precent finished
    percent = (float(i)/float(len(uniprots)))*100
    sys.stdout.write('Performing Classification on Query Molecules: %3d%%\r' % percent)
    sys.stdout.flush()
print '\nWrote Results to: ' + output_name
file.close()
"RDKit"
] | c4500436e848360f31273ab6192f487c3d507567a2802faf5ad549d62f6ef05d |
from yade import pack
from yade import export
from yade import timing
from yade import plot
# Yade regression check: triaxial packing + PFV flow engine driving a
# pressure-imposed imbibition front, verifying force balance and water-volume
# bookkeeping.  Runs only when yade was built with PFVFLOW.
# NOTE(review): indentation below is reconstructed — whitespace was lost in
# extraction; verify nesting against the upstream yade script.
if ( 'PFVFLOW' in features ):
    num_spheres=1000# number of spheres
    young=1e6
    compFricDegree = 3 # initial contact friction during the confining phase
    finalFricDegree = 30 # contact friction during the deviatoric loading
    mn,mx=Vector3(0,0,0),Vector3(1,1,0.4) # corners of the initial packing
    graindensity=2600
    # Tolerances for the volume-balance checks in fluxtest().
    toleranceWarning =1.e-11
    toleranceCritical=1.e-6
    O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=radians(compFricDegree),density=graindensity,label='spheres'))
    O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=0,density=0,label='walls'))
    walls=aabbWalls([mn,mx],thickness=0,material='walls')
    wallIds=O.bodies.append(walls)
    sp=pack.SpherePack()
    sp.makeCloud(mn,mx,-1,0.3333,num_spheres,False, 0.95,seed=1) #"seed" make the "random" generation always the same
    sp.toSimulation(material='spheres')
    triax=TriaxialStressController(
        maxMultiplier=1.+2e4/young, # spheres growing factor (fast growth)
        finalMaxMultiplier=1.+2e3/young, # spheres growing factor (slow growth)
        thickness = 0,
        stressMask = 7,
        max_vel = 0.005,
        internalCompaction=True, # If true the confining pressure is generated by growing particles
    )
    newton=NewtonIntegrator(damping=0.2)
    O.engines=[
        ForceResetter(),
        InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
        InteractionLoop(
            [Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
            [Ip2_FrictMat_FrictMat_FrictPhys()],
            [Law2_ScGeom_FrictPhys_CundallStrack()],label="iloop"
        ),
        FlowEngine(dead=1,label="flow"),#introduced as a dead engine for the moment, see 2nd section
        GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=100,timestepSafetyCoefficient=0.8),
        triax,
        newton
    ]
    # Isotropic confinement to 10 kPa, run until the packing is balanced.
    triax.goal1=triax.goal2=triax.goal3=-10000
    while 1:
        O.run(1000, True)
        unb=unbalancedForce()
        if unb<0.001 and abs(-10000-triax.meanStress)/10000<0.001:
            break
    setContactFriction(radians(finalFricDegree))
    # Freeze walls (mass==0) and accumulate the mean sphere radius.
    radius=0
    for b in O.bodies:
        if b.state.mass==0:
            b.state.blockedDOFs='xyzXYZ'
            b.state.vel=(0,0,0)
            b.state.angVel=(0,0,0)
        if b.state.mass>0:
            radius+=b.shape.radius
            # b.state.blockedDOFs='xyz'
            # b.state.vel=(0,0,0)
    radius=radius/num_spheres
    triax.dead=True
    while 1:
        O.run(1000, True)
        unb=unbalancedForce()
        if unb<0.001:
            break
    # --- Flow stage: impose pressure on the bottom (ymin) boundary only. ---
    press=1000.
    O.run(10,1)
    flow.dead=0
    flow.meshUpdateInterval=-1
    flow.useSolver=3
    flow.permeabilityFactor=1
    flow.viscosity=0.1
    flow.bndCondIsPressure=[0,0,1,0,0,0]
    flow.bndCondValue=[0,0,press,0,0,0]
    flow.boundaryUseMaxMin=[0,0,0,0,0,0]
    flow.iniVoidVolumes=True
    newton.damping=0.1
    GlobalStiffnessTimeStepper.dead=True
    # Fixed time step: min of the elastic and viscous stability limits.
    O.dt=min(0.8*PWaveTimeStep(),0.8*1./1200.*pi/flow.viscosity*graindensity*radius**2)
    O.dynDt=False
    O.run(1,1)
    # Record every cell's initial void volume; cells above 'initiallevel'
    # start dry (pressure imposed at 0) and will be invaded by water.
    voidvol=0.0
    voidvoltot=0.0
    nvoids=flow.nCells()
    totalflux=[0] * (nvoids)
    initialvol=[0] * (nvoids)
    bar=[0] * (nvoids)
    initiallevel=O.bodies[flow.wallIds[flow.ymin]].state.pos[1]+(O.bodies[flow.wallIds[flow.ymax]].state.pos[1]-O.bodies[flow.wallIds[flow.ymin]].state.pos[1])/3
    initialymax=O.bodies[flow.wallIds[flow.ymax]].state.pos[1]
    for ii in range(nvoids):
        initialvol[ii]=1./flow.getCellInvVoidVolume(ii)
        bar[ii]=flow.getCellBarycenter(ii)[1]
        voidvoltot+=initialvol[ii]
        if bar[ii]>=initiallevel:
            voidvol+=initialvol[ii]
            flow.setCellPImposed(ii,True)
            flow.setCellPressure(ii,0)
    O.run(1,1)
    iterini=O.time
    flow.saveVtk("./vtk",True)
    for ii in range(nvoids):
        if bar[ii]>=initiallevel:
            if flow.getCellPImposed(ii)==True:
                #totalflux[ii]=flow.getCellFluxFromId(ii)*O.dt
                totalflux[ii]=0.0*O.dt
    #Qx=(flow.getBoundaryFlux(flow.wallIds[flow.xmin]))*O.dt+(flow.getBoundaryFlux(flow.wallIds[flow.xmax]))*O.dt
    #Qz=(flow.getBoundaryFlux(flow.wallIds[flow.zmin]))*O.dt+(flow.getBoundaryFlux(flow.wallIds[flow.zmax]))*O.dt
    #Qin=(flow.getBoundaryFlux(flow.wallIds[flow.ymin]))*O.dt
    #Qout=(flow.getBoundaryFlux(flow.wallIds[flow.ymax]))*O.dt
    Qin=0.0*O.dt
    Qout=0.0*O.dt
    Qx=0.0*O.dt
    Qz=0.0*O.dt
    # Counters for water lost to trapped air bubbles and redistribution slack.
    bubble=0
    deltabubble=0
    deltaovercross=0
    deltaagain=0
    def pressureImbibition():
        # Advance the imbibition bookkeeping one step: accumulate boundary
        # fluxes, add each dry cell's incoming flux, then redistribute any
        # overfill to unsaturated neighbours (one and two rings); cells that
        # fill completely lose their imposed pressure (become saturated).
        global bubble,deltabubble,deltaovercross,deltaagain
        global Qin,Qout,Qz,Qx,totalflux,total
        Qin+=(flow.getBoundaryFlux(flow.wallIds[flow.ymin]))*O.dt
        Qout+=(flow.getBoundaryFlux(flow.wallIds[flow.ymax]))*O.dt
        Qx+=(flow.getBoundaryFlux(flow.wallIds[flow.xmin]))*O.dt+(flow.getBoundaryFlux(flow.wallIds[flow.xmax]))*O.dt
        Qz+=(flow.getBoundaryFlux(flow.wallIds[flow.zmin]))*O.dt+(flow.getBoundaryFlux(flow.wallIds[flow.zmax]))*O.dt
        for ii in range(nvoids):
            if bar[ii]>=initiallevel:
                if flow.getCellPImposed(ii)==True:
                    totalflux[ii]+=flow.getCellFluxFromId(ii)*O.dt
        for ii in range(nvoids):
            if bar[ii]>=initiallevel:
                if flow.getCellPImposed(ii)==True:
                    if (-totalflux[ii])>initialvol[ii]:
                        # Cell overfilled: spread the excess over unsaturated
                        # pressure-imposed neighbours.
                        deltaflux=-totalflux[ii]-initialvol[ii]
                        kk=0
                        neigh=[nvoids+2,nvoids+2,nvoids+2,nvoids+2]
                        neighok=[nvoids+2,nvoids+2,nvoids+2,nvoids+2]
                        for jj in range(4):
                            if jj<=len(flow.getNeighbors(ii))-1:
                                if flow.getCellPImposed(flow.getNeighbors(ii)[jj])==True:
                                    if (-totalflux[flow.getNeighbors(ii)[jj]])<initialvol[flow.getNeighbors(ii)[jj]]:
                                        neigh[kk]=flow.getNeighbors(ii)[jj]
                                        kk=kk+1
                        if kk==0:
                            # No receiver: excess water counts as a trapped bubble.
                            totalflux[ii]+=deltaflux
                            bubble=bubble+1
                            deltabubble+=deltaflux
                        if kk!=0:
                            totalflux[ii]+=deltaflux
                            deltafluxEach=deltaflux/kk
                            deltadelta=0
                            for xx in range(4):
                                if kk!=0:
                                    if neigh[xx]<=nvoids:
                                        jj=neigh[xx]
                                        if (-totalflux[jj]+deltafluxEach)>initialvol[jj]:
                                            deltadelta+=-totalflux[jj]-initialvol[jj]
                                            totalflux[jj]+=-(-totalflux[jj]-initialvol[jj])
                                            kk=kk-1
                                            neighok[kk]=jj
                            if kk==0:
                                # First ring saturated too: try the second ring.
                                deltaflux2=deltaflux-deltadelta
                                kk2=0
                                neigh2=[nvoids+2]*(4*4)
                                neighok2=[nvoids+2]*(4*4)
                                for xx in range(4):
                                    if neigh[xx]<=nvoids:
                                        jj=neigh[xx]
                                        for gg in range(4):
                                            if gg<=len(flow.getNeighbors(jj))-1:
                                                if flow.getCellPImposed(flow.getNeighbors(jj)[gg])==True:
                                                    if (-totalflux[flow.getNeighbors(jj)[gg]])<initialvol[flow.getNeighbors(jj)[gg]]:
                                                        neigh2[kk2]=flow.getNeighbors(jj)[gg]
                                                        kk2=kk2+1
                                if kk2==0:
                                    bubble=bubble+1
                                    deltabubble+=deltaflux2
                                if kk2!=0:
                                    deltafluxEach2=deltaflux2/kk2
                                    for xx in range(16):
                                        if kk2!=0:
                                            if neigh2[xx]<=nvoids:
                                                gg=neigh2[xx]
                                                if (-totalflux[gg]+deltafluxEach2)>initialvol[gg]:
                                                    print('check')
                                                if (-totalflux[gg]+deltafluxEach2)<=initialvol[gg]:
                                                    deltadelta+=deltafluxEach2
                                                    totalflux[gg]+=-deltafluxEach2
                                                    kk2=kk2-1
                                                    neighok2[kk2]=gg
                            if deltaflux!=deltadelta:
                                # Residual excess: retry the first-ring receivers.
                                print('overcross')
                                deltaovercross+=+(deltaflux-deltadelta)
                                if kk!=0:
                                    deltafluxEach2=(deltaflux-deltadelta)/kk
                                    for xx in range(4):
                                        if kk!=0:
                                            if neigh[xx]<=nvoids:
                                                jj=neigh[xx]
                                                if jj!=neighok[0] and jj!=neighok[1] and jj!=neighok[2] and jj!=neighok[3]:
                                                    if (-totalflux[jj]+deltafluxEach2)<=initialvol[jj]:
                                                        totalflux[jj]+=-deltafluxEach2
                                                        deltadelta+=deltafluxEach2
                                                        kk=kk-1
                                                        neighok[kk]=jj
                                if deltaflux!=deltadelta:
                                    print('again')
                                    deltaagain+=+(deltaflux-deltadelta)
        # Sum all fluxes; cells exactly full lose their imposed pressure.
        total=0
        for ii in range(nvoids):
            total+=totalflux[ii]
            if bar[ii]>=initiallevel:
                if flow.getCellPImposed(ii)==True:
                    if -(totalflux[ii])==initialvol[ii]:
                        flow.setCellPImposed(ii,False)
                        #print 'pp_',ii
                    if -(totalflux[ii])>initialvol[ii]:
                        flow.setCellPImposed(ii,False)
                        print('error_',ii)
        total=abs(total)
        # for ii in range(nvoids):
        #     if bar[ii]>=initiallevel:
        #         if flow.getCellPImposed(ii)==False:
        #             if -(totalflux[ii])!=initialvol[ii]:
        #                 print 'error_',ii
    file=open('Test.txt',"w")
    checkdifference=0
    def equilibriumtest():
        # After 1.5 s of simulated time, check once that the vertical forces
        # on bottom and top walls differ by less than 1% of the imposed
        # pressure; raise YadeCheckError otherwise.
        global F33,F22,checkdifference,errors
        #unbalanced=unbalancedForce()
        F33=abs(O.forces.f(flow.wallIds[flow.ymax])[1])
        F22=abs(O.forces.f(flow.wallIds[flow.ymin])[1])
        #F11 =abs(O.forces.f(flow.wallIds[flow.xmax])[0]),
        #F00=abs(O.forces.f(flow.wallIds[flow.xmin])[0]),
        #F44=abs(O.forces.f(flow.wallIds[flow.zmin])[2]),
        #F55=abs(O.forces.f(flow.wallIds[flow.zmax])[2]),
        deltaF=abs(F33-F22)
        file.write(str(O.iter)+" "+str(deltaF)+"\n")
        if O.time>=iterini+1.5:
            if checkdifference==0:
                print('check F done')
                if deltaF>0.01*press:
                    raise YadeCheckError('Error: too high difference between forces acting at the bottom and upper walls')
                    #O.pause()
                checkdifference=1
    once=0
    def fluxtest():
        # Volume-balance check: inflow through the bottom wall minus bubble
        # losses must equal the water stored in the invaded cells; when the
        # front has saturated every cell, also compare against the initial
        # dry void volume.
        global once,QinOk
        no=0
        QinOk=-Qin-deltabubble
        error=QinOk-total
        if error>toleranceWarning:
            print("Warning: difference between total water volume flowing through bottom wall and water loss due to air bubble generations",QinOk," vs. total water volume flowing inside dry or partially saturated cells",total)
            if error>toleranceCritical:
                raise YadeCheckError("The difference is more, than the critical tolerance!")
        file.write(str(O.time)+" "+str(QinOk)+" "+str(error)+" \n")
        for ii in range(nvoids):
            if bar[ii]>=initiallevel:
                if flow.getCellPImposed(ii)==True:
                    no=1
        if once==0:
            if no==0:
                imbtime=O.time-iterini
                print(imbtime,voidvol,total,QinOk)
                if voidvol-total>toleranceWarning:
                    print("Warning: initial volume of dry voids",voidvol," vs. total water volume flowing inside dry or partially saturated cells",total)
                    if voidvol-total>toleranceCritical:
                        raise YadeCheckError("The difference is more, than the critical tolerance!")
                file.write(str(imbtime)+" "+str(voidvol)+" "+str(total)+" "+str(QinOk)+"\n")
                once=1
                # NOTE(review): placement of timing.stats() is ambiguous in
                # the extracted source; assumed to run once at completion.
                timing.stats()
    def addPlotData():
        # Live plot of wall forces and the two volume tallies.
        global F33,F22,QinOk,total
        plot.addData(i1=O.iter,
            t=O.time,
            Fupper=F33,
            Fbottom=F22,
            Q=QinOk,
            T=total
            )
    plot.live=True
    plot.plots={'t':('Fbottom','Fupper'),' t ':('Q','T')}
    plot.plot()
    def pl():
        # Periodic VTK snapshot of the flow mesh.
        flow.saveVtk("./vtk",True)
    # Register the checks to run during the simulation.
    O.engines=O.engines+[PyRunner(iterPeriod=100,command='pl()')]
    #O.engines=O.engines+[VTKRecorder(iterPeriod=100,recorders=['spheres'],fileName='./exp')]
    O.engines=O.engines+[PyRunner(iterPeriod=1,command='equilibriumtest()')]
    O.engines=O.engines+[PyRunner(iterPeriod=1,command='pressureImbibition()')]
    O.engines=O.engines+[PyRunner(iterPeriod=1,command='fluxtest()')]
    O.engines=O.engines+[PyRunner(iterPeriod=1,command='addPlotData()')]
    O.timingEnabled=True
    #file.close()
    #plot.saveDataTxt('plots.txt',vars=('i1','t','Fupper','Fbottom','Q','T'))
    # Move outputs into a scratch directory so repeated runs start clean.
    import tempfile, shutil
    dirpath = tempfile.mkdtemp()
    for fileName in ['./vtk', './Test.txt' ]:
        if (os.path.exists(fileName)): shutil.move(fileName,dirpath)
        print("File %s moved into %s/ directory"%(fileName,dirpath))
else:
    print("This checkDEM-PFVPressureInjection.py cannot be executed because PFVFLOW is disabled")
| cosurgi/trunk | scripts/checks-and-tests/checks/checkDEM-PFVPressureInjection.py | Python | gpl-2.0 | 13,383 | [
"VTK"
] | 41f394397328631c605f2e73e8f4d13babd5300a3a3b2ad39c9d1728b52ffeca |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPreprocesscore(RPackage):
    """A library of core preprocessing routines"""

    # Bioconductor landing page and the canonical git source; versions are
    # pinned by commit hash rather than tag, per Bioconductor convention.
    homepage = "https://bioconductor.org/packages/preprocessCore/"
    git = "https://git.bioconductor.org/packages/preprocessCore.git"

    version('1.38.1', commit='c58cb4c720eda0f1c733b989b14912093a7c5fbc')

    # This Bioconductor release is only validated against the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.38.1')
| krafczyk/spack | var/spack/repos/builtin/packages/r-preprocesscore/package.py | Python | lgpl-2.1 | 1,606 | [
"Bioconductor"
] | af8c11a6849ac43128008df3e502170ba751a0448a5362b4304c859a4a54bcde |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 06:43:14 2020
@author: deborahkhider, fzhu, jeg
Utilities to manipulate timeseries
"""
# Public API of this module: `from ... import *` exposes only these names.
# Keep in sync with the functions defined below.
__all__ = [
    'simple_stats',
    'bin',
    'interp',
    'gkernel',
    'grid_properties',
    'standardize',
    'ts2segments',
    'annualize',
    'gaussianize',
    'gaussianize_single',
    'detrend',
    'detect_outliers',
    'remove_outliers',
    'eff_sample_size'
]
import numpy as np
import pandas as pd
import warnings
import copy
from scipy import special
from scipy import signal
from scipy import interpolate
from scipy import stats
from pyhht import EMD
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
import statsmodels.tsa.stattools as sms
import math
from sys import exit
from .plotting import plot_scatter_xy,plot_xy,savefig,showfig
from .filter import savitzky_golay
from .tsbase import (
clean_ts
)
def simple_stats(y, axis=None):
    """Compute basic descriptive statistics, ignoring NaNs.

    Parameters
    ----------
    y : array
        Input values; coerced to a float64 numpy array.
    axis : int, tuple of ints
        Optional axis (or axes) along which to reduce; by default the
        flattened array is used.

    Returns
    -------
    mean : float
        NaN-ignoring mean of y.
    median : float
        NaN-ignoring median of y.
    min_ : float
        NaN-ignoring minimum of y.
    max_ : float
        NaN-ignoring maximum of y.
    std : float
        NaN-ignoring standard deviation of y.
    IQR : float
        Interquartile range (75th minus 25th percentile), ignoring NaNs.
    """
    values = np.asarray(y, dtype='float64')

    # All reductions use the nan-aware numpy variants so missing data
    # does not poison the result.
    mean = np.nanmean(values, axis=axis)
    median = np.nanmedian(values, axis=axis)
    min_ = np.nanmin(values, axis=axis)
    max_ = np.nanmax(values, axis=axis)
    std = np.nanstd(values, axis=axis)
    q75 = np.nanpercentile(values, 75, axis=axis)
    q25 = np.nanpercentile(values, 25, axis=axis)
    IQR = q75 - q25

    return mean, median, min_, max_, std, IQR
def bin(x, y, bin_size=None, start=None, stop=None, evenly_spaced = True):
    """Bin the y values over windows of the x axis.

    Parameters
    ----------
    x : array
        The x-axis series.
    y : array
        The y-axis series.
    bin_size : float
        Width of each bin.  If omitted, the max spacing of x is used when
        evenly_spaced is True, otherwise the mean spacing.
    start : float
        Left edge of the first bin; defaults to min(x).
    stop : float
        Right limit of binning; defaults to max(x).
    evenly_spaced : {True, False}
        Request an evenly spaced result; ignored when bin_size is given
        explicitly (a warning is emitted in that case).

    Returns
    -------
    res_dict : dict
        'bins' : bin centers (the 100-200 bin is reported as 150),
        'binned_values' : mean of y within each bin (NaN if empty),
        'n' : number of points per bin (NaN if empty),
        'error' : standard deviation of y within each bin (NaN if empty).
    """
    x = np.array(x, dtype='float64')
    y = np.array(y, dtype='float64')

    if bin_size is not None and evenly_spaced == True:
        warnings.warn('The bin_size has been set, the series may not be evenly_spaced')

    # Derive the bin width from the spacing of x when not supplied.
    if bin_size is None:
        spacing = np.diff(x)
        bin_size = np.nanmax(spacing) if evenly_spaced == True else np.nanmean(spacing)

    start = np.nanmin(x) if start is None else start
    stop = np.nanmax(x) if stop is None else stop

    # Bin centers: shifted half a width from the start.
    bins = np.arange(start + bin_size / 2, stop + bin_size / 2, bin_size)

    binned_values, n, error = [], [], []
    half = bin_size / 2
    for center in bins:
        inside = (x >= center - half) & (x < center + half)
        selected = y[inside]
        if selected.size == 0:
            binned_values.append(np.nan)
            n.append(np.nan)
            error.append(np.nan)
        else:
            binned_values.append(np.nanmean(selected))
            n.append(selected.size)
            error.append(np.nanstd(selected))

    return {
        'bins': bins,
        'binned_values': binned_values,
        'n': n,
        'error': error,
    }
def gkernel(t,y, h = 3.0, step=None,start=None,stop=None, step_style = 'max'):
    '''
    Coarsen time resolution using a Gaussian kernel.

    Each coarse point tc[i] is the Gaussian-weighted average of the y
    values whose times fall in [tc[i], tc[i+1]); the last coarse point is
    left as NaN (no right edge to bound its window).

    Parameters
    ----------
    t : 1d array
        the original time axis
    y : 1d array
        values on the original time axis
    h : scalar
        kernel e-folding scale (standard deviation of the Gaussian)
    step : float
        The coarse step. Default comes from `grid_properties` with
        `step_style` (default 'max' spacing between consecutive points).
    start : float
        where to start the coarse axis. Default is min(t).
    stop : float
        where to stop the coarse axis. Default is max(t).
    step_style : str
        step style passed to `grid_properties` when step is None.

    Returns
    -------
    tc : 1d array
        the coarse-grained time axis
    yc : 1d array
        the coarse-grained time series

    References
    ----------
    Rehfeld, K., Marwan, N., Heitzig, J., and Kurths, J.: Comparison of
    correlation analysis techniques for irregularly sampled time series,
    Nonlin. Processes Geophys., 18, 389-404,
    https://doi.org/10.5194/npg-18-389-2011, 2011.
    '''
    if len(t) != len(y):
        raise ValueError('y and t must have the same length')

    if step is None:
        _, _, step = grid_properties(np.asarray(t), step_style=step_style)
    if start is None:
        start = np.nanmin(np.asarray(t))
    if stop is None:
        stop = np.nanmax(np.asarray(t))

    tc = np.arange(start, stop, step)

    def gauss_weight(d, s):
        # Unnormalized-in-sum Gaussian density evaluated at offsets d.
        return 1.0 / (s * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (d / s) ** 2)

    yc = np.full(len(tc), np.nan)
    for k in range(len(tc) - 1):
        window = (t >= tc[k]) & (t < tc[k + 1])
        tslice = t[window]
        yslice = y[window]
        if len(tslice) > 0:
            w = gauss_weight(tslice - tc[k], h)
            yc[k] = sum(w * yslice) / sum(w)
        else:
            yc[k] = np.nan
    return tc, yc
def grid_properties(x,step_style='median'):
    ''' Establishes the grid properties of a numerical array.

    Parameters
    ----------
    x : array
    step_style : str
        Method to obtain a representative step if x is not evenly spaced.
        Valid entries: 'median' [default], 'mean', 'mode' or 'max'.
        The mode is the most frequent spacing, a good choice for a nearly
        even grid with a few gaps. Max is conservative and suits binning
        or Gaussian-kernel coarse-graining.

    Returns
    -------
    start : float
        min(x)
    stop : float
        max(x)
    step : float
        representative spacing between consecutive values
    '''
    spacings = np.diff(x)
    # dispatch on the requested statistic; anything unrecognized falls
    # back to the median, matching the historical behavior
    pickers = {
        'mean': lambda d: d.mean(),
        'max': lambda d: d.max(),
        'mode': lambda d: stats.mode(d)[0][0],
    }
    step = pickers.get(step_style, np.median)(spacings)
    return np.nanmin(x), np.nanmax(x), step
def interp(x,y, interp_type='linear', step=None,start=None,stop=None, step_style= 'mean',**kwargs):
    """ Interpolate y onto a new, evenly spaced x-axis.

    Parameters
    ----------
    x : array
        The x-axis
    y : array
        The y-axis
    interp_type : str
        Any `kind` accepted by scipy.interpolate.interp1d
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
        'previous', 'next', or a spline order). Default 'linear'.
    step : float
        The interpolation step. Default is the spacing returned by
        `grid_properties` under `step_style`.
    start : float
        where/when to start the interpolation. Default is min(x).
    stop : float
        where/when to stop the interpolation. Default is max(x).
    kwargs : kwargs
        Passed through to scipy.interpolate.interp1d. For extrapolation,
        use `bounds_error=False` and `fill_value="extrapolate"`.

    Returns
    -------
    xi : array
        The interpolated x-axis
    yi : array
        The interpolated y values
    """
    x = np.array(x, dtype='float64')
    y = np.array(y, dtype='float64')
    # fill in grid parameters not supplied by the caller
    if step is None:
        _, _, step = grid_properties(np.asarray(x), step_style=step_style)
    if start is None:
        start = np.nanmin(np.asarray(x))
    if stop is None:
        stop = np.nanmax(np.asarray(x))
    xi = np.arange(start, stop, step)
    # interp1d requires a monotonically increasing abscissa
    ordered = pd.DataFrame({"x-axis": x, "y-axis": y}).sort_values('x-axis')
    interpolant = interpolate.interp1d(ordered['x-axis'], ordered['y-axis'],
                                       kind=interp_type, **kwargs)
    return xi, interpolant(xi)
def on_common_axis(x1, y1, x2, y2, method = 'interpolation', step=None, start=None, stop=None):
    """Places two timeseries on a common axis.

    Note this function assumes that the time representation and units are
    the same for both series (e.g., BP vs CE).

    Parameters
    ----------
    x1 : array
        x-axis values of the first timeseries
    y1 : array
        y-axis values of the first timeseries
    x2 : array
        x-axis values of the second timeseries
    y2 : array
        y-axis values of the second timeseries
    method : str or None
        How to build the common axis. Valid entries: 'interpolation'
        (default, linear interpolation), 'bin', or None/'None'.
        None only cuts the timeseries to the common period but does not
        attempt to generate a common time axis.
    step : float
        The interpolation step. Defaults to the smaller of the two mean
        spacings. NOTE(review): the original docstring said "lowest
        resolution series" but the code takes the min of the mean spacings,
        i.e. the finer resolution — confirm intent.
    start : float
        where/when to start. Default is the maximum of the minima of
        the two timeseries
    stop : float
        Where/when to stop. Default is the minimum of the maxima of
        the two timeseries

    Returns
    -------
    xi1, xi2 : array
        The common (or trimmed) x-axes
    interp_values1, interp_values2 : array
        the corresponding y-values

    Raises
    ------
    KeyError
        If `method` is not one of the valid entries.
    """
    # make sure that x1, y1, x2, y2 are numpy arrays
    x1 = np.array(x1, dtype='float64')
    y1 = np.array(y1, dtype='float64')
    x2 = np.array(x2, dtype='float64')
    y2 = np.array(y2, dtype='float64')
    # Default to the overlapping period of the two series
    if start is None:
        start = np.nanmax([np.nanmin(x1), np.nanmin(x2)])
    if stop is None:
        stop = np.nanmin([np.nanmax(x1), np.nanmax(x2)])
    # Get the interp_step
    if step is None:
        step = np.nanmin([np.nanmean(np.diff(x1)), np.nanmean(np.diff(x2))])
    if method == 'interpolation':
        # perform the interpolation
        xi1, interp_values1 = interp(x1, y1, step=step, start=start,
                                     stop=stop)
        xi2, interp_values2 = interp(x2, y2, step=step, start=start,
                                     stop=stop)
    elif method == 'bin':
        xi1, interp_values1, _ , _ = bin(x1, y1, bin_size=step, start=start,
                                         stop=stop)
        xi2, interp_values2, _ , _ = bin(x2, y2, bin_size=step, start=start,
                                         stop=stop)
    # BUGFIX: was `method == None` (non-idiomatic); also accept the string
    # 'None' which the docstring documented but the code previously rejected.
    elif method is None or method == 'None':
        # trim each series to the common period without resampling
        min_idx1 = np.where(x1>=start)[0][0]
        min_idx2 = np.where(x2>=start)[0][0]
        max_idx1 = np.where(x1<=stop)[0][-1]
        max_idx2 = np.where(x2<=stop)[0][-1]
        xi1 = x1[min_idx1:max_idx1+1]
        xi2 = x2[min_idx2:max_idx2+1]
        interp_values1 = y1[min_idx1:max_idx1+1]
        interp_values2 = y2[min_idx2:max_idx2+1]
    else:
        raise KeyError('Not a valid interpolation method')
    return xi1, xi2, interp_values1, interp_values2
def standardize(x, scale=1, axis=0, ddof=0, eps=1e-3):
    """ Centers and normalizes a time series to zero mean and unit variance.

    Constant or nearly constant series (std below `eps`) are left unscaled
    to avoid dividing by ~0.

    Parameters
    ----------
    x : array
        vector or 2-D array of (real) numbers, NaNs allowed
    scale : real
        scale factor applied so the result matches a target variance
    axis : int or None
        axis along which to operate; if None, compute over the whole array
    ddof : int
        degrees of freedom correction for the standard deviation
    eps : real
        threshold below which a standard deviation counts as zero

    Returns
    -------
    z : array
        The standardized series (z-score), Z = (X - E[X])/std(X)*scale
    mu : real
        The mean of the original series, E[X]
    sig : real
        The standard deviation of the original series, std[X]

    References
    ----------
    1. Tapio Schneider's MATLAB code: https://github.com/tapios/RegEM/blob/master/standardize.m
    2. The zscore function in SciPy: https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py

    See also
    --------
    pyleoclim.utils.tsutils.preprocess : standardization plus detrending.
    """
    x = np.asanyarray(x)
    assert x.ndim <= 2, 'The time series x should be a vector or 2-D array!'
    mu = np.nanmean(x, axis=axis)
    sig = np.nanstd(x, axis=axis, ddof=ddof)
    # working copies used in the z-score; `scale` stretches the target spread
    shift = np.asarray(np.copy(mu))
    spread = np.asarray(np.copy(sig) / scale)
    nearly_const = np.abs(sig) < eps
    if np.any(nearly_const):
        warnings.warn('Constant or nearly constant time series not rescaled.')
        # leave (nearly) constant series untouched: subtract 0, divide by 1
        shift[nearly_const] = 0
        spread[nearly_const] = 1
    if axis and mu.ndim < x.ndim:
        z = (x - np.expand_dims(shift, axis=axis)) / np.expand_dims(spread, axis=axis)
    else:
        z = (x - shift) / spread
    return z, mu, sig
def center(y, axis=0):
    """ Centers array y by removing the sample mean.

    Parameters
    ----------
    y : array
        vector or 2-D array of (real) numbers, NaNs allowed
    axis : int or None
        axis along which to operate; if None, compute over the whole array

    Returns
    -------
    yc : array
        The centered series, yc = y - ybar, NaNs allowed
    ybar : real
        The sample mean of the original series

    References
    ----------
    Tapio Schneider's MATLAB code: https://github.com/tapios/RegEM/blob/master/center.m
    """
    y = np.asanyarray(y)
    assert y.ndim <= 2, 'The time series y should be a vector or 2-D array!'
    ybar = np.nanmean(y, axis=axis)
    # broadcast the mean back to y's shape only when operating along a
    # non-leading axis reduced a dimension
    if axis and ybar.ndim < y.ndim:
        yc = y - np.expand_dims(ybar, axis=axis)
    else:
        yc = y - ybar
    return yc, ybar
def ts2segments(ys, ts, factor=10):
    ''' Chop a time series into several segments based on gap detection.

    The rule of gap detection is very simple: we define the intervals
    between time points as dts, then if dts[i] is larger than
    factor * dts[i-1], we regard it as a breaking point and chop the
    series into two segments there.

    Parameters
    ----------
    ys : array
        A time series, NaNs allowed
    ts : array
        The time points
    factor : float
        the factor that adjusts the threshold for gap detection

    Returns
    -------
    seg_ys : list
        a list of several segments with potentially different lengths
    seg_ts : list
        a list of the time axis of the several segments
    n_segs : int
        the number of segments
    '''
    ys, ts = clean_ts(ys, ts)
    nt = np.size(ts)
    dts = np.diff(ts)
    seg_ys, seg_ts = [], []  # store the segments with lists
    n_segs = 1
    i_start = 0
    for i in range(1, nt-1):
        # gradient of the spacing jumps by more than `factor` -> break here
        if np.abs(dts[i]) > factor*np.abs(dts[i-1]):
            i_end = i + 1
            seg_ys.append(ys[i_start:i_end])
            seg_ts.append(ts[i_start:i_end])
            # BUGFIX(idiom): was `np.copy(i_end)`, which wraps a plain int
            # in a 0-d array for no benefit; a plain assignment suffices.
            i_start = i_end
            n_segs += 1
    # trailing segment from the last break (or the whole series)
    seg_ys.append(ys[i_start:nt])
    seg_ts.append(ts[i_start:nt])
    return seg_ys, seg_ts, n_segs
def annualize(ys, ts):
    ''' Annualize a time series whose time resolution is finer than 1 year.

    Parameters
    ----------
    ys : array
        A time series, NaNs allowed
    ts : array
        The time axis of the time series, NaNs allowed

    Returns
    -------
    ys_ann : array
        the annualized time series (mean of the samples within each year)
    year_int : array
        The integer-year time axis of the annualized series
    '''
    ys = np.asarray(ys, dtype=float)
    ts = np.asarray(ts, dtype=float)
    assert ys.size == ts.size, 'The size of time axis and data value should be equal!'
    # unique integer years present in the record, sorted ascending
    year_int = np.sort(list(map(int, set(np.floor(ts)))))
    # pad with one extra year so the final interval has an upper edge
    edges = np.append(year_int, np.max(year_int) + 1)
    ys_ann = np.zeros(len(year_int))
    for k, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
        ys_ann[k] = np.average(ys[(ts >= lo) & (ts < hi)], axis=0)
    return ys_ann, year_int
def gaussianize(X):
    """ Transforms a (proxy) timeseries to a Gaussian distribution.

    Columns of a 2-D input are transformed independently via
    `gaussianize_single`.

    Originator: Michael Erb, Univ. of Southern California - April 2017

    Parameters
    ----------
    X : array
        Values for the timeseries (1-D) or one series per column (2-D).

    Returns
    -------
    Xn : array
        Gaussianized timeseries, same shape as X.
    """
    # Give every record at least one dimension, or else the code will crash.
    X = np.atleast_1d(X)
    # Make a blank copy of the array, retaining the data type of the original data variable.
    Xn = copy.deepcopy(X)
    # BUGFIX: np.NAN alias was removed in NumPy 2.0; use np.nan.
    Xn[:] = np.nan
    if len(X.shape) == 1:
        Xn = gaussianize_single(X)
    else:
        for i in range(X.shape[1]):
            Xn[:, i] = gaussianize_single(X[:, i])
    return Xn
def gaussianize_single(X_single):
    """ Transforms a single (proxy) timeseries to a Gaussian distribution.

    Maps the empirical CDF of the non-NaN values through the inverse
    standard-normal CDF (via erfinv); NaNs are preserved in place.

    Originator: Michael Erb, Univ. of Southern California - April 2017

    Parameters
    ----------
    X_single : 1D Array
        A single timeseries

    Returns
    -------
    Xn_single : 1D Array
        Gaussianized values for a single timeseries.
    """
    # Count only elements with data.
    n = X_single[~np.isnan(X_single)].shape[0]
    # Create a blank copy of the array.
    Xn_single = copy.deepcopy(X_single)
    # BUGFIX: np.NAN alias was removed in NumPy 2.0; use np.nan.
    Xn_single[:] = np.nan
    nz = np.logical_not(np.isnan(X_single))
    # double argsort yields the rank of each valid sample
    index = np.argsort(X_single[nz])
    rank = np.argsort(index)
    # mid-rank plotting position in (0, 1), then invert the normal CDF
    CDF = 1.*(rank+1)/(1.*n) - 1./(2*n)
    Xn_single[nz] = np.sqrt(2)*special.erfinv(2*CDF - 1)
    return Xn_single
def detrend(y, x=None, method="emd", n=1, sg_kwargs=None):
    """Detrend a timeseries according to four methods.

    Methods: "linear", "constant", a low-pass Savitzky-Golay filter, and
    Empirical Mode Decomposition (default).

    Parameters
    ----------
    y : array
        The series to be detrended.
    x : array
        The time axis. Required for "savitzky-golay", which needs an
        evenly spaced series.
    method : str
        - "linear": subtract a linear least-squares fit to y.
        - "constant": subtract the mean of the data.
        - "savitzky-golay": subtract a Savitzky-Golay-filtered version of y.
        - "emd" (default): subtract the n smoothest EMD modes (the trend).
    n : int
        Only with `method == 'emd'`: number of smoothest modes to remove.
    sg_kwargs : dict
        Parameters for the Savitzky-Golay filter;
        see pyleoclim.utils.filter.savitzy_golay.

    Returns
    -------
    ys : array
        The detrended timeseries.

    Raises
    ------
    ValueError
        If method is "savitzky-golay" and no time axis is given.
    KeyError
        If method is not recognized.

    See also
    --------
    pylecolim.utils.filter.savitzky_golay : Filtering using Savitzy-Golay
    pylecolim.utils.tsutils.preprocess : standardization plus detrending.
    """
    y = np.array(y)
    if x is not None:
        x = np.array(x)
    if method in ("linear", "constant"):
        # scipy handles both least-squares and mean removal; the method
        # names coincide with scipy's `type` values
        return signal.detrend(y, type=method)
    if method == "savitzky-golay":
        if x is None:
            raise ValueError("A time axis is needed for use with the Savitzky-Golay filter method")
        # The filter assumes even spacing; interpolate first if needed
        if len(np.unique(np.diff(x))) > 1:
            warnings.warn("Timeseries is not evenly-spaced, interpolating...")
            x_even, y_even = interp(x, y, bounds_error=False, fill_value='extrapolate')
        else:
            x_even, y_even = x, y
        sg_opts = {} if sg_kwargs is None else sg_kwargs.copy()
        smoothed = savitzky_golay(y_even, **sg_opts)
        # project the smooth trend back onto the original axis, then remove it
        return y - np.interp(x, x_even, smoothed)
    if method == "emd":
        imfs = EMD(y).decompose()
        if np.shape(imfs)[0] == 1:
            trend = np.zeros(np.size(y))
        else:
            trend = np.sum(imfs[-n:], axis=0)  # remove the n smoothest modes
        return y - trend
    raise KeyError('Not a valid detrending method')
def distance_neighbors(signal):
    '''Distance of each point in the timeseries from its 4 nearest neighbors.

    Parameters
    ----------
    signal : array
        The timeseries (values only; distances are in value space)

    Returns
    -------
    distances : list
        Distance of each point to its 4th-nearest neighbor, sorted in
        decreasing order
    '''
    column = signal.reshape(-1, 1)
    model = NearestNeighbors(n_neighbors=4).fit(column)  # 4 nearest neighbors
    dist_matrix, _ = model.kneighbors(column)
    # keep only the farthest of the 4 neighbor distances per point,
    # ranked largest-first for knee detection
    return sorted(dist_matrix[:, -1], reverse=True)
def find_knee(distances):
'''Finds knee point automatically in a given array sorted in decreasing order
Parameters
----------
distances : array
Distance of each point in the timeseries from it's nearest neighbors in decreasing order
Returns
-------
knee : float
knee point in the array
'''
nPoints = len(distances)
allCoord = np.vstack((range(nPoints), distances)).T
np.array([range(nPoints), distances])
firstPoint = allCoord[0]
lineVec = allCoord[-1] - allCoord[0]
lineVecNorm = lineVec / np.sqrt(np.sum(lineVec**2))
vecFromFirst = allCoord - firstPoint
# scalarProduct = np.sum(vecFromFirst * np.matlib.repmat(lineVecNorm, nPoints, 1), axis=1)
scalarProduct = np.sum(vecFromFirst * np.tile(lineVecNorm, (nPoints, 1)), axis=1)
vecFromFirstParallel = np.outer(scalarProduct, lineVecNorm)
vecToLine = vecFromFirst - vecFromFirstParallel
distToLine = np.sqrt(np.sum(vecToLine ** 2, axis=1))
idxOfBestPoint = np.argmax(distToLine)
knee = distances[idxOfBestPoint]
return knee
def detect_outliers(ts, ys,auto=True, plot_knee=True,plot_outliers=True,
                    plot_outliers_kwargs=None,plot_knee_kwargs=None,
                    figsize=[10,4],saveknee_settings=None,
                    saveoutliers_settings=None,mute=False):
    ''' Detect outliers in a timeseries via DBSCAN clustering of the values.

    The DBSCAN `eps` radius is taken from the knee of the sorted
    nearest-neighbor-distance curve (auto mode), or entered interactively
    by the user (manual mode). Points labelled -1 by DBSCAN are outliers.
    For details, see: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html

    NOTE(review): `figsize=[10,4]` is a mutable default argument; safe only
    as long as no caller mutates it. Manual mode and the ValueError fallback
    block on `input()`, so this function is interactive.

    Parameters
    ----------
    ts : array
        time axis of time series
    ys : array
        y values of time series
    auto : boolean
        true by default; if false the user manually enters the knee point
    plot_knee : boolean
        true by default; plots the sorted neighbor distances with the knee
        annotated
    plot_outliers : boolean
        true by default; plots the outliers on a scatter plot of the series
    plot_outliers_kwargs : dict
        keyword arguments passed to the outlier scatter plot
    plot_knee_kwargs : dict
        keyword arguments passed to the knee plot
    figsize : list
        figure size for both plots
    saveknee_settings : dict
        savefig settings for the knee figure (saved only if 'path' present)
    saveoutliers_settings : dict
        savefig settings for the outlier figure
    mute : bool, optional
        if True, the plot will not show;
        recommend to turn on when more modifications are going to be made on ax
        (going to be deprecated)

    Returns
    -------
    outliers : tuple of array
        result of np.where over the DBSCAN labels: indices of outlier points

    See also
    --------
    pylecolim.utils.tsutils.distance_neighbors : Finds Distance of each point in the timeseries from its 4 nearest neighbors
    pylecolim.utils.tsustils.find_knee : Finds knee point automatically in a given array sorted in decreasing order
    pylecolim.utils.tsutils.remove_outliers : Removes outliers from a timeseries
    '''
    #Take care of arguments for the knee plot
    saveknee_settings = {} if saveknee_settings is None else saveknee_settings.copy()
    try:
        # heuristic DBSCAN min_samples; NOTE(review): math.log returns a
        # float, which sklearn may reject or truncate — confirm version.
        minpts = math.log(len(ys))
        distances = distance_neighbors(ys)
        # flag: all neighbor distances ~0 (near-duplicate data); the knee
        # annotation below is then pinned at 0.1 for readability
        flag = all(v < 0.0001 for v in distances)
        knee_point = find_knee(distances)
        mark = distances.index(knee_point)
        index = [i for i in range(len(distances))]
        if auto == True:
            # automatic mode: eps = detected knee point
            db = DBSCAN(eps=knee_point, min_samples=minpts)
            clusters = db.fit(ys.reshape(-1, 1))
            cluster_labels = clusters.labels_
            outliers = np.where(cluster_labels == -1)
            if plot_knee==True:
                fig1, ax1 = plt.subplots(figsize=figsize)
                if flag == True:
                    knee_point = 0.1
                ax1.annotate("knee={}".format(knee_point), (mark, knee_point),
                             arrowprops=dict(facecolor='black', shrink=0.05))
                plot_xy(index, distances,xlabel='Indices',ylabel='Distances',plot_kwargs=plot_knee_kwargs,ax=ax1)
        elif auto == False:
            # manual mode: show the distance curve, then ask the user for eps
            plot_xy(index, distances, xlabel='Indices', ylabel='Distances',plot_kwargs=plot_knee_kwargs)
            eps = float(input('Enter the value for knee point'))
            if plot_knee==True:
                fig1,ax1 = plt.subplots(figsize=figsize)
                ax1.annotate("knee={}".format(eps), (mark, knee_point),
                             arrowprops=dict(facecolor='black', shrink=0.05))
                plot_xy(index, distances, xlabel='Indices', ylabel='Distances',plot_kwargs=plot_knee_kwargs,ax=ax1)
            db = DBSCAN(eps=eps, min_samples=minpts)
            clusters = db.fit(ys.reshape(-1, 1))
            cluster_labels = clusters.labels_
            outliers = np.where(cluster_labels == -1)
        # fig1 only exists if one of the knee plots above was drawn
        if 'fig1' in locals():
            if 'path' in saveknee_settings:
                savefig(fig1, settings=saveknee_settings)
        # else:
        #     if not mute:
        #         showfig(fig1)
        if plot_outliers==True:
            x2 = ts[outliers]
            y2 = ys[outliers]
            plot_scatter_xy(ts,ys,x2,y2,figsize=figsize,xlabel='time',ylabel='value',savefig_settings=saveoutliers_settings,plot_kwargs=plot_outliers_kwargs, mute=mute)
        return outliers
    except ValueError:
        # e.g. non-numeric eps entered in manual mode: offer auto mode
        choice = input('Switch to Auto Mode(y/n)?')
        choice = choice.lower()
        if choice == 'y':
            a = detect_outliers(ts, ys, auto=True)
            return a
        else:
            exit(1)
def remove_outliers(ts,ys,outlier_points):
    ''' Removes outliers from a timeseries.

    Parameters
    ----------
    ts : array
        x axis of timeseries
    ys : array
        y axis of timeseries
    outlier_points : array
        indices of outlier points

    Returns
    -------
    ys : array
        y axis with the outlier entries deleted
    ts : array
        x axis with the outlier entries deleted

    See also
    --------
    pylecolim.utils.tsutils.detect_outliers : Function to detect outliers in the given timeseries
    '''
    # drop the flagged indices from both axes; note the (ys, ts) return order
    kept_ys = np.delete(ys, outlier_points)
    kept_ts = np.delete(ts, outlier_points)
    return kept_ys, kept_ts
def eff_sample_size(y, detrend_flag=False):
    '''
    Effective Sample Size of timeseries y.

    Parameters
    ----------
    y : float
        1d array
    detrend_flag : boolean
        if True, detrends y (module-level `detrend`, default method)
        before estimation. Default False.

    Returns
    -------
    neff : float
        The effective sample size

    Reference
    ---------
    Thiébaux HJ, Zwiers FW. 1984. The interpretation and estimation of
    effective sample sizes. Journal of Climate and Applied Meteorology 23: 800–811.
    '''
    n = len(y)
    use_fft = n >= 100  # FFT-based ACF only pays off for longer series
    yd = detrend(y) if detrend_flag else y
    nl = math.floor(max(np.sqrt(n), 10))  # rule of thumb for number of lags
    rho = sms.acf(yd, adjusted=True, fft=use_fft, nlags=nl)  # autocorrelation function
    lags = np.arange(nl)
    # tapered sum of the ACF (Bartlett-style weights), skipping lag 0
    tapered = (1 - lags / nl) * rho[1:]
    return n / (1 + 2 * np.sum(tapered))  # Thiébaux & Zwiers 84, Eq 2.1
# Aliases kept so that functions whose parameters shadow these names
# (e.g. `preprocess(..., gaussianize=False, standardize=True)`) can
# still reach the underlying implementations.
std = standardize
gauss = gaussianize
def preprocess(ys, ts, detrend=False, sg_kwargs=None,
               gaussianize=False, standardize=True):
    ''' Return the processed time series using detrend and standardization.

    Parameters
    ----------
    ys : array
        a time series
    ts : array
        The time axis for the timeseries. Necessary for use with
        the Savitzky-Golay filters method since the series should be evenly spaced.
    detrend : string
        'none'/False/None - no detrending will be applied;
        'linear' - a linear least-squares fit to `ys` is subtracted;
        'constant' - the mean of `ys` is subtracted;
        'savitzy-golay' - ys is filtered using the Savitzky-Golay filter and
        the resulting filtered series is subtracted from y.
    sg_kwargs : dict
        The parameters for the Savitzky-Golay filter. see pyleoclim.utils.filter.savitzy_golay for details.
    gaussianize : bool
        If True, gaussianizes the timeseries
    standardize : bool
        If True, standardizes the timeseries

    Returns
    -------
    res : array
        the processed time series

    See also
    --------
    pyleoclim.utils.filter.savitzy_golay : Filtering using Savitzy-Golay
    '''
    if detrend == 'none' or detrend is False or detrend is None:
        ys_d = ys
    else:
        # BUGFIX: the `detrend` parameter shadows the module-level detrend()
        # function, so calling `detrend(...)` here invoked the string value
        # and raised TypeError. Look the function up in the module namespace.
        detrend_func = globals()['detrend']
        ys_d = detrend_func(ys, ts, method=detrend, sg_kwargs=sg_kwargs)
    if standardize:
        # `std`/`gauss` are module-level aliases; the boolean parameters
        # shadow the original function names here as well.
        res, _, _ = std(ys_d)
    else:
        res = ys_d
    if gaussianize:
        res = gauss(res)
    return res
| LinkedEarth/Pyleoclim_util | pyleoclim/utils/tsutils.py | Python | gpl-3.0 | 32,133 | [
"Gaussian"
] | f467d47cee78937a8e1e050dc3ca649247fc92dc3ba44f4514b7291e1fdd63ed |
from nose.tools import assert_equal
import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
from probfit.nputil import mid
from probfit.pdf import crystalball, gaussian
from probfit.functor import Normalized
from probfit.toy import gen_toy
from probfit._libstat import compute_chi2
from probfit.nputil import vector_apply
from probfit.costfunc import BinnedLH
def test_gen_toy():
    """Toy generation from a crystalball PDF should match the PDF (chi2/bin ~ 1)."""
    np.random.seed(0)
    bound = (-1, 2)
    ntoy = 100000
    toy = gen_toy(crystalball, ntoy, bound=bound,
                  alpha=1., n=2., mean=1., sigma=0.3, quiet=False)
    assert_equal(len(toy), ntoy)
    htoy, bins = np.histogram(toy, bins=1000, range=bound)
    ncball = Normalized(crystalball, bound)
    f = lambda x: ncball(x, 1., 2., 1., 0.3)
    # expected count per bin = normalized pdf at bin center * N * bin width
    expected = vector_apply(f, mid(bins)) * ntoy * (bins[1] - bins[0])
    htoy = htoy * 1.0
    err = np.sqrt(expected)
    chi2 = compute_chi2(htoy, expected, err)
    # BUGFIX: was the Python 2 `print chi2, ...` statement, a SyntaxError
    # on Python 3; use the print function.
    print(chi2, len(bins), chi2 / len(bins))
    assert(0.9 < (chi2 / len(bins)) < 1.1)
def test_gen_toy2():
    """Toys drawn from a gaussian stay inside the bound and fit it well."""
    np.random.seed(0)
    toy = gen_toy(gaussian, 10000, (-5, 5), mean=0, sigma=1)
    binlh = BinnedLH(gaussian, toy, bound=(-5, 5), bins=100)
    lh = binlh(0., 1.)
    # every generated sample must lie inside the requested bound
    assert all(-5 <= x < 5 for x in toy)
    assert_equal(len(toy), 10000)
    # average per-bin likelihood should be small for the true parameters
    assert lh / 100. < 1.
| mtresch/probfit | test/testtoy.py | Python | mit | 1,379 | [
"Gaussian"
] | bd7785b8a37a02c4e0385a8630be2495c1da4962e8736352e9284e4ff4586e37 |
import random
import numpy as np
class Robot(object):
    """Simulated planar robot with a bicycle steering model.

    State: position (x, y), heading `orientation` in radians normalized to
    [0, 2*pi), wheelbase `length`, Gaussian noise parameters for steering /
    distance / measurement, a systematic `steering_drift`, and counters for
    collisions and steps.
    """
    def __init__(self, length=0.5):
        # creates robot and initializes location/orientation to 0, 0, 0
        self.x = 0.0
        self.y = 0.0
        self.orientation = 0.0
        self.length = length            # wheelbase used in the bicycle model
        self.steering_noise = 0.0       # stddev of Gaussian noise on steering angle
        self.distance_noise = 0.0       # stddev of Gaussian noise on distance driven
        self.measurement_noise = 0.0    # stddev of Gaussian noise on (x, y) sensing
        self.num_collisions = 0
        self.num_steps = 0
        self.steering_drift = 0.0       # systematic bias added in move_simple()

    def set(self, new_x, new_y, new_orientation):
        # sets a robot coordinate; heading is wrapped into [0, 2*pi)
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation) % (2.0 * np.pi)

    def set_noise(self, new_s_noise, new_d_noise, new_m_noise=0.0):
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.steering_noise = float(new_s_noise)
        self.distance_noise = float(new_d_noise)
        self.measurement_noise = float(new_m_noise)

    def check_collision(self, grid):
        # checks if the robot pose collides with an obstacle, or
        # is too far outside the plane; returns False on the first
        # collision found (incrementing num_collisions), True otherwise.
        # NOTE(review): assumes grid[i][j] == 1 marks an obstacle at
        # world coordinates (i, j) — confirm grid convention with callers.
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                """
                whenever grid cell is blocked
                find the distance between grid cell position and robot position
                if distance is small it means robot is about to collide
                """
                if grid[i][j] == 1:
                    dist = np.sqrt((self.x - float(i)) ** 2 + (self.y - float(j)) ** 2)
                    if dist < 0.5:
                        self.num_collisions += 1
                        return False
        return True

    def check_goal(self, goal, threshold=1.0):
        # True when the robot is within `threshold` of goal = (gx, gy)
        dist = np.sqrt((float(goal[0]) - self.x) ** 2 + (float(goal[1]) - self.y) ** 2)
        """
        find distance between robot location and goal location
        if distance is small it means robot has reached the goal
        """
        return dist < threshold

    def set_steering_drift(self, drift):
        # Sets the systematical steering drift parameter
        self.steering_drift = drift

    def move(self, grid, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
        # Returns a NEW Robot advanced by one noisy motion step; `self` is
        # not modified (particle-filter style). `grid` is accepted but the
        # collision check below is commented out.
        # steering = front wheel steering angle, limited by max_steering_angle
        # distance = total distance driven, must be non-negative
        if steering > max_steering_angle:
            steering = max_steering_angle
        if steering < -max_steering_angle:
            steering = -max_steering_angle
        if distance < 0.0:
            distance = 0.0

        # make a new copy
        # NOTE(review): steering_drift is not copied to the new robot, so a
        # drift set on `self` is lost across move() — confirm if intended.
        res = Robot()
        res.length = self.length
        res.steering_noise = self.steering_noise
        res.distance_noise = self.distance_noise
        res.measurement_noise = self.measurement_noise
        res.num_collisions = self.num_collisions
        res.num_steps = self.num_steps + 1

        # apply noise
        steering2 = random.gauss(steering, self.steering_noise)
        distance2 = random.gauss(distance, self.distance_noise)

        # Execute motion
        """
        y = mx where m = tan(steering)
        """
        turn = np.tan(steering2) * distance2 / res.length

        if abs(turn) < tolerance:
            # approximate by straight line motion
            res.x = self.x + (distance2 * np.cos(self.orientation))
            res.y = self.y + (distance2 * np.sin(self.orientation))
            res.orientation = (self.orientation + turn) % (2.0 * np.pi)
        else:
            # approximate bicycle model for motion: rotate about the
            # instantaneous center of curvature at distance `radius`
            radius = distance2 / turn
            cx = self.x - (np.sin(self.orientation) * radius)
            cy = self.y + (np.cos(self.orientation) * radius)
            res.orientation = (self.orientation + turn) % (2.0 * np.pi)
            res.x = cx + (np.sin(res.orientation) * radius)
            res.y = cy - (np.cos(res.orientation) * radius)

        # check for collision
        # res.check_collision(grid)
        return res

    def move_simple(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
        """
        In-place variant of move(): updates `self` directly and also applies
        the systematic steering drift.

        steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, most be non-negative
        """
        if steering > max_steering_angle:
            steering = max_steering_angle
        if steering < -max_steering_angle:
            steering = -max_steering_angle
        if distance < 0.0:
            distance = 0.0

        # make a new copy
        # res = Robot()
        # res.length = self.length
        # res.steering_noise = self.steering_noise
        # res.distance_noise = self.distance_noise
        # res.steering_drift = self.steering_drift

        # apply noise
        steering2 = random.gauss(steering, self.steering_noise)
        distance2 = random.gauss(distance, self.distance_noise)

        # apply steering drift
        steering2 += self.steering_drift

        # Execute motion
        turn = np.tan(steering2) * distance2 / self.length

        if abs(turn) < tolerance:
            # approximate by straight line motion
            self.x += distance2 * np.cos(self.orientation)
            self.y += distance2 * np.sin(self.orientation)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
        else:
            # approximate bicycle model for motion
            radius = distance2 / turn
            cx = self.x - (np.sin(self.orientation) * radius)
            cy = self.y + (np.cos(self.orientation) * radius)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = cx + (np.sin(self.orientation) * radius)
            self.y = cy - (np.cos(self.orientation) * radius)

    def sense(self):
        # noisy (x, y) measurement of the robot's own position
        return [random.gauss(self.x, self.measurement_noise),
                random.gauss(self.y, self.measurement_noise)]

    def measurement_prob(self, measurement):
        # computes the probability of a measurement given the robot pose,
        # as the product of two independent 1-D Gaussian densities

        # compute errors
        error_x = measurement[0] - self.x
        error_y = measurement[1] - self.y

        # calculate Gaussian
        error = np.exp(- (error_x ** 2) / (self.measurement_noise ** 2) / 2.0) / np.sqrt(
            2.0 * np.pi * (self.measurement_noise ** 2))
        error *= np.exp(- (error_y ** 2) / (self.measurement_noise ** 2) / 2.0) / np.sqrt(
            2.0 * np.pi * (self.measurement_noise ** 2))

        return error

    def cte(self, radius):
        """
        NOT USED IN SLAM
        this is the method which is used to provide the cross track error for a race track.
        previously we were using y as a cte but in real world we have to use other parameters
        for the cte instead of only y. here we now use radius as a parameter
        a rectangle shaped race track is under consideration when applying logic below
        """
        cte = 0.0
        # case - 1:
        # robot is in 1st region where x is less than radius.
        # we find the error using distance formula
        if self.x < radius:
            cte += np.sqrt((self.x - radius) ** 2 + (self.y - radius) ** 2)
            cte -= radius
        # case - 2:
        # when robot is outside of boundary i.e. outside of 3*radius
        # we find the error using distance formula but taking care of shifting x
        # distance from y doesnt have any effect.
        elif self.x > 3 * radius:
            cte += np.sqrt((self.x - 3 * radius) ** 2 + (self.y - radius) ** 2)
            cte -= radius
        # case - 3:
        # robot is in upper region from center (inside or outside of the boundary)
        # in this case, error is y - diameter
        # we subtract from diameter because is measure from origin and not from center
        # -ve --> within boundary and above center
        # +ve --> outside of the upper boundary
        elif self.y > radius:
            cte = self.y - 2 * radius
        # case - 4:
        # robot in lower region from center (inside or outside of the boundary)
        # in this case, error is origin - y
        # -ve --> within boundary and below center
        # +ve --> outside of the lower boundary
        else:
            cte -= self.y
        return cte

    def __repr__(self):
        # return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
        return '[ {:.5f}, {:.5f} ]'.format(self.x, self.y)
| aurangzaib/robotics-ai | class_robot.py | Python | gpl-3.0 | 8,465 | [
"Gaussian"
] | d4fc248c8631abed5b360b7059e227504afb6dd9c2fceed70367698f45a1bb00 |
#!/usr/bin/python
"""
demoish.py
Let's think of the interactive shell prompt roughly as a state machine.
Inputs:
- Enter a line that finishes a command
- Enter a line that's incomplete.
- Hitting TAB to complete
- which display multiple candidates or fill in a single candidate
- Ctrl-C to cancel a COMMAND in progress.
- Ctrl-C to cancel a COMPLETION in progress, for slow completions.
- NOTE: if there are blocking NFS calls, completion should go in a
thread/process?
- EOF: Ctrl-D on an EMPTY line.
(Ctrl-D on a non-empty line behaves just like hitting enter)
- SIGWINCH: Terminal width change.
Actions:
- Display completions, which depends on the terminal width.
- Display a 1-line message showing lack of completions ('no variables that
begin with $')
- Execute a command
- Clear N lines below the prompt (must happen frequently)
- Exit the shell
State:
- The terminal width. Changes dynamically.
- The prompt: PS1 or PS2. (Or could save/restore here?)
- The number of lines to clear next. EraseLines() uses this.
- The completion that is in progress. The 'compopt' builtin affects this.
- The number of times you have requested the same completion (to show more
lines)
UI:
- Explanatory message when there's no completion
- Progress when there's a slow completion (over 200 ms)
- Empty input "moves" the prompt down a line
- Flag help displayed in yellow
- Line can be bold. Although we might want syntax highlighting for $foo
and so forth. "$foo" vs. '$foo' is useful.
LATER:
- Could have a caching decorator, because we recompute candidates every time.
For $PATH entries?
- experiment with ordering? You would have to disable readline sorting:
Variable: int rl_sort_completion_matches
If an application sets this variable to 0, Readline will not sort the list of
completions (which implies that it cannot remove any duplicate completions).
The default value is 1, which means that Readline will sort the completions
and, depending on the value of rl_ignore_completion_duplicates, will attempt
to remove duplicate matches.
"""
from __future__ import print_function
import optparse
import os
import readline
import signal
import sys
import time
# Only for prompt rendering.
import getpass
import pwd
import socket
import comp_ui
log = comp_ui.log
debug_log = comp_ui.debug_log
# Prompt style
_RIGHT = '_RIGHT'
_OSH = '_OSH'
def GetHomeDir():
  """Look up the current user's home directory in /etc/passwd.

  Used by $HOME initialization in osh/state.py.  Tilde expansion and
  readline initialization use mem.GetVar('HOME').

  Returns:
    The home directory path, or None if the uid has no passwd entry.
  """
  try:
    entry = pwd.getpwuid(os.getuid())
  except KeyError:  # no passwd entry for this uid
    return None
  return entry.pw_dir


_HOME_DIR = GetHomeDir()
class WordsAction(object):
  """Completion action that yields matches from a fixed word list."""

  def __init__(self, words, delay=None):
    # delay: optional sleep (seconds) before each match, to simulate a
    # slow completion source.
    self.words = words
    self.delay = delay

  def Matches(self, prefix):
    """Yield each word that starts with prefix."""
    for word in self.words:
      if not word.startswith(prefix):
        continue
      if self.delay is not None:
        time.sleep(self.delay)
      yield word
class FileSystemAction(object):
  """Completion action that yields paths from the file system.

  Directories get a '/' suffix when add_slash is set.
  Copied from core/completion.py in Oil.
  """

  def __init__(self, dirs_only=False, exec_only=False, add_slash=False):
    self.dirs_only = dirs_only
    self.exec_only = exec_only
    # This is for redirects, not for UserSpec, which should respect compopt -o
    # filenames.
    self.add_slash = add_slash  # for directories

  def Matches(self, to_complete):
    """Yield paths whose text starts with to_complete."""
    slash_pos = to_complete.rfind('/')
    if slash_pos == -1:
      # looks like 'foo': complete against the current directory
      to_list = '.'
      base = ''
    elif slash_pos == 0:
      # absolute path like / or /b
      to_list = '/'
      base = '/'
    else:
      to_list = to_complete[:slash_pos]
      base = to_list

    try:
      names = os.listdir(to_list)
    except OSError:
      return  # directory doesn't exist: nothing to complete

    for name in names:
      path = os.path.join(base, name)
      if not path.startswith(to_complete):
        continue
      # TODO: Handle exception if file gets deleted in between listing and
      # check?
      if self.exec_only and not os.access(path, os.X_OK):
        continue
      if self.add_slash and os.path.isdir(path):
        yield path + '/'
      else:
        yield path


# Shared instance used for argument completion and redirects.
_FS_ACTION = FileSystemAction(add_slash=True)
class FlagsHelpAction(object):
  """Completion action yielding (flag, description) pairs.

  TODO: This API can't be expressed in shell itself.  How do zsh and fish
  do it?
  """

  def __init__(self, flags):
    self.flags = flags  # list of (flag, desc) tuples

  def Matches(self, prefix):
    """Return an iterator over (flag, desc) pairs matching prefix."""
    return ((flag, desc) for flag, desc in self.flags
            if flag.startswith(prefix))
class FlagsAndFileSystemAction(object):
  """Complete flags for words starting with '-', file paths otherwise.

  This is basically what _longopt in bash-completion does.
  """

  def __init__(self, flags_action, fs_action):
    self.flags_action = flags_action
    self.fs_action = fs_action

  def Matches(self, prefix):
    # Pick the delegate based on whether the word looks like a flag.
    action = self.flags_action if prefix.startswith('-') else self.fs_action
    for match in action.Matches(prefix):
      yield match
def JoinLinesOfCommand(pending_lines):
  """Strip '\\'-newline continuations and concatenate the physical lines.

  Returns:
    cmd: the logical command as a single string
    last_line_pos: index in cmd where the final physical line begins
  """
  parts = []
  start_of_last = 0
  for line in pending_lines:
    if line.endswith('\\\n'):
      line = line[:-2]  # drop the backslash-newline
      start_of_last += len(line)  # the next line starts right after this one
    parts.append(line)
  return ''.join(parts), start_of_last
def MakeCompletionRequest(lines):
  """Returns a 4-tuple or a negative error code.

  Args:
    lines: pending physical lines (with '\\'-newline continuations) plus the
      current readline buffer as the last element.

  Returns:
    first: The first word, or None if we're completing the first word itself
    to_complete: word to complete
    prefix: string
    prefix_pos: integer

  Cases we CAN complete:

  echo foo \
  f<TAB>

  echo foo \
  bar f<TAB>

  Cases we CAN'T complete:

  ec\
  h<TAB>       # complete 'o' ?

  echo f\
  o<TAB>       # complete 'o' ?
  """
  #log('pending_lines %s', pending_lines)

  # first word can't be split over multiple lines
  if len(lines) > 1 and ' ' not in lines[0]:
    return -1

  partial_cmd, last_line_pos = JoinLinesOfCommand(lines)

  # the first word if we're completing an arg, or None if we're completing the
  # first word itself
  first = None

  cmd_last_space_pos = partial_cmd.rfind(' ')
  if cmd_last_space_pos == -1:  # FIRST WORD state, no prefix
    prefix_pos = 0
    to_complete = partial_cmd
    prefix = ''
  else:  # Completing an argument, may be on any line
    # Find the first word with the left-most space.  (Not the right-most space
    # above).
    j = partial_cmd.find(' ')
    assert j != -1
    first = partial_cmd[:j]

    # The space has to be on the current line, or be the last char on the
    # previous line before the line continuation.  Otherwise we can't complete
    # anything.
    if cmd_last_space_pos < last_line_pos-1:
      return -2

    last_line = lines[-1]
    line_space_pos = last_line.rfind(' ')
    if line_space_pos == -1:  # space is on previous line
      prefix_pos = 0  # complete all of this line
    else:
      prefix_pos = line_space_pos + 1

    #log('space_pos = %d, last_line_pos = %d', line_space_pos, last_line_pos)
    to_complete = last_line[prefix_pos:]
    prefix = last_line[:prefix_pos]

  #log('X partial_cmd %r', partial_cmd, file=DEBUG_F)
  #log('X to_complete %r', to_complete, file=DEBUG_F)
  # Strip backslash escapes so the completer sees the literal word.
  unquoted = ShellUnquote(to_complete)
  return first, unquoted, prefix, prefix_pos
def ShellUnquote(s):
  """Remove backslash escapes from a word (approximate).

  In OSH we'll use the CompletionWordEvaluator instead.
  """
  return ''.join(ch for ch in s if ch != '\\')
def ShellQuote(s):
  """Backslash-escape common shell metacharacters (approximate).

  TODO: Use regex replace.  & ; ( also need replacing, and { in case you
  have a file like {foo,bar}.  And ! for history.
  """
  quoted = s
  for ch in (' ', '$', ';', '|'):
    quoted = quoted.replace(ch, '\\' + ch)
  return quoted
class RootCompleter(object):
  """Dispatch to multiple completers."""

  def __init__(self, reader, display, comp_lookup, comp_state):
    """
    Args:
      reader: for completing the entire command, not just one line
      display: UI object for progress / error messages
      comp_lookup: Dispatch to completion logic for different commands
      comp_state: fields are added here for Display
    """
    self.reader = reader
    self.display = display
    self.comp_lookup = comp_lookup
    self.comp_state = comp_state

  def Matches(self, comp):
    """Yield readline candidate strings for the completion request comp.

    comp is a dict with key 'line': the buffer up to the cursor.
    """
    line = comp['line']
    self.comp_state['ORIG'] = line

    #log('lines %s', self.reader.pending_lines, file=DEBUG_F)
    lines = list(self.reader.pending_lines)
    lines.append(line)

    result = MakeCompletionRequest(lines)
    if result == -1:
      self.display.PrintOptional("(can't complete first word spanning lines)")
      return
    if result == -2:
      self.display.PrintOptional("(can't complete last word spanning lines)")
      return

    # We have to add on prefix before sending it completer.  And then
    first, to_complete, prefix, prefix_pos = result

    # For the Display callback to look at
    self.comp_state['prefix_pos'] = prefix_pos

    # Reset this at the beginning of each completion.
    # Is there any way to avoid creating a duplicate dictionary each time?
    # I think every completer could have an optional PAYLOAD.
    # Yes that is better.
    # And maybe you can yield the original 'c' too, without prefix and ' '.
    self.comp_state['DESC'] = {}

    if first:
      # Argument completion: look up the command, default to file system.
      completer = self.comp_lookup.get(first, _FS_ACTION)
    else:
      completer = self.comp_lookup['__first']

    #log('to_complete: %r', to_complete, file=DEBUG_F)

    i = 0
    start_time = time.time()
    for match in completer.Matches(to_complete):
      if isinstance(match, tuple):
        # (flag, description) pairs from FlagsHelpAction.
        flag, desc = match  # hack
        if flag.endswith('='):  # Hack for --color=auto
          rl_match = flag
        else:
          rl_match = flag + ' '
        self.comp_state['DESC'][rl_match] = desc  # save it for later
      else:
        match = ShellQuote(match)
        if match.endswith('/'):  # Hack for directories
          rl_match = match
        else:
          rl_match = match + ' '

      yield prefix + rl_match

      # TODO: avoid calling time() so much?
      elapsed_ms = (time.time() - start_time) * 1000
      # NOTES:
      # - Ctrl-C works here!  You only get the first 5 candidates.
      # - These progress messages will not help if the file system hangs!  We
      #   might want to run "adversarial completions" in a separate process?
      i += 1
      if elapsed_ms > 200:
        plural = '' if i == 1 else 'es'
        self.display.PrintOptional(
            '... %d match%s for %r in %d ms (Ctrl-C to cancel)', i,
            plural, line, elapsed_ms)

    if i == 0:
      self.display.PrintRequired('(no matches for %r)', line)
class CompletionCallback(object):
  """Registered with the readline library and called for completions.

  readline calls the instance with increasing 'state' values (0, 1, 2, ...)
  for one completion; state 0 starts a fresh candidate generator.
  """

  def __init__(self, root_comp):
    self.root_comp = root_comp
    self.iter = None  # generator over candidates for the current completion

  def Call(self, word_prefix, state):
    """Generate completions.

    Args:
      word_prefix: unused; we parse the whole line buffer ourselves.
      state: 0 on the first call of a new completion, then incrementing.

    Returns:
      The next candidate string, or None when exhausted.
    """
    if state == 0:  # initial completion
      orig_line = readline.get_line_buffer()
      #begin = readline.get_begidx()
      end = readline.get_endidx()
      comp = {'line': orig_line[:end]}
      #debug_log('line %r', orig_line)
      #debug_log('begidx %d', begin)
      #debug_log('endidx %d', end)
      self.iter = self.root_comp.Matches(comp)

    try:
      # Use the next() builtin rather than .next() so this works on both
      # Python 2.6+ and Python 3 (generators have no .next() method in 3).
      c = next(self.iter)
    except StopIteration:
      c = None
    return c

  def __call__(self, word_prefix, state):
    try:
      return self.Call(word_prefix, state)
    except Exception as e:
      # Readline swallows exceptions!  Print before re-raising so errors
      # are visible during development.
      print(e)
      raise
def DoNothing(unused1, unused2):
  """Signal handler that ignores the signal (used for SIGINT between reads)."""
  pass
class PromptEvaluator(object):
  """Evaluate the prompt template and give it a certain style."""

  def __init__(self, style):
    """
    Args:
      style: _RIGHT, _BOLD, _UNDERLINE, _REVERSE or _OSH
    """
    self.style = style

  def Eval(self, template):
    """Expand \\u, \\h, \\w in template and apply the configured style.

    Returns:
      p2: rendered prompt (may contain ANSI escapes)
      prompt_len: visible length of the prompt
      right_prompt_str: text for a right-hand prompt, or None
    """
    p = template
    # NOTE: the escapes are spelled '\\u' etc. because a bare '\u' is a
    # SyntaxError on Python 3; on Python 2 the bytes are identical.
    p = p.replace('\\u', getpass.getuser())
    p = p.replace('\\h', socket.gethostname())
    cwd = os.getcwd().replace(_HOME_DIR, '~')  # Hack
    p = p.replace('\\w', cwd)

    prompt_len = len(p)
    right_prompt_str = None

    if self.style == _RIGHT:
      right_prompt_str = p
      p2 = comp_ui.PROMPT_BOLD + ': ' + comp_ui.PROMPT_RESET
      prompt_len = 2
    elif 0:
      #elif self.style == _BOLD:  # Make it bold and add '$ '
      p2 = comp_ui.PROMPT_BOLD + p + '$ ' + comp_ui.PROMPT_RESET
      prompt_len += 2
    elif 0:
      #elif self.style == _UNDERLINE:
      # Don't underline the space
      p2 = comp_ui.PROMPT_UNDERLINE + p + comp_ui.PROMPT_RESET + ' '
      prompt_len += 1
    elif 0:
      #elif self.style == _REVERSE:
      p2 = comp_ui.PROMPT_REVERSE + ' ' + p + ' ' + comp_ui.PROMPT_RESET + ' '
      prompt_len += 3
    elif self.style == _OSH:
      p2 = p + '$ '  # emulate bash style
      prompt_len += 2
    else:
      raise AssertionError
    return p2, prompt_len, right_prompt_str
class InteractiveLineReader(object):
  """Simplified version of OSH prompt.

  Holds PS1 / PS2 state.
  """

  def __init__(self, ps1, ps2, prompt_eval, display, bold_line=False,
               erase_empty=0):
    self.ps1 = ps1
    self.ps2 = ps2
    self.prompt_eval = prompt_eval
    self.display = display
    self.bold_line = bold_line
    # erase_empty: 0 = leave repeated empty prompts, 1/2 = erase one or two
    # lines with ANSI escapes (see GetLine).
    self.erase_empty = erase_empty

    self.prompt_str = ''
    # NOTE(review): 'right_prompt' appears unused -- GetLine and Reset use
    # 'right_prompt_str' (set by Reset below).  Possibly a leftover name.
    self.right_prompt = ''
    self.pending_lines = []  # for completion to use
    self.Reset()  # initialize self.prompt_str

    # https://stackoverflow.com/questions/22916783/reset-python-sigint-to-default-signal-handler
    self.orig_handler = signal.getsignal(signal.SIGINT)
    self.last_prompt_len = 0
    #log('%s', self.orig_handler)

  def GetLine(self):
    """Read one line; returns the line, -1 on Ctrl-C, or -2 on EOF."""
    signal.signal(signal.SIGINT, self.orig_handler)  # raise KeyboardInterrupt
    p = self.prompt_str
    if self.bold_line:
      p += comp_ui.PROMPT_BOLD
    if self.right_prompt_str:  # only for PS1
      self.display.ShowPromptOnRight(self.right_prompt_str)
    try:
      # raw_input is Python 2; under Python 3 this would be input().
      line = raw_input(p) + '\n'  # newline required
    except KeyboardInterrupt:
      print('^C')
      line = -1
    except EOFError:
      print('^D')  # bash prints 'exit'; mksh prints ^D.
      line = -2
    else:
      self.pending_lines.append(line)
    finally:
      # Ignore it usually, so we don't get KeyboardInterrupt in weird places.
      # NOTE: This can't be SIG_IGN, because that affects the child process.
      signal.signal(signal.SIGINT, DoNothing)

    # Nice trick to remove repeated prompts.
    if line == '\n':
      if self.erase_empty == 0:
        pass
      elif self.erase_empty == 1:
        # Go up one line and erase the whole line
        sys.stdout.write('\x1b[1A\x1b[2K\n')
        sys.stdout.flush()
      elif self.erase_empty == 2:
        sys.stdout.write('\x1b[1A\x1b[2K')
        sys.stdout.write('\x1b[1A\x1b[2K')
        sys.stdout.write('\n')  # go down one line
        sys.stdout.flush()
      else:
        raise AssertionError(self.erase_empty)

    # After the first line, subsequent lines use the PS2 continuation prompt.
    self.prompt_str = self.ps2
    self.right_prompt_str = None
    self.display.SetPromptLength(len(self.ps2))
    return line

  def Reset(self):
    """Re-render PS1 and clear pending continuation lines."""
    self.prompt_str, prompt_len, self.right_prompt_str = (
        self.prompt_eval.Eval(self.ps1))
    self.display.SetPromptLength(prompt_len)
    del self.pending_lines[:]

  def CurrentRenderedPrompt(self):
    """For BasicDisplay to reprint the prompt."""
    return self.prompt_str
def MainLoop(reader, display):
  """Read lines and execute commands until EOF (Ctrl-D on an empty line)."""
  while True:
    # TODO: Catch KeyboardInterrupt and EOFError here.
    line = reader.GetLine()

    # Erase lines before execution, displaying PS2, or exit!
    display.EraseLines()

    if line == -1:  # Ctrl-C
      display.Reset()
      reader.Reset()
      continue

    #log('got %r', line)

    if line == -2:  # EOF
      break

    if line.endswith('\\\n'):
      # Line continuation: keep reading before executing.
      continue

    if line.startswith('cd '):
      # 'cd' must be a builtin: it changes this process's directory.
      try:
        dest = line.strip().split(None, 1)[1]
      except IndexError:
        log('cd: dir required')
      else:
        try:
          os.chdir(dest)
        except OSError as e:
          log('cd: %s', e)
      display.Reset()
      reader.Reset()
      continue

    # Take multiple lines from the reader, simulating the OSH parser.
    cmd, _ = JoinLinesOfCommand(reader.pending_lines)
    os.system(cmd)

    display.Reset()
    reader.Reset()
# Commands always offered for first-word completion, in addition to any
# discovered via --flag-dir.
_MORE_COMMANDS = [
    'cd', 'echo', 'sleep', 'clear', 'slowc', 'many', 'toomany'
]

# Fixed candidate words for completing 'echo' arguments.
ECHO_WORDS = [
    'zz', 'foo', 'bar', 'baz', 'spam', 'eggs', 'python', 'perl', 'pearl',
    # To simulate filenames with spaces
    'two words', 'three words here',
]
def LoadFlags(path):
flags = []
with open(path) as f:
for line in f:
try:
flag, desc = line.split(None, 1)
desc = desc.strip()
except ValueError:
#log('Error: %r', line)
#raise
flag = line.strip()
desc = None
# TODO: do something with the description
flags.append((flag, desc))
return flags
_PS1 = '[demoish] \u@\h \w'
def main(argv):
  """Parse flags, build the completion setup, and run the interactive loop.

  NOTE: uses xrange and is paired with raw_input elsewhere, so this file
  targets Python 2.
  """
  p = optparse.OptionParser(__doc__, version='snip 0.1')
  p.add_option(
      '--flag-dir', dest='flag_dir', default=None,
      help='Directory with flags definitions')
  p.add_option(
      '--style', dest='style', default='osh',
      help='Style of prompt')
  opts, _ = p.parse_args(argv[1:])

  # Right-align the greeting to the terminal width.
  _, term_width = comp_ui.GetTerminalSize()
  fmt = '%' + str(term_width) + 's'

  #msg = "[Oil 0.6.pre11] Type 'help' or visit https://oilshell.org/help/ "
  msg = "Type 'help' or visit https://oilshell.org/ for help"
  print(fmt % msg)
  print('')

  # Used to store the original line, flag descriptions, etc.
  comp_state = {}

  if opts.style == 'bare':
    display = comp_ui.BasicDisplay(comp_state)
    prompt = PromptEvaluator(_OSH)
    reader = InteractiveLineReader(_PS1, '> ', prompt, display,
                                   bold_line=False)
    display.SetReader(reader)  # needed to re-print prompt
  elif opts.style == 'osh':
    display = comp_ui.NiceDisplay(comp_state, bold_line=True)
    prompt = PromptEvaluator(_OSH)
    reader = InteractiveLineReader(_PS1, '> ', prompt, display,
                                   bold_line=True, erase_empty=1)
  elif opts.style == 'oil':
    display = comp_ui.NiceDisplay(comp_state, bold_line=True)
    # Oil has reverse video on the right.  It's also bold, and may be syntax
    # highlighted later.
    prompt = PromptEvaluator(_RIGHT)
    reader = InteractiveLineReader(_PS1, '| ', prompt, display,
                                   bold_line=True, erase_empty=2)
  else:
    raise RuntimeError('Invalid style %r' % opts.style)

  # Register a callback to receive terminal width changes.
  signal.signal(signal.SIGWINCH, lambda x, y: display.OnWindowChange())

  # Per-command completion actions; 'slowc'/'many'/'toomany' exercise the UI.
  comp_lookup = {
      'echo': WordsAction(ECHO_WORDS),
      'slowc': WordsAction([str(i) for i in xrange(20)], delay=0.1),
      'many': WordsAction(['--flag%d' % i for i in xrange(50)]),
      'toomany': WordsAction(['--too%d' % i for i in xrange(1000)]),
  }

  commands = []
  if opts.flag_dir:
    # Each file in --flag-dir defines flags for the command of that name.
    for cmd in os.listdir(opts.flag_dir):
      path = os.path.join(opts.flag_dir, cmd)
      flags = LoadFlags(path)
      fl = FlagsHelpAction(flags)
      comp_lookup[cmd] = FlagsAndFileSystemAction(fl, _FS_ACTION)
      commands.append(cmd)

  comp_lookup['__first'] = WordsAction(commands + _MORE_COMMANDS)

  # Register a callback to generate completion candidates.
  root_comp = RootCompleter(reader, display, comp_lookup, comp_state)
  readline.set_completer(CompletionCallback(root_comp))

  # We want to parse the line ourselves, rather than use readline's naive
  # delimiter-based tokenization.
  readline.set_completer_delims('')

  # Register a callback to display completions.
  # NOTE: If we don't register a hook, readline will print the ENTIRE command
  # line completed, not just the word.
  # NOTE: Is this style hard to compile?  Maybe have to expand the args
  # literally.
  readline.set_completion_display_matches_hook(
      lambda *args: display.PrintCandidates(*args)
  )

  readline.parse_and_bind('tab: complete')

  MainLoop(reader, display)
if __name__ == '__main__':
  try:
    main(sys.argv)
  except RuntimeError as e:
    # RuntimeError is the expected failure (e.g. bad --style); anything
    # else propagates with a traceback.
    print('FATAL: %s' % e, file=sys.stderr)
    sys.exit(1)
| oilshell/blog-code | interactive-shell/demoish.py | Python | apache-2.0 | 20,494 | [
"VisIt"
] | e944fc3debe4f40c7df53e10b07c9b15777cc64d3afc920671e64b4888c8ab5e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sessions2trash.py
Run this script in a web2py environment shell e.g. python web2py.py -S app
If models are loaded (-M option) auth.settings.expiration is assumed
for sessions without an expiration. If models are not loaded, sessions older
than 60 minutes are removed. Use the --expiration option to override these
values.
Typical usage:
# Delete expired sessions every 5 minutes
nohup python web2py.py -S app -M -R scripts/sessions2trash.py &
# Delete sessions older than 60 minutes regardless of expiration,
# with verbose output, then exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 3600 -f -v
# Delete all sessions regardless of expiry and exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 0
"""
from gluon.storage import Storage
from optparse import OptionParser
import cPickle
import datetime
import os
import stat
import time
# Default session lifetime (minutes) when models aren't loaded; see --expiration.
EXPIRATION_MINUTES = 60
# Default pause between sweeps (minutes); see the --sleep option.
SLEEP_MINUTES = 5
VERSION = 0.3
class SessionSet(object):
    """Class representing a set of sessions.

    Subclasses implement get() for a particular storage backend.
    """

    def __init__(self, expiration, force, verbose):
        self.expiration = expiration  # seconds
        self.force = force  # ignore per-session auth expiration
        self.verbose = verbose  # 0 silent, 1 one line per session, 2 details

    def get(self):
        """Get session files/records."""
        raise NotImplementedError

    def trash(self):
        """Trash expired sessions."""
        now = datetime.datetime.now()
        for item in self.get():
            status = 'OK'
            last_visit = item.last_visit_default()
            try:
                session = item.get()
                if session.auth:
                    if session.auth.expiration and not self.force:
                        self.expiration = session.auth.expiration
                    if session.auth.last_visit:
                        last_visit = session.auth.last_visit
            except:
                # Best effort: unreadable/corrupt sessions fall back on the
                # storage defaults computed above.
                pass
            age = 0
            if last_visit:
                age = total_seconds(now - last_visit)
            # expiration == 0 means delete unconditionally.
            if age > self.expiration or not self.expiration:
                item.delete()
                status = 'trashed'
            # print() call form: identical single-argument output on
            # Python 2 and valid on Python 3 (and consistent with the
            # print('%s %s' ...) call below).
            if self.verbose > 1:
                print('key: %s' % str(item))
                print('expiration: %s seconds' % self.expiration)
                print('last visit: %s' % str(last_visit))
                print('age: %s seconds' % age)
                print('status: %s' % status)
                print('')
            elif self.verbose > 0:
                print('%s %s' % (str(item), status))
class SessionSetDb(SessionSet):
    """Class representing a set of sessions stored in database"""

    def __init__(self, expiration, force, verbose):
        SessionSet.__init__(self, expiration, force, verbose)

    def get(self):
        """Return list of SessionDb instances for existing sessions."""
        # The session table name is per-application, e.g. web2py_session_app.
        tablename = 'web2py_session'
        if request.application:
            tablename = 'web2py_session_' + request.application
        if tablename not in db:
            return []
        rows = db(db[tablename].id > 0).select()
        return [SessionDb(row) for row in rows]
class SessionSetFiles(SessionSet):
    """Class representing a set of sessions stored in flat files"""

    def __init__(self, expiration, force, verbose):
        SessionSet.__init__(self, expiration, force, verbose)

    def get(self):
        """Return list of SessionFile instances for existing sessions."""
        session_dir = os.path.join(request.folder, 'sessions')
        return [SessionFile(os.path.join(session_dir, name))
                for name in os.listdir(session_dir)]
class SessionDb(object):
    """Class representing a single session stored in database"""

    def __init__(self, row):
        self.row = row

    def delete(self):
        # Remove the record and commit immediately so each deletion sticks.
        self.row.delete_record()
        db.commit()

    def get(self):
        """Return the unpickled session data as a Storage object."""
        session = Storage()
        session.update(cPickle.loads(self.row.session_data))
        return session

    def last_visit_default(self):
        # Fall back on the record's modification time when the session
        # itself has no last_visit.
        return self.row.modified_datetime

    def __str__(self):
        return self.row.unique_key
class SessionFile(object):
    """Class representing a single session stored as a flat file"""

    def __init__(self, filename):
        self.filename = filename

    def delete(self):
        # Remove the session file itself.
        os.unlink(self.filename)

    def get(self):
        """Return the unpickled session contents as a Storage object."""
        session = Storage()
        with open(self.filename, 'rb+') as f:
            session.update(cPickle.load(f))
        return session

    def last_visit_default(self):
        # Fall back on the file's mtime when the session has no last_visit.
        return datetime.datetime.fromtimestamp(
            os.stat(self.filename)[stat.ST_MTIME])

    def __str__(self):
        return self.filename
def total_seconds(delta):
    """Return the duration of a timedelta in seconds.

    Adapted from Python 2.7's timedelta.total_seconds() method, for
    compatibility with older interpreters.

    Args:
        delta: datetime.timedelta instance.
    """
    micros = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
    return micros / 10 ** 6
def main():
    """Main processing: parse options, then trash sessions (optionally in a loop).

    Expiration precedence: --expiration option, then auth.settings.expiration
    when models are loaded, then EXPIRATION_MINUTES.
    """
    usage = '%prog [options]' + '\nVersion: %s' % VERSION
    parser = OptionParser(usage=usage)
    parser.add_option('-f', '--force',
        action='store_true', dest='force', default=False,
        help=('Ignore session expiration. '
            'Force expiry based on -x option or auth.settings.expiration.')
        )
    parser.add_option('-o', '--once',
        action='store_true', dest='once', default=False,
        help='Delete sessions, then exit.',
        )
    parser.add_option('-s', '--sleep',
        dest='sleep', default=SLEEP_MINUTES * 60, type="int",
        help='Number of seconds to sleep between executions. Default 300.',
        )
    parser.add_option('-v', '--verbose',
        default=0, action='count',
        help="print verbose output, a second -v increases verbosity")
    parser.add_option('-x', '--expiration',
        dest='expiration', default=None, type="int",
        help='Expiration value for sessions without expiration (in seconds)',
        )

    (options, unused_args) = parser.parse_args()

    expiration = options.expiration
    if expiration is None:
        try:
            expiration = auth.settings.expiration  # models loaded (-M)
        except:
            expiration = EXPIRATION_MINUTES * 60

    set_db = SessionSetDb(expiration, options.force, options.verbose)
    set_files = SessionSetFiles(expiration, options.force, options.verbose)
    while True:
        set_db.trash()
        set_files.trash()
        if options.once:
            break
        else:
            if options.verbose:
                # print() call form for Python 2/3 source compatibility;
                # single-argument output is identical on Python 2.
                print('Sleeping %s seconds' % options.sleep)
            time.sleep(options.sleep)
main()
| SEA000/uw-empathica | empathica/scripts/sessions2trash.py | Python | mit | 6,636 | [
"VisIt"
] | e1f9472ce3301b4106a12d95cab11223fd86d1cb4050c83c5f5fd2228c37b49c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from compliance_checker.suite import CheckSuite
from netCDF4 import Dataset
from tempfile import gettempdir
from compliance_checker.cf import util
from compliance_checker.tests.resources import STATIC_FILES
from compliance_checker.tests import BaseTestCase
import pytest
import os
class TestCFIntegration(BaseTestCase):
    def setUp(self):
        '''
        Initialize the dataset
        '''
        # Fresh suite with every registered checker for each test.
        self.cs = CheckSuite()
        self.cs.load_all_available_checkers()
        # get current std names table version (it changes)
        self._std_names = util.StandardNameTable()
# --------------------------------------------------------------------------------
# Helper Methods
# --------------------------------------------------------------------------------
def new_nc_file(self):
'''
Make a new temporary netCDF file for the scope of the test
'''
nc_file_path = os.path.join(gettempdir(), 'example.nc')
if os.path.exists(nc_file_path):
raise IOError('File Exists: %s' % nc_file_path)
nc = Dataset(nc_file_path, 'w')
self.addCleanup(os.remove, nc_file_path)
self.addCleanup(nc.close)
return nc
def load_dataset(self, nc_dataset):
'''
Return a loaded NC Dataset for the given path
'''
if not isinstance(nc_dataset, str):
raise ValueError("nc_dataset should be a string")
nc_dataset = Dataset(nc_dataset, 'r')
self.addCleanup(nc_dataset.close)
return nc_dataset
def get_results(self, check_results):
'''
Returns a tuple of the value scored, possible, and a list of messages
in the result set.
'''
aggregation = self.cs.build_structure('cf', check_results['cf'][0], 'test', 1)
out_of = 0
scored = 0
results = aggregation['all_priorities']
for r in results:
if isinstance(r.value, tuple):
out_of += r.value[1]
scored += r.value[0]
else:
out_of += 1
scored += int(r.value)
# Store the messages
messages = []
for r in results:
messages.extend(r.msgs)
return scored, out_of, messages
    def test_sldmb_43093_agg(self):
        """CF check of the drifter aggregation: expect exactly 7 messages."""
        dataset = self.load_dataset(STATIC_FILES['sldmb_43093_agg'])
        check_results = self.cs.run(dataset, [], 'cf')
        scored, out_of, messages = self.get_results(check_results)

        assert scored < out_of
        assert len(messages) == 7
        msgs = [
            u'attribute time:_CoordianteAxisType should begin with a letter and be composed of letters, digits, and underscores',
            u'attribute lat:_CoordianteAxisType should begin with a letter and be composed of letters, digits, and underscores',
            u'attribute lon:_CoordianteAxisType should begin with a letter and be composed of letters, digits, and underscores',
            u'§2.6.2 global attribute history should exist and be a non-empty string',
            u'standard_name temperature is not defined in Standard Name Table v49',
            u"temperature's auxiliary coordinate specified by the coordinates attribute, precise_lat, is not a variable in this dataset",
            u"temperature's auxiliary coordinate specified by the coordinates attribute, precise_lon, is not a variable in this dataset"
        ]
        # Every expected message must appear (order-independent).
        assert all(m in messages for m in msgs)
    @pytest.mark.slowtest
    def test_ocos(self):
        """CF check of the OCOS ROMS output.

        Expect dimension-ordering warnings, units problems, and many
        'Unidentifiable feature' notices for scalar ROMS parameters.
        """
        dataset = self.load_dataset(STATIC_FILES['ocos'])
        check_results = self.cs.run(dataset, [], 'cf')
        scored, out_of, messages = self.get_results(check_results)
        assert len(messages) == 63
        msgs = [
            u"zeta's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u"ubar's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_u, xi_u",
            u"vbar's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_v, xi_v",
            u"u's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_rho, eta_u, xi_u",
            u"v's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_rho, eta_v, xi_v",
            u"w's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_w, eta_rho, xi_rho",
            u"temp's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_rho, eta_rho, xi_rho",
            u"salt's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_rho, eta_rho, xi_rho",
            u"AKv's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_w, eta_rho, xi_rho",
            u"AKt's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_w, eta_rho, xi_rho",
            u"AKs's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_w, eta_rho, xi_rho",
            u"tke's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, s_w, eta_rho, xi_rho",
            u"shflux's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u"latent's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u"sensible's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u"lwrad's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u"swrad's dimensions are not in the recommended order T, Z, Y, X. They are ocean_time, eta_rho, xi_rho",
            u'§2.6.1 Conventions global attribute does not contain "CF-1.6". The CF Checker only supports CF-1.6 at this time.',
            u"units (None) attribute of 's_w' must be a string compatible with UDUNITS",
            u"units (None) attribute of 's_rho' must be a string compatible with UDUNITS",
            u"units (None) attribute of 'Cs_w' must be a string compatible with UDUNITS",
            u"units (None) attribute of 'user' must be a string compatible with UDUNITS",
            u"units (None) attribute of 'Cs_r' must be a string compatible with UDUNITS",
            u"CF recommends latitude variable 'lat_rho' to use units degrees_north",
            u"CF recommends latitude variable 'lat_u' to use units degrees_north",
            u"CF recommends latitude variable 'lat_v' to use units degrees_north",
            u"CF recommends latitude variable 'lat_psi' to use units degrees_north",
            u"CF recommends longitude variable 'lon_rho' to use units degrees_east",
            u"CF recommends longitude variable 'lon_u' to use units degrees_east",
            u"CF recommends longitude variable 'lon_v' to use units degrees_east",
            u"CF recommends longitude variable 'lon_psi' to use units degrees_east",
            u'Unidentifiable feature for variable dt',
            u'Unidentifiable feature for variable dtfast',
            u'Unidentifiable feature for variable dstart',
            u'Unidentifiable feature for variable nl_tnu2',
            u'Unidentifiable feature for variable nl_visc2',
            u'Unidentifiable feature for variable Akt_bak',
            u'Unidentifiable feature for variable Akv_bak',
            u'Unidentifiable feature for variable Akk_bak',
            u'Unidentifiable feature for variable Akp_bak',
            u'Unidentifiable feature for variable rdrg',
            u'Unidentifiable feature for variable Zob',
            u'Unidentifiable feature for variable Zos',
            u'Unidentifiable feature for variable Znudg',
            u'Unidentifiable feature for variable M2nudg',
            u'Unidentifiable feature for variable M3nudg',
            u'Unidentifiable feature for variable Tnudg',
            u'Unidentifiable feature for variable FSobc_in',
            u'Unidentifiable feature for variable FSobc_out',
            u'Unidentifiable feature for variable M2obc_in',
            u'Unidentifiable feature for variable M2obc_out',
            u'Unidentifiable feature for variable Tobc_in',
            u'Unidentifiable feature for variable Tobc_out',
            u'Unidentifiable feature for variable M3obc_in',
            u'Unidentifiable feature for variable M3obc_out',
            u'Unidentifiable feature for variable rho0',
            u'Unidentifiable feature for variable xl',
            u'Unidentifiable feature for variable el',
            u'Unidentifiable feature for variable Tcline',
            u'Unidentifiable feature for variable hc',
            u'Unidentifiable feature for variable Cs_r',
            u'Unidentifiable feature for variable Cs_w',
            u'Unidentifiable feature for variable user'
        ]
        # Every expected message must appear (order-independent).
        assert all([m in messages for m in msgs])
    def test_l01_met(self):
        """CF check of the L01 met buoy dataset: expect 16 messages."""
        dataset = self.load_dataset(STATIC_FILES['l01-met'])
        check_results = self.cs.run(dataset, [], 'cf')
        scored, out_of, messages = self.get_results(check_results)

        assert scored < out_of
        assert len(messages) == 16
        # The variable is supposed to be a status flag but it's mislabled
        msgs = [
            u'units for variable air_temperature_qc must be convertible to K currently they are 1',
            u'units for variable wind_speed_qc must be convertible to m s-1 currently they are 1',
            u'standard_name visibility is not defined in Standard Name Table v49',
            u'standard_name modifier data_quality for variable visibility_qc is not a valid modifier according to appendix C',
            u'standard_name wind_direction is not defined in Standard Name Table v49',
            u'standard_name modifier data_quality for variable wind_direction_qc is not a valid modifier according to appendix C',
            u'standard_name wind_gust is not defined in Standard Name Table v49',
            u'standard_name modifier data_quality for variable wind_gust_qc is not a valid modifier according to appendix C',
            u'standard_name modifier data_quality for variable air_temperature_qc is not a valid modifier according to appendix C',
            u'standard_name use_wind is not defined in Standard Name Table v49',
            u'standard_name barometric_pressure is not defined in Standard Name Table v49',
            u'standard_name modifier data_quality for variable barometric_pressure_qc is not a valid modifier according to appendix C',
            u'standard_name modifier data_quality for variable wind_speed_qc is not a valid modifier according to appendix C',
            u'standard_name barometric_pressure is not defined in Standard Name Table v49',
            u"CF recommends latitude variable 'lat' to use units degrees_north",
            u"CF recommends longitude variable 'lon' to use units degrees_east"
        ]
        # Every expected message must appear (order-independent).
        assert all(m in messages for m in msgs)
    def test_usgs_dem_saipan(self):
        """CF check of the Saipan DEM: only the Conventions version fails."""
        dataset = self.load_dataset(STATIC_FILES['usgs_dem_saipan'])
        check_results = self.cs.run(dataset, [], 'cf')
        scored, out_of, messages = self.get_results(check_results)

        assert scored < out_of
        assert len(messages) == 1
        msgs = [
            u'§2.6.1 Conventions global attribute does not contain "CF-1.6". The CF Checker only supports CF-1.6 at this time.'
        ]
        assert all(m in messages for m in msgs)
    def test_sp041(self):
        """CF check of the sp041 glider file: 3 messages expected, including a
        mixed-feature-type complaint."""
        dataset = self.load_dataset(STATIC_FILES['sp041'])
        check_results = self.cs.run(dataset, [], 'cf')
        scored, out_of, messages = self.get_results(check_results)

        assert scored < out_of
        assert len(messages) == 3
        assert (u"lat_qc is not a variable in this dataset") in messages
        # The exact wording varies, so look for the prefix only.
        for i, msg in enumerate(messages):
            if msg.startswith("Different feature types"):
                break
        else:
            assert False, "'Different feature types discovered' was not found in the checker messages"
def test_3mf07(self):
"""Load the 3mf07.nc file and run the CF check suite on it. There should be
several variable/attribute combos which fail:
- latitude:valid min
- latitude:valid_max
- longitude:valid_min
- longitude:valid_max
- references is an empty string
- comment (global attr) is an empty string
- z:dimensions are not a proper subset of dims for variable flag, haul
- variable flag/haul has an unidentifiable feature"""
dataset = self.load_dataset(STATIC_FILES['3mf07'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
msgs = [
u'latitude:valid_min must be a numeric type not a string',
u'latitude:valid_max must be a numeric type not a string',
u'longitude:valid_min must be a numeric type not a string',
u'longitude:valid_max must be a numeric type not a string',
u'§2.6.2 references global attribute should be a non-empty string',
u'§2.6.2 comment global attribute should be a non-empty string',
u'dimensions for auxiliary coordinate variable z (z) are not a subset of dimensions for variable flag (profile)',
u'dimensions for auxiliary coordinate variable z (z) are not a subset of dimensions for variable haul (profile)',
u'Unidentifiable feature for variable flag',
u'Unidentifiable feature for variable haul'
]
assert scored < out_of
assert all(m in messages for m in msgs)
def test_ooi_glider(self):
dataset = self.load_dataset(STATIC_FILES['ooi_glider'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 5
msgs = [
u'§2.6.2 comment global attribute should be a non-empty string',
u"units (None) attribute of 'deployment' must be a string compatible with UDUNITS",
u'Attribute long_name or/and standard_name is highly recommended for variable deployment',
u"latitude variable 'latitude' should define standard_name='latitude' or axis='Y'",
u"longitude variable 'longitude' should define standard_name='longitude' or axis='X'"
]
assert all(m in messages for m in msgs)
def test_swan(self):
dataset = self.load_dataset(STATIC_FILES['swan'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 10
msgs = [
u'global attribute _CoordSysBuilder should begin with a letter and be composed of letters, digits, and underscores',
u'§2.6.1 Conventions global attribute does not contain "CF-1.6". The CF Checker only supports CF-1.6 at this time.',
u'units for variable time_offset must be convertible to s currently they are hours since 2013-02-18T00:00:00Z',
u'units for variable time_run must be convertible to s currently they are hours since 2013-02-18 00:00:00.000 UTC',
u"lon's axis attribute must be T, X, Y, or Z, currently x", "lat's axis attribute must be T, X, Y, or Z, currently y",
u"z's axis attribute must be T, X, Y, or Z, currently z",
u"z: vertical coordinates not defining pressure must include a positive attribute that is either 'up' or 'down'",
u'GRID is not a valid CF featureType. It must be one of point, timeseries, trajectory, profile, timeseriesprofile, trajectoryprofile',
u'Unidentifiable feature for variable time_offset'
]
assert all(m in messages for m in msgs)
def test_kibesillah(self):
dataset = self.load_dataset(STATIC_FILES['kibesillah'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 1
# test for global attributes (CF 2.6.2)
assert (u"§2.6.2 global attribute title should exist and be a non-empty string") in messages
def test_pr_inundation(self):
dataset = self.load_dataset(STATIC_FILES['pr_inundation'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 21
msgs = [
u"waterlevel's dimensions are not in the recommended order T, Z, Y, X. They are time, m, n",
u"velocity_x's dimensions are not in the recommended order T, Z, Y, X. They are time, Layer, m, n",
u"velocity_y's dimensions are not in the recommended order T, Z, Y, X. They are time, Layer, m, n",
u"tau_x's dimensions are not in the recommended order T, Z, Y, X. They are time, m, n",
u"tau_y's dimensions are not in the recommended order T, Z, Y, X. They are time, m, n",
u'§2.6.2 grid_depth:comment should be a non-empty string',
u'§2.6.2 depth:comment should be a non-empty string',
u'§2.6.2 institution global attribute should be a non-empty string',
u'§2.6.2 comment global attribute should be a non-empty string',
u"units (None) attribute of 'LayerInterf' must be a string compatible with UDUNITS",
u"units (None) attribute of 'time_bounds' must be a string compatible with UDUNITS",
u"units (None) attribute of 'Layer' must be a string compatible with UDUNITS",
u'units for variable area must be convertible to m2 currently they are degrees2',
u"k: vertical coordinates not defining pressure must include a positive attribute that is either 'up' or 'down'",
u'grid_longitude is not associated with a coordinate defining true latitude and sharing a subset of dimensions',
u'grid_longitude is not associated with a coordinate defining true longitude and sharing a subset of dimensions',
u'grid_latitude is not associated with a coordinate defining true latitude and sharing a subset of dimensions',
u'grid_latitude is not associated with a coordinate defining true longitude and sharing a subset of dimensions',
u'time_bounds might be a cell boundary variable but there are no variables that define it as a boundary using the `bounds` attribute.',
u'Unidentifiable feature for variable time_bounds',
u'Unidentifiable feature for variable grid_depth'
]
assert all(m in messages for m in msgs)
def test_fvcom(self):
dataset = self.load_dataset(STATIC_FILES['fvcom'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 40
for msg in messages:
if msg.startswith("dimensions for auxiliary coordinate variable siglay"):
break
# it's not clear to me what this is supposed to be doing -- this else clause is outside of the if
else:
raise AssertionError(u"\"dimensions for auxiliary coordinate variable siglay (node, siglay) "
"are not a subset of dimensions for variable u (siglay, nele, time)\""
" not in messages")
assert (u"Unidentifiable feature for variable x") in messages
assert (u'§2.6.1 Conventions global attribute does not contain '
'"CF-1.6". The CF Checker only supports CF-1.6 '
'at this time.') in messages
def test_ww3(self):
dataset = self.load_dataset(STATIC_FILES['ww3'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 8
msgs = [
u'§2.6.2 global attribute title should exist and be a non-empty string',
u'§2.6.2 global attribute history should exist and be a non-empty string',
u'§2.6.1 Conventions field is not present',
u'Attribute long_name or/and standard_name is highly recommended for variable time',
u'Attribute long_name or/and standard_name is highly recommended for variable lon',
u'Attribute long_name or/and standard_name is highly recommended for variable lat',
u"latitude variable 'lat' should define standard_name='latitude' or axis='Y'",
u"longitude variable 'lon' should define standard_name='longitude' or axis='X'"
]
assert all(m in messages for m in msgs)
def test_glcfs(self):
dataset = self.load_dataset(STATIC_FILES['glcfs'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
assert len(messages) == 14
assert (u"units for variable time_offset must be convertible to s currently "
"they are hours since 2016-01-01T12:00:00Z") in messages
assert (u"standard_name cloud_cover is not defined in Standard Name Table v{}".format(self._std_names._version)) in messages
assert (u"standard_name dew_point is not defined in Standard Name Table v{}".format(self._std_names._version)) in messages
assert (u"GRID is not a valid CF featureType. It must be one of point, timeseries, "
"trajectory, profile, timeseriesprofile, trajectoryprofile") in messages
assert (u"global attribute _CoordSysBuilder should begin with a letter and "
"be composed of letters, digits, and underscores") in messages
assert (u"source should be defined")
assert (u'units for cl, "fraction" are not recognized by UDUNITS') in messages
def test_ncei_templates(self):
"""
Tests some of the NCEI NetCDF templates, which usually should get a
perfect score.
"""
dataset = self.load_dataset(STATIC_FILES['NCEI_profile_template_v2_0'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
assert scored < out_of
def test_bad_cf_roles(self):
'''
Tests the CF checker detects datasets with more than 2 defined cf_role
variables.
'''
dataset = self.load_dataset(STATIC_FILES['bad_cf_role'])
check_results = self.cs.run(dataset, [], 'cf')
scored, out_of, messages = self.get_results(check_results)
msgs = [
u'§2.6.2 global attribute title should exist and be a non-empty string',
u'§2.6.2 global attribute history should exist and be a non-empty string',
u'§2.6.1 Conventions field is not present',
u'Unidentifiable feature for variable T',
u'§9.5 The only acceptable values of cf_role for Discrete Geometry CF data sets are timeseries_id, profile_id, and trajectory_id'
]
assert scored < out_of
assert all(m in messages for m in msgs)
| lukecampbell/compliance-checker | compliance_checker/tests/test_cf_integration.py | Python | apache-2.0 | 23,588 | [
"NetCDF"
] | 17695162b03165817430e2858d07ce3813797b97aeb31ea6f66ed9133c584dd7 |
#! /usr/bin/env python
# Copyright (c) 2005 Robert L. Campbell
from pymol import cmd
def fitting(obj1,select1,obj2,select2):
"""
DESCRIPTION
"fitting" allows the superpositioning of object1 onto object2 using
the atoms in selection1 and selection2 (side chains are ignored). The
residue names, residue numbers chain identifiers, segment identifiers,
and alt ids of selection1 are changed to match those in selection2,
temporarily. This allows the normal "fit" command to work. They are
reset after "fit" is run and two new selections are created showing
the selected atoms.
Be careful when creating your selection strings. Within the
selections, do not include the object name because the chain, residue
name, residue number etc. of selection1 of object1 are converted to
match those in selection2. If the object names are included in the
selections, no atoms will be selected since an atom cannot exist in
both object1 and object2 at the same time.
It is important that the beginning residue numbers specify the
aligned residues, but the ending numbers are not critical. The
shorter of the two selections is used in the fit calculation.
USAGE
fitting object1, selection1, object2, selection2
DO NOT include object names in selections!
EXAMPLES
fitting 1xuu, c. a & (i. 296-309 or i. 335-340), 1ame, i. 8-21 or i. 47-52
"""
list_m = []
list_n = []
backbone = 'n. n+ca+c+o &! r. hoh+wat'
select1 = '(%s) & %s' % (select1,backbone)
select2 = '(%s) & %s' % (select2,backbone)
m=cmd.get_model("%s & %s" % (obj1,select1))
n=cmd.get_model("%s & %s" % (obj2,select2))
# for the atoms to be used in fit:
# store id, chain, resn, resi, name, segi, alt
for at in m.atom:
list_m.append((at.id,at.chain,at.resn,at.resi,at.name,at.segi, at.alt))
for at in n.atom:
list_n.append((at.id,at.chain,at.resn,at.resi,at.name,at.segi, at.alt))
if len(m.atom) <= len(n.atom):
total = len(m.atom)
else:
total = len(n.atom)
# set a new segi for the atoms to be used in fit command and to allow resetting later
seg_fit="1fit"
# change the chain,resn,resi,segi and alt of select1 to match select2
for i in range(total):
cmd.do("alter %s & id %s, chain='%s'" % (obj1,list_m[i][0],list_n[i][1]))
cmd.do("alter %s & id %s, resn='%s'" % (obj1,list_m[i][0],list_n[i][2]))
cmd.do("alter %s & id %s, resi=%s" % (obj1,list_m[i][0],list_n[i][3]))
cmd.do("alter %s & id %s, segi='%s'" % (obj1,list_m[i][0],seg_fit))
cmd.do("alter %s & id %s, alt='%s'" % (obj1,list_m[i][0],list_n[i][6]))
# change the segid for obj2 and select2
cmd.do("alter %s & id %s, segi='%s'" % (obj2,list_n[i][0],seg_fit))
print "Fitting %s and %s\n to %s and %s" % (obj1,select1,obj2,select2)
print "Altered to:"
print "%s & %s & segi %s\n" % (obj1,select2,seg_fit),
print "%s & %s & segi %s\n" % (obj2,select2,seg_fit),
print "--------------------------------------------\n"
rms = cmd.fit("%s & %s & segi %s" % (obj1,select2,seg_fit),"%s & %s & segi %s" % (obj2,select2,seg_fit) ,quiet=0)
cmd.delete("%s_fitting" % obj1)
cmd.delete("%s_fitting" % obj2)
# create new objects to show the fit atoms
cmd.create("%s_fitting" % obj1, "%s & %s & segi %s" % (obj1,select2,seg_fit))
cmd.create("%s_fitting" % obj2, "%s & %s & segi %s" % (obj2,select2,seg_fit))
# reset chain,resn,resi,segi & alt of obj1 & select1 from stored list
for atoms_m in list_m:
cmd.do("alter %s & id %s, chain='%s'" % (obj1,atoms_m[0],atoms_m[1]))
cmd.do("alter %s & id %s, resn='%s'" % (obj1,atoms_m[0],atoms_m[2]))
cmd.do("alter %s & id %s, resi=%s" % (obj1,atoms_m[0],atoms_m[3]))
cmd.do("alter %s & id %s, segi='%s'" % (obj1,atoms_m[0],atoms_m[5]))
cmd.do("alter %s & id %s, alt='%s'" % (obj1,atoms_m[0],atoms_m[6]))
# reset segi of obj2 & select2 from stored list
for atoms_n in list_n:
cmd.do("alter %s & id %s, segi='%s'" % (obj2,atoms_n[0],atoms_n[5]))
print "RMSD for fitting selection %s of %s onto \n selection %s of %s = %6.3f" % (select1, obj1, select2, obj2, rms)
cmd.extend("fitting",fitting)
| wjurkowski/ppibench | vegas/scripts/fitting.py | Python | apache-2.0 | 4,121 | [
"PyMOL"
] | 24c68b8c72b342c1b56fd49f8dbfce7d85835a9bcb0cf70bfda66593ef963e72 |
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import chigger
#from mooseutils import message
#message.MOOSE_DEBUG_MODE = True
data = vtk.vtkFloatArray()
n = 100
m = 100
data.SetNumberOfTuples(n*m)
idx = 0
for i in range(n):
for j in range(m):
data.SetValue(idx, i+j)
idx += 1
plane0 = chigger.geometric.PlaneSource2D(origin=[100,100,0], point1=[100,200,0], point2=[200,100,0], resolution=[n,m], cmap='viridis', data=data)
result = chigger.base.ChiggerResult(plane0)
window = chigger.RenderWindow(result, size=[300,300], test=True)
window.write('plane_source.png')
window.start()
| Chuban/moose | python/chigger/tests/geometric/plane_source/plane_source.py | Python | lgpl-2.1 | 1,481 | [
"MOOSE",
"VTK"
] | 35fd02eb4c9bf9d089eebeb94db81d0101da6410f8f680e982328efc9b928109 |
#!/usr/bin/env python
""" This script retrieves the output of all the jobs of a given
test. <jobName> is the path of the directory created by submitMyJob
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import sys
from DIRAC.Interfaces.API.Dirac import Dirac
import os
if len(sys.argv)< 2 :
print "Usage %s <jobName>"%sys.argv[0]
sys.exit(1)
jobName = sys.argv[1]
finalStatus = ['Done', 'Failed']
dirac = Dirac()
idstr = open("%s/jobIdList.txt"%jobName, 'r').readlines()
ids = map(int, idstr)
print "found %s jobs"%(len(ids))
res = dirac.getJobSummary(ids)
if not res['OK']:
print res['Message']
sys.exit(1)
metadata = res['Value']
for jid in ids:
jobMeta = metadata.get( jid, None )
if not jobMeta :
print "No metadata for job ", jid
continue
status = jobMeta['Status']
print "%s %s" % ( jid, status )
if status in finalStatus:
outputDir = '%s/%s'%(jobName,status)
if not os.path.exists( "%s/%s" % ( outputDir, jid ) ):
print "Retrieving sandbox"
res = dirac.getOutputSandbox( jid, outputDir = outputDir )
| andresailer/DIRAC | tests/Performance/DFCPerformance/retrieveResult.py | Python | gpl-3.0 | 1,100 | [
"DIRAC"
] | fc76b0488ec6eb12c49ea6b49e055a3ab38710474e93f9bcdde8fc4aed9985e5 |
# Orca
# Copyright (C) 2014-2015 Synthicity, LLC
# Copyright (C) 2015 Autodesk
# See full license in LICENSE.
| SANDAG/orca | orca/utils/tests/__init__.py | Python | bsd-3-clause | 110 | [
"ORCA"
] | 33111c9f9a6af655b6600244c5bc5c766815424fb98d7aae3de1aa48fa423dd8 |
#
# Copyright (C) 2009, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 999 $
# $Date: 2009-02-09 11:39:12 -0500 (Mon, 09 Feb 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-library.googlecode.com/svn/trunk/projects/packages/examples/mines-sarsa-python/sample_mines_environment.py $
import random
import sys
from rlglue.environment.Environment import Environment
from rlglue.environment import EnvironmentLoader as EnvironmentLoader
from rlglue.types import Observation
from rlglue.types import Action
from rlglue.types import Reward_observation_terminal
# This is a very simple discrete-state, episodic grid world that has
# exploding mines in it. If the agent steps on a mine, the episode
# ends with a large negative reward.
#
# The reward per step is -1, with +10 for exiting the game successfully
# and -100 for stepping on a mine.
# TO USE THIS Environment [order doesn't matter]
# NOTE: I'm assuming the Python codec is installed an is in your Python path
# - Start the rl_glue executable socket server on your computer
# - Run the SampleSarsaAgent and SampleExperiment from this or a
# different codec (Matlab, Python, Java, C, Lisp should all be fine)
# - Start this environment like:
# $> python sample_mines_environment.py
class mines_environment(Environment):
WORLD_FREE = 0
WORLD_OBSTACLE = 1
WORLD_MINE = 2
WORLD_GOAL = 3
randGenerator=random.Random()
fixedStartState=False
startRow=1
startCol=1
currentState=10
def env_init(self):
self.map=[ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
#The Python task spec parser is not yet able to build task specs programmatically
return "VERSION RL-Glue-3.0 PROBLEMTYPE episodic DISCOUNTFACTOR 1 OBSERVATIONS INTS (0 107) ACTIONS INTS (0 3) REWARDS (-100.0 10.0) EXTRA SampleMinesEnvironment(C/C++) by Brian Tanner."
def env_start(self):
if self.fixedStartState:
stateValid=self.setAgentState(self.startRow,self.startCol)
if not stateValid:
print "The fixed start state was NOT valid: "+str(int(self.startRow))+","+str(int(self.startRow))
self.setRandomState()
else:
self.setRandomState()
returnObs=Observation()
returnObs.intArray=[self.calculateFlatState()]
return returnObs
def env_step(self,thisAction):
# Make sure the action is valid
assert len(thisAction.intArray)==1,"Expected 1 integer action."
assert thisAction.intArray[0]>=0, "Expected action to be in [0,3]"
assert thisAction.intArray[0]<4, "Expected action to be in [0,3]"
self.updatePosition(thisAction.intArray[0])
theObs=Observation()
theObs.intArray=[self.calculateFlatState()]
returnRO=Reward_observation_terminal()
returnRO.r=self.calculateReward()
returnRO.o=theObs
returnRO.terminal=self.checkCurrentTerminal()
return returnRO
def env_cleanup(self):
pass
def env_message(self,inMessage):
# Message Description
# 'set-random-start-state'
#Action: Set flag to do random starting states (the default)
if inMessage.startswith("set-random-start-state"):
self.fixedStartState=False;
return "Message understood. Using random start state.";
# Message Description
# 'set-start-state X Y'
# Action: Set flag to do fixed starting states (row=X, col=Y)
if inMessage.startswith("set-start-state"):
splitString=inMessage.split(" ");
self.startRow=int(splitString[1]);
self.startCol=int(splitString[2]);
self.fixedStartState=True;
return "Message understood. Using fixed start state.";
# Message Description
# 'print-state'
# Action: Print the map and the current agent location
if inMessage.startswith("print-state"):
self.printState();
return "Message understood. Printed the state.";
return "SamplesMinesEnvironment(Python) does not respond to that message.";
def setAgentState(self,row, col):
self.agentRow=row
self.agentCol=col
return self.checkValid(row,col) and not self.checkTerminal(row,col)
def setRandomState(self):
numRows=len(self.map)
numCols=len(self.map[0])
startRow=self.randGenerator.randint(0,numRows-1)
startCol=self.randGenerator.randint(0,numCols-1)
while not self.setAgentState(startRow,startCol):
startRow=self.randGenerator.randint(0,numRows-1)
startCol=self.randGenerator.randint(0,numCols-1)
def checkValid(self,row, col):
valid=False
numRows=len(self.map)
numCols=len(self.map[0])
if(row < numRows and row >= 0 and col < numCols and col >= 0):
if self.map[row][col] != self.WORLD_OBSTACLE:
valid=True
return valid
def checkTerminal(self,row,col):
if (self.map[row][col] == self.WORLD_GOAL or self.map[row][col] == self.WORLD_MINE):
return True
return False
def checkCurrentTerminal(self):
return self.checkTerminal(self.agentRow,self.agentCol)
def calculateFlatState(self):
numRows=len(self.map)
return self.agentCol * numRows + self.agentRow
def updatePosition(self, theAction):
# When the move would result in hitting an obstacles, the agent simply doesn't move
newRow = self.agentRow;
newCol = self.agentCol;
if (theAction == 0):#move down
newCol = self.agentCol - 1;
if (theAction == 1): #move up
newCol = self.agentCol + 1;
if (theAction == 2):#move left
newRow = self.agentRow - 1;
if (theAction == 3):#move right
newRow = self.agentRow + 1;
#Check if new position is out of bounds or inside an obstacle
if(self.checkValid(newRow,newCol)):
self.agentRow = newRow;
self.agentCol = newCol;
def calculateReward(self):
if(self.map[self.agentRow][self.agentCol] == self.WORLD_GOAL):
return 10.0;
if(self.map[self.agentRow][self.agentCol] == self.WORLD_MINE):
return -100.0;
return -1.0;
def printState(self):
numRows=len(self.map)
numCols=len(self.map[0])
print "Agent is at: "+str(self.agentRow)+","+str(self.agentCol)
print "Columns:0-10 10-17"
print "Col ",
for col in range(0,numCols):
print col%10,
for row in range(0,numRows):
print
print "Row: "+str(row)+" ",
for col in range(0,numCols):
if self.agentRow==row and self.agentCol==col:
print "A",
else:
if self.map[row][col] == self.WORLD_GOAL:
print "G",
if self.map[row][col] == self.WORLD_MINE:
print "M",
if self.map[row][col] == self.WORLD_OBSTACLE:
print "*",
if self.map[row][col] == self.WORLD_FREE:
print " ",
print
if __name__=="__main__":
EnvironmentLoader.loadEnvironment(mines_environment()) | shiwalimohan/RLInfiniteMario | system/codecs/Python/examples/mines-sarsa-example/sample_mines_environment.py | Python | gpl-2.0 | 7,414 | [
"Brian"
] | 0e74fa295ed5ea176c0675a6869573d2e9e573cd59382e52936d9433a8f4cccf |
# Copyright (c) 2014, Satoshi Nakamoto Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import fnmatch
import io
from datetime import datetime
import logging
import sys
import csv
import json
import uuid
from pacioli import app, db, models
import pacioli.auditing.blockchain as blockchain
import pacioli.accounting.rates as rates
from werkzeug import secure_filename
from dateutil import parser
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
# Assumes a Werkzeug File Storage object: http://werkzeug.pocoo.org/docs/0.9/datastructures/#werkzeug.datastructures.FileStorage
def process_filestorage(file):
if allowed_file(file.filename):
fileName = secure_filename(file.filename)
fileType = fileName.rsplit('.', 1)[1]
file.seek(0, os.SEEK_END)
fileSize = file.tell()
fileText = file.stream.getvalue().decode('UTF-8')
process_memoranda(fileName, fileType, fileSize, fileText)
def process_memoranda(fileName, fileType, fileSize, fileText):
uploadDate = datetime.now()
memoranda_id = str(uuid.uuid4())
memo = models.Memoranda(
id=memoranda_id,
date=uploadDate,
fileName=fileName,
fileType=fileType,
fileText=fileText,
fileSize=fileSize)
db.session.add(memo)
db.session.commit()
document = io.StringIO(fileText)
if fileType == 'csv':
process_csv(document, memoranda_id)
def process_csv(document, memoranda_id):
reader = csv.reader(document)
reader = enumerate(reader)
rows = [pair for pair in reader]
header = max(rows, key=lambda tup:len(tup[1]))
for row in rows:
if row[0] > header[0] and len(row[1]) == len(header[1]):
memoranda = zip(header[1], row[1])
memoranda = dict(memoranda)
# Bitcoin Core
if header[1] == ['Confirmed', 'Date', 'Type', 'Label', 'Address', 'Amount', 'ID']:
address = memoranda['Address']
txid = memoranda['ID'][:64]
date = parser.parse(memoranda['Date'])
amount = int(float(memoranda['Amount'])*100000000)
amount = abs(amount)
if amount > 0:
debit_ledger_account = "Bitcoin Core"
credit_ledger_account = "Revenues"
elif amount < 0:
debit_ledger_account = "Expenses"
credit_ledger_account = "Bitcoin Core"
# MultiBit
elif header[1] == ['Date', 'Description', 'Amount (BTC)', 'Amount ($)', 'Transaction Id']:
address = memoranda['Description']
address = address.split('(')[-1]
address = address.replace(r"\(.*\)","")
txid = memoranda['Transaction Id']
date = parser.parse(memoranda['Date'])
amount = int(float(memoranda['Amount (BTC)'])*100000000)
if amount > 0:
debit_ledger_account = "MultiBit"
credit_ledger_account = "Revenues"
elif amount < 0:
debit_ledger_account = "Expenses"
credit_ledger_account = "MultiBit"
amount = abs(amount)
# Armory
elif header[1] == ['Date', 'Transaction ID', '#Conf', 'Wallet ID', 'Wallet Name', 'Credit', 'Debit', 'Fee (paid by this wallet)', 'Wallet Balance', 'Total Balance', 'Label']:
# Armory does not export the address you're receiving with / sending to .... complain here: https://github.com/etotheipi/BitcoinArmory/issues/247
address = ''
txid = memoranda['Transaction ID']
date = parser.parse(memoranda['Date'])
fee = memoranda['Fee (paid by this wallet)']
if fee == '':
fee = 0
else:
fee = int(float(fee)*100000000)
if memoranda['Credit'] == '':
memoranda['Credit'] = 0
if memoranda['Debit'] == '':
memoranda['Debit'] = 0
credit = int(float(memoranda['Credit'])*100000000)
debit = int(float(memoranda['Debit'])*100000000)
if credit > 0:
amount = abs(credit)
debit_ledger_account = "Armory"
credit_ledger_account = "Revenues"
elif debit > 0:
amount = abs(debit) - abs(fee)
debit_ledger_account = "Expenses"
credit_ledger_account = "Armory"
# Electrum
elif header[1] == ["transaction_hash","label", "confirmations", "value", "fee", "balance", "timestamp"]:
address = ''
# Electrum does not export the address you're receiving with / sending to .... complain here: https://github.com/spesmilo/electrum/issues/911
txid = memoranda['transaction_hash']
date = parser.parse(memoranda['timestamp'])
value = int(float(memoranda['value'])*100000000)
fee = int(float(memoranda['fee'])*100000000)
if value > 0:
amount = abs(value)
debit_ledger_account = "Electrum"
credit_ledger_account = "Revenues"
elif value < 0:
amount = abs(value) - abs(fee)
debit_ledger_account = "Expenses"
credit_ledger_account = "Electrum"
#Coinbase
elif header[1] == ["Timestamp","Balance","BTC Amount","To","Notes","Instantly Exchanged","Transfer Total","Transfer Total Currency","Transfer Fee","Transfer Fee Currency","Transfer Payment Method","Transfer ID","Order Price","Order Currency","Order BTC","Order Tracking Code","Order Custom Parameter","Order Paid Out","Recurring Payment ID","Coinbase ID (visit https://www.coinbase.com/transactions/[ID] in your browser)","Bitcoin Hash (visit https://www.coinbase.com/tx/[HASH] in your browser for more info)"]:
address = memoranda['To']
txid = memoranda['Bitcoin Hash (visit https://www.coinbase.com/tx/[HASH] in your browser for more info)'][:64]
date = parser.parse(memoranda['Timestamp'])
amount = int(float(memoranda['BTC Amount'])*100000000)
if amount > 0:
debit_ledger_account = "Coinbase"
credit_ledger_account = "Revenues"
elif amount < 0:
debit_ledger_account = "Expenses"
credit_ledger_account = "Coinbase"
amount = abs(amount)
else:
return False
memoranda_transactions_id = str(uuid.uuid4())
journal_entry_id = str(uuid.uuid4())
debit_ledger_entry_id = str(uuid.uuid4())
credit_ledger_entry_id = str(uuid.uuid4())
# blockchain.get_transaction(txid)
tx_details = str(memoranda)
memoranda_transaction = models.MemorandaTransactions(
id=memoranda_transactions_id,
memoranda_id=memoranda_id,
txid=txid,
details=tx_details)
db.session.add(memoranda_transaction)
db.session.commit()
journal_entry = models.JournalEntries(
id=journal_entry_id,
memoranda_transactions_id=memoranda_transactions_id)
db.session.add(journal_entry)
db.session.commit()
debit_ledger_entry = models.LedgerEntries(
id=debit_ledger_entry_id,
date=date,
tside="debit",
ledger=debit_ledger_account,
amount=amount,currency="satoshis",
journal_entry_id=journal_entry_id)
db.session.add(debit_ledger_entry)
credit_ledger_entry = models.LedgerEntries(
id=credit_ledger_entry_id,date=date,
tside="credit",
ledger=credit_ledger_account,
amount=amount,
currency="satoshis",
journal_entry_id=journal_entry_id)
db.session.add(credit_ledger_entry)
db.session.commit()
| enikesha/pacioli | pacioli/accounting/memoranda.py | Python | bsd-3-clause | 9,435 | [
"VisIt"
] | 11a28282a401591e84abc148c4088d121e5e42219f534e65ecca0fd04298f630 |
import os
# Default Quantum ESPRESSO binary paths and queue-submission settings.
# Entries may be overridden from a ``.qepyrc`` file (see
# read_configuration below).
QEPYRC = {'pw.x': '/opt/espresso-5.1/bin/pw.x',
          'ph.x': '/opt/espresso-5.1/bin/ph.x',
          'q2r.x': '/opt/espresso-5.1/bin/q2r.x',
          'mode': 'queue',
          'command': 'qsub',
          'options': '-joe',
          'walltime': '24',
          'nodes': 1,
          'ppn': 1,
          'mem': '2GB',
          'vmem': '2GB',
          # BUGFIX: the key used to be 'jobname:' (stray trailing colon),
          # so a lookup for 'jobname' could never find the default.
          'jobname': 'None'}


def read_configuration(fname):
    """Read ``key = value`` lines from *fname* into the global QEPYRC dict.

    Blank lines and lines starting with '#' are skipped; an inline
    '#...' comment is stripped before parsing.  Only the FIRST '=' on a
    line separates key from value, so values may themselves contain '='.
    Keys and values are whitespace-stripped and stored as strings.
    """
    # 'with' guarantees the file is closed (the original leaked the handle)
    with open(fname) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if '#' in line:
                # take the part before the first inline comment marker
                line = line.split('#')[0]
            # split on the first '=' only, so values may contain '='
            key, value = line.split('=', 1)
            QEPYRC[key.strip()] = value.strip()
# Apply user configuration: ~/.qepyrc is read first, then a .qepyrc in
# the current directory (later files override earlier settings).
config_files = [os.path.join(os.environ['HOME'], '.qepyrc'),
                '.qepyrc']
for _cfg_path in config_files:
    if os.path.exists(_cfg_path):
        read_configuration(_cfg_path)
| kparrish/qepy | qepy/qepyrc.py | Python | gpl-2.0 | 759 | [
"ESPResSo"
] | 2cd6416c2e9025dd514a7ccabf000dea930c3a076525362e9edc71ff44f4111a |
# -*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""
The :mod:`elm` module implements the
Extreme Learning Machine Classifiers and Regressors (ELMClassifier,
ELMRegressor, SimpleELMRegressor, SimpleELMClassifier).
An Extreme Learning Machine (ELM) is a single layer feedforward
network with a random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import pinv2
from sklearn.utils import as_float_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.preprocessing import LabelBinarizer
from random_layer import RandomLayer, MLPRandomLayer
__all__ = ["ELMRegressor",
"ELMClassifier",
"GenELMRegressor",
"GenELMClassifier"]
# BaseELM class, regressor and hidden_layer attributes
# and provides defaults for docstrings
class BaseELM(BaseEstimator):
    """Abstract base class for the ELM estimators.

    Holds the shared ``hidden_layer`` and ``regressor`` attributes and
    declares the fit/predict contract.

    Warning: not intended for direct use -- instantiate one of the
    derived classes instead.
    """
    __metaclass__ = ABCMeta

    def __init__(self, hidden_layer, regressor):
        self.hidden_layer = hidden_layer
        self.regressor = regressor

    @abstractmethod
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels in classification, real numbers
            in regression).

        Returns
        -------
        self : object
        """

    @abstractmethod
    def predict(self, X):
        """Predict values for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.
        """
class GenELMRegressor(BaseELM, RegressorMixin):
    """Extreme Learning Machine regressor (general form).

    An ELM is a single-layer feedforward network with a random hidden
    layer; the hidden->output weights are fitted by ordinary linear
    least squares by default. [1][2]

    Parameters
    ----------
    `hidden_layer` : random_layer instance, optional
        (default=MLPRandomLayer(random_state=0))

    `regressor` : regressor instance, optional (default=None)
        When given, performs the regression from hidden-unit
        activations to the outputs (and the subsequent predictions);
        otherwise an ordinary linear least-squares fit is used.

    Attributes
    ----------
    `coefs_` : numpy array
        Fitted least-squares coefficients (only when no regressor is
        supplied).

    `fitted_` : bool
        True once fit() has been called.

    `hidden_activations_` : numpy array of shape [n_samples, n_hidden]
        Hidden-layer activations for the last input.

    See Also
    --------
    RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """
    def __init__(self,
                 hidden_layer=MLPRandomLayer(random_state=0),
                 regressor=None):
        super(GenELMRegressor, self).__init__(hidden_layer, regressor)

        self.coefs_ = None
        self.fitted_ = False
        self.hidden_activations_ = None

    def _fit_regression(self, y):
        """Solve the hidden->output weights by pseudo-inverse, or
        delegate to the user-supplied regressor."""
        if self.regressor is None:
            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
        else:
            self.regressor.fit(self.hidden_activations_, y)

        self.fitted_ = True

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values.

        Returns
        -------
        self : object
        """
        # fit the random hidden layer, then solve the output regression
        # on its activations
        self.hidden_activations_ = self.hidden_layer.fit_transform(X)
        self._fit_regression(as_float_array(y, copy=True))
        return self

    def _get_predictions(self):
        """Map the current hidden activations to output predictions."""
        if self.regressor is None:
            return safe_sparse_dot(self.hidden_activations_, self.coefs_)
        return self.regressor.predict(self.hidden_activations_)

    def predict(self, X):
        """Predict values for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.

        Raises
        ------
        ValueError
            If called before fit().
        """
        if not self.fitted_:
            raise ValueError("ELMRegressor not fitted")

        self.hidden_activations_ = self.hidden_layer.transform(X)
        return self._get_predictions()
class GenELMClassifier(BaseELM, ClassifierMixin):
    """Extreme Learning Machine classifier (general form).

    Binarizes the targets, fits a GenELMRegressor on the binarized
    values, and unbinarizes that regressor's raw predictions to
    produce class labels. [1][2]

    Parameters
    ----------
    `hidden_layer` : random_layer instance, optional
        (default=MLPRandomLayer(random_state=0))

    `binarizer` : LabelBinarizer, optional
        (default=LabelBinarizer(-1, 1))

    `regressor` : regressor instance, optional (default=None)
        When given, performs the regression from hidden-unit
        activations to the outputs; otherwise an ordinary linear
        least-squares fit is used.

    Attributes
    ----------
    `classes_` : numpy array of shape [n_classes]
        Array of class labels.

    `genelm_regressor_` : GenELMRegressor instance
        Performs the actual fit on the binarized targets.

    See Also
    --------
    RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """
    def __init__(self,
                 hidden_layer=MLPRandomLayer(random_state=0),
                 binarizer=LabelBinarizer(-1, 1),
                 regressor=None):
        # NOTE(review): the default hidden_layer/binarizer instances are
        # created once at class-definition time and shared by every
        # instance constructed with the defaults.
        super(GenELMClassifier, self).__init__(hidden_layer, regressor)

        self.binarizer = binarizer
        self.classes_ = None
        self.genelm_regressor_ = GenELMRegressor(hidden_layer, regressor)

    def decision_function(self, X):
        """Return the raw regressor outputs for X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]

        Returns
        -------
        C : array of shape [n_samples, n_classes] or [n_samples,]
            Per-class decision values; in the two-class case the shape
            is [n_samples,].
        """
        return self.genelm_regressor_.predict(X)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels).

        Returns
        -------
        self : object
        """
        self.classes_ = np.unique(y)
        # fit the wrapped regressor on the binarized class labels
        self.genelm_regressor_.fit(X, self.binarizer.fit_transform(y))
        return self

    def predict(self, X):
        """Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted class labels.
        """
        return self.binarizer.inverse_transform(self.decision_function(X))
# ELMRegressor with default RandomLayer
class ELMRegressor(BaseEstimator, RegressorMixin):
    """
    ELMRegressor is a regressor based on the Extreme Learning Machine.

    An Extreme Learning Machine (ELM) is a single layer feedforward
    network with a random hidden layer components and ordinary linear
    least squares fitting of the hidden->output weights by default.
    [1][2]

    ELMRegressor is a wrapper for a GenELMRegressor that uses a
    RandomLayer and passes the __init__ parameters through
    to the hidden layer generated by the fit() method.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer

    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    `rbf_width` : float, optional (default=1.0)
        multiplier on rbf_activation

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation.
        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase',
        'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
        'inv_multiquadric', 'reclinear' or a callable.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise
        be randomly generated.  Valid keys: 'radii', 'centers', 'biases',
        'weights'.

    `regressor` : regressor instance, optional (default=None)
        If provided, this object is used to perform the regression from
        hidden unit activations to the outputs and subsequent
        predictions.  If not present, an ordinary linear least squares
        fit is performed.

    `random_state` : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `_genelm_regressor` : GenELMRegressor object
        Wrapped object that actually performs the fit.

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """
    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None, random_state=None):
        self.n_hidden = n_hidden
        self.alpha = alpha
        self.random_state = random_state
        self.activation_func = activation_func
        self.activation_args = activation_args
        self.user_components = user_components
        self.rbf_width = rbf_width
        self.regressor = regressor

        # created lazily by fit(); None means "not fitted yet"
        self._genelm_regressor = None

    def _create_random_layer(self):
        """Build a RandomLayer from the constructor parameters."""
        return RandomLayer(n_hidden=self.n_hidden,
                           alpha=self.alpha, random_state=self.random_state,
                           activation_func=self.activation_func,
                           activation_args=self.activation_args,
                           user_components=self.user_components,
                           rbf_width=self.rbf_width)

    def fit(self, X, y):
        """
        Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        rhl = self._create_random_layer()
        self._genelm_regressor = GenELMRegressor(hidden_layer=rhl,
                                                 regressor=self.regressor)
        self._genelm_regressor.fit(X, y)
        return self

    def predict(self, X):
        """
        Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.

        Raises
        ------
        ValueError
            If called before fit().
        """
        if (self._genelm_regressor is None):
            # BUGFIX: the message used to read "SimpleELMRegressor not
            # fitted" -- a leftover from this class's former name.
            raise ValueError("ELMRegressor not fitted")

        return self._genelm_regressor.predict(X)
class ELMClassifier(ELMRegressor):
    """ELMClassifier is a classifier based on the Extreme Learning Machine.

    An ELM is a single-layer feedforward network with a random hidden
    layer; the hidden->output weights are fitted by ordinary linear
    least squares by default. [1][2]

    This subclass first binarizes the targets, then uses the
    ELMRegressor superclass to compute the decision function, which is
    unbinarized to yield the class prediction.  The RandomLayer
    parameters used for the input transform are exposed in the
    constructor.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation.
        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase',
        'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
        'inv_multiquadric', 'reclinear' or a callable.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `random_state` : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `classes_` : numpy array of shape [n_classes]
        Array of class labels

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMRegressor

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """
    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None,
                 binarizer=LabelBinarizer(-1, 1),
                 random_state=None):
        # NOTE(review): the default LabelBinarizer instance is created
        # once at class-definition time and is shared by every instance
        # built with the default -- confirm this is intended.
        super(ELMClassifier, self).__init__(n_hidden=n_hidden,
                                            alpha=alpha,
                                            random_state=random_state,
                                            activation_func=activation_func,
                                            activation_args=activation_args,
                                            user_components=user_components,
                                            rbf_width=rbf_width,
                                            regressor=regressor)

        self.classes_ = None
        self.binarizer = binarizer

    def decision_function(self, X):
        """Return per-class decision values for X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]

        Returns
        -------
        C : array of shape [n_samples, n_classes] or [n_samples,]
            Decision values; shape is [n_samples,] in the 2-class case.
        """
        return super(ELMClassifier, self).predict(X)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target class labels.

        Returns
        -------
        self : object
        """
        self.classes_ = np.unique(y)
        # regress on the binarized labels via the superclass
        super(ELMClassifier, self).fit(X, self.binarizer.fit_transform(y))
        return self

    def predict(self, X):
        """Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted class labels.
        """
        return self.binarizer.inverse_transform(self.decision_function(X))

    def score(self, X, y):
        """Accuracy score (explicit because we don't inherit from
        ClassifierMixin)."""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, self.predict(X))
| zygmuntz/Python-ELM | elm.py | Python | bsd-3-clause | 20,363 | [
"Gaussian"
] | e8cf1df0b8db9fa3a2dc6493d1f9756994258cce9e46873ecabbdd201f2f6e21 |
# -*- coding: utf-8 -*-
# Regression check for GitHub issue #69 (also BhallaLab/moose-gui#3):
# after loading a Genesis model, deleting every chemical compartment's
# ksolve and stoich objects must not crash.
import moose

print( '[INFO] Importing moose from %s' % moose.__file__ )
print( '[INFO] Version : %s' % moose.__version__ )

moose.loadModel('../data/acc27.g', '/acc27_1', "gsl")
compts = moose.wildcardFind('/acc27_1/##[ISA=ChemCompt]')
for compt in compts:
    stoich_path = compt.path + '/stoich'
    if not moose.exists(stoich_path):
        continue
    stoich = moose.element(stoich_path)
    # remove the solver first, then the stoichiometry object itself
    if moose.exists(stoich.ksolve.path):
        moose.delete(stoich.ksolve)
    moose.delete(stoich)
| dharmasam9/moose-core | tests/issues/issue_69.py | Python | gpl-3.0 | 575 | [
"MOOSE"
] | 1ab582425f782ccb30a921a522b73e075260cb574497c88fde323122807398fa |
# Copyright 2000-2002 Andrew Dalke.
# Copyright 2002-2004 Brad Chapman.
# Copyright 2006-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Represent a Sequence Record, a sequence with annotation."""
import sys
# Add path to Bio
sys.path.append('..')
from Bio._py3k import basestring
__docformat__ = "restructuredtext en" # Simple markup to show doctests nicely
# NEEDS TO BE SYNCH WITH THE REST OF BIOPYTHON AND BIOPERL
# In particular, the SeqRecord and BioSQL.BioSeq.DBSeqRecord classes
# need to be in sync (this is the BioSQL "Database SeqRecord", see
# also BioSQL.BioSeq.DBSeq which is the "Database Seq" class)
class _RestrictedDict(dict):
"""Dict which only allows sequences of given length as values (PRIVATE).
This simple subclass of the Python dictionary is used in the SeqRecord
object for holding per-letter-annotations. This class is intended to
prevent simple errors by only allowing python sequences (e.g. lists,
strings and tuples) to be stored, and only if their length matches that
expected (the length of the SeqRecord's seq object). It cannot however
prevent the entries being edited in situ (for example appending entries
to a list).
>>> x = _RestrictedDict(5)
>>> x["test"] = "hello"
>>> x
{'test': 'hello'}
Adding entries which don't have the expected length are blocked:
>>> x["test"] = "hello world"
Traceback (most recent call last):
...
TypeError: We only allow python sequences (lists, tuples or strings) of length 5.
The expected length is stored as a private attribute,
>>> x._length
5
In order that the SeqRecord (and other objects using this class) can be
pickled, for example for use in the multiprocessing library, we need to
be able to pickle the restricted dictionary objects.
Using the default protocol, which is 0 on Python 2.x,
>>> import pickle
>>> y = pickle.loads(pickle.dumps(x))
>>> y
{'test': 'hello'}
>>> y._length
5
Using the highest protocol, which is 2 on Python 2.x,
>>> import pickle
>>> z = pickle.loads(pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
>>> z
{'test': 'hello'}
>>> z._length
5
"""
def __init__(self, length):
"""Create an EMPTY restricted dictionary."""
dict.__init__(self)
self._length = int(length)
def __setitem__(self, key, value):
# The check hasattr(self, "_length") is to cope with pickle protocol 2
# I couldn't seem to avoid this with __getstate__ and __setstate__
if not hasattr(value, "__len__") or not hasattr(value, "__getitem__") \
or (hasattr(self, "_length") and len(value) != self._length):
raise TypeError("We only allow python sequences (lists, tuples or "
"strings) of length %i." % self._length)
dict.__setitem__(self, key, value)
def update(self, new_dict):
# Force this to go via our strict __setitem__ method
for (key, value) in new_dict.items():
self[key] = value
class SeqRecord(object):
"""A SeqRecord object holds a sequence and information about it.
Main attributes:
- id - Identifier such as a locus tag (string)
- seq - The sequence itself (Seq object or similar)
Additional attributes:
- name - Sequence name, e.g. gene name (string)
- description - Additional text (string)
- dbxrefs - List of database cross references (list of strings)
- features - Any (sub)features defined (list of SeqFeature objects)
- annotations - Further information about the whole sequence (dictionary).
Most entries are strings, or lists of strings.
- letter_annotations - Per letter/symbol annotation (restricted
dictionary). This holds Python sequences (lists, strings
or tuples) whose length matches that of the sequence.
A typical use would be to hold a list of integers
representing sequencing quality scores, or a string
representing the secondary structure.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly (see the __init__ method for further details):
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Alphabet import IUPAC
>>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
... IUPAC.protein),
... id="YP_025292.1", name="HokC",
... description="toxic membrane protein")
>>> print(record)
ID: YP_025292.1
Name: HokC
Description: toxic membrane protein
Number of features: 0
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
If you want to save SeqRecord objects to a sequence file, use Bio.SeqIO
for this. For the special case where you want the SeqRecord turned into
a string in a particular file format there is a format method which uses
Bio.SeqIO internally:
>>> print(record.format("fasta"))
>YP_025292.1 toxic membrane protein
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
<BLANKLINE>
You can also do things like slicing a SeqRecord, checking its length, etc
>>> len(record)
44
>>> edited = record[:10] + record[11:]
>>> print(edited.seq)
MKQHKAMIVAIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
>>> print(record.seq)
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
"""
def __init__(self, seq, id="<unknown id>", name="<unknown name>",
description="<unknown description>", dbxrefs=None,
features=None, annotations=None,
letter_annotations=None):
"""Create a SeqRecord.
Arguments:
- seq - Sequence, required (Seq, MutableSeq or UnknownSeq)
- id - Sequence identifier, recommended (string)
- name - Sequence name, optional (string)
- description - Sequence description, optional (string)
- dbxrefs - Database cross references, optional (list of strings)
- features - Any (sub)features, optional (list of SeqFeature objects)
- annotations - Dictionary of annotations for the whole sequence
- letter_annotations - Dictionary of per-letter-annotations, values
should be strings, list or tuples of the same
length as the full sequence.
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects. However, you may want to create your own SeqRecord
objects directly.
Note that while an id is optional, we strongly recommend you supply a
unique id string for each record. This is especially important
if you wish to write your sequences to a file.
If you don't have the actual sequence, but you do know its length,
then using the UnknownSeq object from Bio.Seq is appropriate.
You can create a 'blank' SeqRecord object, and then populate the
attributes later.
"""
if id is not None and not isinstance(id, basestring):
# Lots of existing code uses id=None... this may be a bad idea.
raise TypeError("id argument should be a string")
if not isinstance(name, basestring):
raise TypeError("name argument should be a string")
if not isinstance(description, basestring):
raise TypeError("description argument should be a string")
self._seq = seq
self.id = id
self.name = name
self.description = description
# database cross references (for the whole sequence)
if dbxrefs is None:
dbxrefs = []
elif not isinstance(dbxrefs, list):
raise TypeError("dbxrefs argument should be a list (of strings)")
self.dbxrefs = dbxrefs
# annotations about the whole sequence
if annotations is None:
annotations = {}
elif not isinstance(annotations, dict):
raise TypeError("annotations argument should be a dict")
self.annotations = annotations
if letter_annotations is None:
# annotations about each letter in the sequence
if seq is None:
# Should we allow this and use a normal unrestricted dict?
self._per_letter_annotations = _RestrictedDict(length=0)
else:
try:
self._per_letter_annotations = \
_RestrictedDict(length=len(seq))
except:
raise TypeError("seq argument should be a Seq object or similar")
else:
# This will be handled via the property set function, which will
# turn this into a _RestrictedDict and thus ensure all the values
# in the dict are the right length
self.letter_annotations = letter_annotations
# annotations about parts of the sequence
if features is None:
features = []
elif not isinstance(features, list):
raise TypeError("features argument should be a list (of SeqFeature objects)")
self.features = features
# TODO - Just make this a read only property?
def _set_per_letter_annotations(self, value):
if not isinstance(value, dict):
raise TypeError("The per-letter-annotations should be a "
"(restricted) dictionary.")
# Turn this into a restricted-dictionary (and check the entries)
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
self._per_letter_annotations.update(value)
letter_annotations = property(
fget=lambda self: self._per_letter_annotations,
fset=_set_per_letter_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.
For example, this can hold quality scores used in FASTQ or QUAL files.
Consider this example using Bio.SeqIO to read in an example Solexa
variant FASTQ file as a SeqRecord:
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(list(record.letter_annotations))
['solexa_quality']
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
The letter_annotations get sliced automatically if you slice the
parent SeqRecord, for example taking the last ten bases:
>>> sub_record = record[-10:]
>>> print("%s %s" % (sub_record.id, sub_record.seq))
slxa_0001_1_0001_01 ACGTNNNNNN
>>> print(sub_record.letter_annotations["solexa_quality"])
[4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
Any python sequence (i.e. list, tuple or string) can be recorded in
the SeqRecord's letter_annotations dictionary as long as the length
matches that of the SeqRecord's sequence. e.g.
>>> len(sub_record.letter_annotations)
1
>>> sub_record.letter_annotations["dummy"] = "abcdefghij"
>>> len(sub_record.letter_annotations)
2
You can delete entries from the letter_annotations dictionary as usual:
>>> del sub_record.letter_annotations["solexa_quality"]
>>> sub_record.letter_annotations
{'dummy': 'abcdefghij'}
You can completely clear the dictionary easily as follows:
>>> sub_record.letter_annotations = {}
>>> sub_record.letter_annotations
{}
""")
def _set_seq(self, value):
# TODO - Add a deprecation warning that the seq should be write only?
if self._per_letter_annotations:
# TODO - Make this a warning? Silently empty the dictionary?
raise ValueError("You must empty the letter annotations first!")
self._seq = value
try:
self._per_letter_annotations = _RestrictedDict(length=len(self.seq))
except AttributeError:
# e.g. seq is None
self._per_letter_annotations = _RestrictedDict(length=0)
seq = property(fget=lambda self: self._seq,
fset=_set_seq,
doc="The sequence itself, as a Seq or MutableSeq object.")
def __getitem__(self, index):
"""Returns a sub-sequence or an individual letter.
Slicing, e.g. my_record[5:10], returns a new SeqRecord for
that sub-sequence with approriate annotation preserved. The
name, id and description are kept.
Any per-letter-annotations are sliced to match the requested
sub-sequence. Unless a stride is used, all those features
which fall fully within the subsequence are included (with
their locations adjusted accordingly).
However, the annotations dictionary and the dbxrefs list are
not used for the new SeqRecord, as in general they may not
apply to the subsequence. If you want to preserve them, you
must explictly copy them to the new SeqRecord yourself.
Using an integer index, e.g. my_record[5] is shorthand for
extracting that letter from the sequence, my_record.seq[5].
For example, consider this short protein and its secondary
structure as encoded by the PDB (e.g. H for alpha helices),
plus a simple feature for its histidine self phosphorylation
site:
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.SeqFeature import SeqFeature, FeatureLocation
>>> from Bio.Alphabet import IUPAC
>>> rec = SeqRecord(Seq("MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLAT"
... "EMMSEQDGYLAESINKDIEECNAIIEQFIDYLR",
... IUPAC.protein),
... id="1JOY", name="EnvZ",
... description="Homodimeric domain of EnvZ from E. coli")
>>> rec.letter_annotations["secondary_structure"] = " S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT "
>>> rec.features.append(SeqFeature(FeatureLocation(20, 21),
... type = "Site"))
Now let's have a quick look at the full record,
>>> print(rec)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())
>>> print(rec.letter_annotations["secondary_structure"])
S SSSSSSHHHHHTTTHHHHHHHHHHHHHHHHHHHHHHTHHHHHHHHHHHHHHHHHHHHHTT
>>> print(rec.features[0].location)
[20:21]
Now let's take a sub sequence, here chosen as the first (fractured)
alpha helix which includes the histidine phosphorylation site:
>>> sub = rec[11:41]
>>> print(sub)
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('RTLLMAGVSHDLRTPLTRIRLATEMMSEQD', IUPACProtein())
>>> print(sub.letter_annotations["secondary_structure"])
HHHHHTTTHHHHHHHHHHHHHHHHHHHHHH
>>> print(sub.features[0].location)
[9:10]
You can also of course omit the start or end values, for
example to get the first ten letters only:
>>> print(rec[:10])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('MAAGVKQLAD', IUPACProtein())
Or for the last ten letters:
>>> print(rec[-10:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 0
Per letter annotation for: secondary_structure
Seq('IIEQFIDYLR', IUPACProtein())
If you omit both, then you get a copy of the original record (although
lacking the annotations and dbxrefs):
>>> print(rec[:])
ID: 1JOY
Name: EnvZ
Description: Homodimeric domain of EnvZ from E. coli
Number of features: 1
Per letter annotation for: secondary_structure
Seq('MAAGVKQLADDRTLLMAGVSHDLRTPLTRIRLATEMMSEQDGYLAESINKDIEE...YLR', IUPACProtein())
Finally, indexing with a simple integer is shorthand for pulling out
that letter from the sequence directly:
>>> rec[5]
'K'
>>> rec.seq[5]
'K'
"""
if isinstance(index, int):
# NOTE - The sequence level annotation like the id, name, etc
# do not really apply to a single character. However, should
# we try and expose any per-letter-annotation here? If so how?
return self.seq[index]
elif isinstance(index, slice):
if self.seq is None:
raise ValueError("If the sequence is None, we cannot slice it.")
parent_length = len(self)
answer = self.__class__(self.seq[index],
id=self.id,
name=self.name,
description=self.description)
# TODO - The desription may no longer apply.
# It would be safer to change it to something
# generic like "edited" or the default value.
# Don't copy the annotation dict and dbxefs list,
# they may not apply to a subsequence.
# answer.annotations = dict(self.annotations.items())
# answer.dbxrefs = self.dbxrefs[:]
# TODO - Review this in light of adding SeqRecord objects?
# TODO - Cope with strides by generating ambiguous locations?
start, stop, step = index.indices(parent_length)
if step == 1:
# Select relevant features, add them with shifted locations
# assert str(self.seq)[index] == str(self.seq)[start:stop]
for f in self.features:
if f.ref or f.ref_db:
# TODO - Implement this (with lots of tests)?
import warnings
warnings.warn("When slicing SeqRecord objects, any "
"SeqFeature referencing other sequences (e.g. "
"from segmented GenBank records) are ignored.")
continue
if start <= f.location.nofuzzy_start \
and f.location.nofuzzy_end <= stop:
answer.features.append(f._shift(-start))
# Slice all the values to match the sliced sequence
# (this should also work with strides, even negative strides):
for key, value in self.letter_annotations.items():
answer._per_letter_annotations[key] = value[index]
return answer
raise ValueError("Invalid index")
def __iter__(self):
    """Iterate over the letters in the sequence.
    For example, using Bio.SeqIO to read in a protein FASTA file:
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Fasta/loveliesbleeding.pro", "fasta")
    >>> for amino in record:
    ...     print(amino)
    ...     if amino == "L": break
    X
    A
    G
    L
    >>> print(record.seq[3])
    L
    This is just a shortcut for iterating over the sequence directly:
    >>> for amino in record.seq:
    ...     print(amino)
    ...     if amino == "L": break
    X
    A
    G
    L
    >>> print(record.seq[3])
    L
    Note that this does not facilitate iteration together with any
    per-letter-annotation. However, you can achieve that using the
    python zip function on the record (or its sequence) and the relevant
    per-letter-annotation:
    >>> from Bio import SeqIO
    >>> rec = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
    >>> print("%s %s" % (rec.id, rec.seq))
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print(list(rec.letter_annotations))
    ['solexa_quality']
    >>> for nuc, qual in zip(rec, rec.letter_annotations["solexa_quality"]):
    ...     if qual > 35:
    ...         print("%s %i" % (nuc, qual))
    A 40
    C 39
    G 38
    T 37
    A 36
    You may agree that using zip(rec.seq, ...) is more explicit than using
    zip(rec, ...) as shown above.
    """
    # Simply delegate to the underlying Seq object's iterator; any
    # per-letter-annotation is deliberately NOT included here.
    return iter(self.seq)
def __contains__(self, char):
    """Implements the 'in' keyword, searches the sequence.
    e.g.
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
    >>> "GAATTC" in record
    False
    >>> "AAA" in record
    True
    This essentially acts as a proxy for using "in" on the sequence:
    >>> "GAATTC" in record.seq
    False
    >>> "AAA" in record.seq
    True
    Note that you can also use Seq objects as the query,
    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> Seq("AAA") in record
    True
    >>> Seq("AAA", generic_dna) in record
    True
    See also the Seq object's __contains__ method.
    """
    # Proxy straight through to the Seq object (which handles both plain
    # strings and Seq-like queries, including alphabet compatibility).
    return char in self.seq
def __str__(self):
    """A human readable summary of the record and its annotation (string).
    The python built in function str works by calling the object's ___str__
    method. e.g.
    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import IUPAC
    >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
    ...                        IUPAC.protein),
    ...                    id="YP_025292.1", name="HokC",
    ...                    description="toxic membrane protein, small")
    >>> print(str(record))
    ID: YP_025292.1
    Name: HokC
    Description: toxic membrane protein, small
    Number of features: 0
    Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
    In this example you don't actually need to call str explicity, as the
    print command does this automatically:
    >>> print(record)
    ID: YP_025292.1
    Name: HokC
    Description: toxic membrane protein, small
    Number of features: 0
    Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
    Note that long sequences are shown truncated.
    """
    # Build the summary one line at a time; optional fields are only
    # reported when they are non-empty.
    parts = []
    if self.id:
        parts.append("ID: %s" % self.id)
    if self.name:
        parts.append("Name: %s" % self.name)
    if self.description:
        parts.append("Description: %s" % self.description)
    if self.dbxrefs:
        parts.append("Database cross-references: " + ", ".join(self.dbxrefs))
    parts.append("Number of features: %i" % len(self.features))
    # One line per annotation entry, in the dictionary's iteration order.
    for key in self.annotations:
        parts.append("/%s=%s" % (key, str(self.annotations[key])))
    if self.letter_annotations:
        parts.append("Per letter annotation for: "
                     + ", ".join(self.letter_annotations))
    # Don't want to include the entire sequence,
    # and showing the alphabet is useful:
    parts.append(repr(self.seq))
    return "\n".join(parts)
def __repr__(self):
    """A concise summary of the record for debugging (string).
    The python built in function repr works by calling the object's ___repr__
    method. e.g.
    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import generic_protein
    >>> rec = SeqRecord(Seq("MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKAT"
    ...                     +"GEMKEQTEWHRVVLFGKLAEVASEYLRKGSQVYIEGQLRTRKWTDQ"
    ...                     +"SGQDRYTTEVVVNVGGTMQMLGGRQGGGAPAGGNIGGGQPQGGWGQ"
    ...                     +"PQQPQGGNQFSGGAQSRPQQSAPAAPSNEPPMDFDDDIPF",
    ...                     generic_protein),
    ...                 id="NP_418483.1", name="b4059",
    ...                 description="ssDNA-binding protein",
    ...                 dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
    >>> print(repr(rec))
    SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
    At the python prompt you can also use this shorthand:
    >>> rec
    SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF', ProteinAlphabet()), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
    Note that long sequences are shown truncated. Also note that any
    annotations, letter_annotations and features are not shown (as they
    would lead to a very long string).
    """
    # Only the core attributes are included; annotations, features and
    # letter_annotations would make the repr unmanageably long.
    details = "seq=%r, id=%r, name=%r, description=%r, dbxrefs=%r" % (
        self.seq, self.id, self.name, self.description, self.dbxrefs)
    return "%s(%s)" % (self.__class__.__name__, details)
def format(self, format):
    r"""Returns the record as a string in the specified file format.
    The format should be a lower case string supported as an output
    format by Bio.SeqIO, which is used to turn the SeqRecord into a
    string. e.g.
    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Alphabet import IUPAC
    >>> record = SeqRecord(Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
    ...                        IUPAC.protein),
    ...                    id="YP_025292.1", name="HokC",
    ...                    description="toxic membrane protein")
    >>> record.format("fasta")
    '>YP_025292.1 toxic membrane protein\nMKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF\n'
    >>> print(record.format("fasta"))
    >YP_025292.1 toxic membrane protein
    MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
    <BLANKLINE>
    The python print command automatically appends a new line, meaning
    in this example a blank line is shown. If you look at the string
    representation you can see there is a trailing new line (shown as
    slash n) which is important when writing to a file or if
    concatenating multiple sequence strings together.
    Note that this method will NOT work on every possible file format
    supported by Bio.SeqIO (e.g. some are for multiple sequences only).
    """
    # See also the __format__ added for Python 2.6 / 3.0, PEP 3101
    # See also the Bio.Align.Generic.Alignment class and its format()
    # Delegate to __format__, which routes the output through
    # Bio.SeqIO.write on an in-memory handle.
    return self.__format__(format)
def __format__(self, format_spec):
    """Returns the record as a string in the specified file format.
    This method supports the python format() function added in
    Python 2.6/3.0. The format_spec should be a lower case string
    supported by Bio.SeqIO as an output file format. See also the
    SeqRecord's format() method.
    Under Python 3 please note that for binary formats a bytes
    string is returned, otherwise a (unicode) string is returned.
    """
    if not format_spec:
        # Follow python convention and default to using __str__
        return str(self)
    from Bio import SeqIO
    # Binary file formats need a bytes handle; text formats a str one.
    if format_spec in SeqIO._BinaryFormats:
        from io import BytesIO
        buffer = BytesIO()
    else:
        from Bio._py3k import StringIO
        buffer = StringIO()
    SeqIO.write(self, buffer, format_spec)
    return buffer.getvalue()
def __len__(self):
    """Returns the length of the sequence.
    For example, using Bio.SeqIO to read in a FASTA nucleotide file:
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Fasta/sweetpea.nu", "fasta")
    >>> len(record)
    309
    >>> len(record.seq)
    309
    """
    # The record's length is defined as the length of its sequence.
    return len(self.seq)
# Python 3:
def __bool__(self):
    """Boolean value of an instance of this class (True).
    This behaviour is for backwards compatibility, since until the
    __len__ method was added, a SeqRecord always evaluated as True.
    Note that in comparison, a Seq object will evaluate to False if it
    has a zero length sequence.
    WARNING: The SeqRecord may in future evaluate to False when its
    sequence is of zero length (in order to better match the Seq
    object behaviour)!
    """
    # Deliberately always True, overriding the default behaviour of
    # falling back on __len__ (which would make empty records falsy).
    return True
# Python 2:
__nonzero__ = __bool__
def __add__(self, other):
    """Add another sequence or string to this sequence.
    The other sequence can be a SeqRecord object, a Seq object (or
    similar, e.g. a MutableSeq) or a plain Python string. If you add
    a plain string or a Seq (like) object, the new SeqRecord will simply
    have this appended to the existing data. However, any per letter
    annotation will be lost:
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
    >>> print("%s %s" % (record.id, record.seq))
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print(list(record.letter_annotations))
    ['solexa_quality']
    >>> new = record + "ACT"
    >>> print("%s %s" % (new.id, new.seq))
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNNACT
    >>> print(list(new.letter_annotations))
    []
    The new record will attempt to combine the annotation, but for any
    ambiguities (e.g. different names) it defaults to omitting that
    annotation.
    >>> from Bio import SeqIO
    >>> with open("GenBank/pBAD30.gb") as handle:
    ...     plasmid = SeqIO.read(handle, "gb")
    >>> print("%s %i" % (plasmid.id, len(plasmid)))
    pBAD30 4923
    Now let's cut the plasmid into two pieces, and join them back up the
    other way round (i.e. shift the starting point on this plasmid, have
    a look at the annotated features in the original file to see why this
    particular split point might make sense):
    >>> left = plasmid[:3765]
    >>> right = plasmid[3765:]
    >>> new = right + left
    >>> print("%s %i" % (new.id, len(new)))
    pBAD30 4923
    >>> str(new.seq) == str(right.seq + left.seq)
    True
    >>> len(new.features) == len(left.features) + len(right.features)
    True
    When we add the left and right SeqRecord objects, their annotation
    is all consistent, so it is all conserved in the new SeqRecord:
    >>> new.id == left.id == right.id == plasmid.id
    True
    >>> new.name == left.name == right.name == plasmid.name
    True
    >>> new.description == plasmid.description
    True
    >>> new.annotations == left.annotations == right.annotations
    True
    >>> new.letter_annotations == plasmid.letter_annotations
    True
    >>> new.dbxrefs == left.dbxrefs == right.dbxrefs
    True
    However, we should point out that when we sliced the SeqRecord,
    any annotations dictionary or dbxrefs list entries were lost.
    You can explicitly copy them like this:
    >>> new.annotations = plasmid.annotations.copy()
    >>> new.dbxrefs = plasmid.dbxrefs[:]
    """
    if not isinstance(other, SeqRecord):
        # Assume it is a string or a Seq.
        # Note can't transfer any per-letter-annotations
        # All the record-level annotation is kept unchanged, since only
        # extra letters (with no annotation of their own) were appended.
        return SeqRecord(self.seq + other,
                         id=self.id, name=self.name,
                         description=self.description,
                         features=self.features[:],
                         annotations=self.annotations.copy(),
                         dbxrefs=self.dbxrefs[:])
    # Adding two SeqRecord objects... must merge annotation.
    answer = SeqRecord(self.seq + other.seq,
                       features=self.features[:],
                       dbxrefs=self.dbxrefs[:])
    # Will take all the features and all the db cross refs,
    # The other record's features must be shifted right by our length.
    l = len(self)
    for f in other.features:
        answer.features.append(f._shift(l))
    del l
    # Merge cross-references, avoiding duplicates.
    for ref in other.dbxrefs:
        if ref not in answer.dbxrefs:
            answer.dbxrefs.append(ref)
    # Take common id/name/description/annotation
    # (anything that disagrees between the two records is omitted).
    if self.id == other.id:
        answer.id = self.id
    if self.name == other.name:
        answer.name = self.name
    if self.description == other.description:
        answer.description = self.description
    for k, v in self.annotations.items():
        if k in other.annotations and other.annotations[k] == v:
            answer.annotations[k] = v
    # Can append matching per-letter-annotation
    # (only keys present in BOTH records survive the concatenation).
    for k, v in self.letter_annotations.items():
        if k in other.letter_annotations:
            answer.letter_annotations[k] = v + other.letter_annotations[k]
    return answer
def __radd__(self, other):
    """Add another sequence or string to this sequence (from the left).
    This method handles adding a Seq object (or similar, e.g. MutableSeq)
    or a plain Python string (on the left) to a SeqRecord (on the right).
    See the __add__ method for more details, but for example:
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
    >>> print("%s %s" % (record.id, record.seq))
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print(list(record.letter_annotations))
    ['solexa_quality']
    >>> new = "ACT" + record
    >>> print("%s %s" % (new.id, new.seq))
    slxa_0001_1_0001_01 ACTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print(list(new.letter_annotations))
    []
    """
    # SeqRecord + SeqRecord is always dispatched to the LEFT operand's
    # __add__, so reaching here with a SeqRecord indicates a logic error.
    if isinstance(other, SeqRecord):
        raise RuntimeError("This should have happened via the __add__ of "
                           "the other SeqRecord being added!")
    # Assume it is a string or a Seq.
    # Note can't transfer any per-letter-annotations.
    # Our features move right by the length of the prepended sequence.
    shift = len(other)
    shifted_features = [f._shift(shift) for f in self.features]
    return SeqRecord(other + self.seq,
                     id=self.id, name=self.name,
                     description=self.description,
                     features=shifted_features,
                     annotations=self.annotations.copy(),
                     dbxrefs=list(self.dbxrefs))
def upper(self):
    """Returns a copy of the record with an upper case sequence.
    All the annotation is preserved unchanged. e.g.
    >>> from Bio.Alphabet import generic_dna
    >>> from Bio.Seq import Seq
    >>> from Bio.SeqRecord import SeqRecord
    >>> record = SeqRecord(Seq("acgtACGT", generic_dna), id="Test",
    ...                    description = "Made up for this example")
    >>> record.letter_annotations["phred_quality"] = [1, 2, 3, 4, 5, 6, 7, 8]
    >>> print(record.upper().format("fastq"))
    @Test Made up for this example
    ACGTACGT
    +
    "#$%&'()
    <BLANKLINE>
    Naturally, there is a matching lower method:
    >>> print(record.lower().format("fastq"))
    @Test Made up for this example
    acgtacgt
    +
    "#$%&'()
    <BLANKLINE>
    """
    # Only the sequence changes; every piece of annotation is copied
    # (shallow copies, so the containers are fresh but entries shared).
    copied = dict(id=self.id,
                  name=self.name,
                  description=self.description,
                  dbxrefs=list(self.dbxrefs),
                  features=list(self.features),
                  annotations=dict(self.annotations),
                  letter_annotations=dict(self.letter_annotations))
    return SeqRecord(self.seq.upper(), **copied)
def lower(self):
    """Returns a copy of the record with a lower case sequence.
    All the annotation is preserved unchanged. e.g.
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Fasta/aster.pro", "fasta")
    >>> print(record.format("fasta"))
    >gi|3298468|dbj|BAA31520.1| SAMIPF
    GGHVNPAVTFGAFVGGNITLLRGIVYIIAQLLGSTVACLLLKFVTNDMAVGVFSLSAGVG
    VTNALVFEIVMTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI
    <BLANKLINE>
    >>> print(record.lower().format("fasta"))
    >gi|3298468|dbj|BAA31520.1| SAMIPF
    gghvnpavtfgafvggnitllrgivyiiaqllgstvaclllkfvtndmavgvfslsagvg
    vtnalvfeivmtfglvytvyataidpkkgslgtiapiaigfivgani
    <BLANKLINE>
    To take a more annotation rich example,
    >>> from Bio import SeqIO
    >>> old = SeqIO.read("EMBL/TRBG361.embl", "embl")
    >>> len(old.features)
    3
    >>> new = old.lower()
    >>> len(old.features) == len(new.features)
    True
    >>> old.annotations["organism"] == new.annotations["organism"]
    True
    >>> old.dbxrefs == new.dbxrefs
    True
    """
    # Mirror image of upper(): lower-case the sequence, shallow-copy
    # every annotation container so the new record is independent.
    copied = dict(id=self.id,
                  name=self.name,
                  description=self.description,
                  dbxrefs=list(self.dbxrefs),
                  features=list(self.features),
                  annotations=dict(self.annotations),
                  letter_annotations=dict(self.letter_annotations))
    return SeqRecord(self.seq.lower(), **copied)
def reverse_complement(self, id=False, name=False, description=False,
                       features=True, annotations=False,
                       letter_annotations=True, dbxrefs=False):
    """Returns new SeqRecord with reverse complement sequence.
    You can specify the returned record's id, name and description as
    strings, or True to keep that of the parent, or False for a default.
    You can specify the returned record's features with a list of
    SeqFeature objects, or True to keep that of the parent, or False to
    omit them. The default is to keep the original features (with the
    strand and locations adjusted).
    You can also specify both the returned record's annotations and
    letter_annotations as dictionaries, True to keep that of the parent,
    or False to omit them. The default is to keep the original
    annotations (with the letter annotations reversed).
    To show what happens to the pre-letter annotations, consider an
    example Solexa variant FASTQ file with a single entry, which we'll
    read in as a SeqRecord:
    >>> from Bio import SeqIO
    >>> record = SeqIO.read("Quality/solexa_faked.fastq", "fastq-solexa")
    >>> print("%s %s" % (record.id, record.seq))
    slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
    >>> print(list(record.letter_annotations))
    ['solexa_quality']
    >>> print(record.letter_annotations["solexa_quality"])
    [40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
    Now take the reverse complement,
    >>> rc_record = record.reverse_complement(id=record.id+"_rc")
    >>> print("%s %s" % (rc_record.id, rc_record.seq))
    slxa_0001_1_0001_01_rc NNNNNNACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT
    Notice that the per-letter-annotations have also been reversed,
    although this may not be appropriate for all cases.
    >>> print(rc_record.letter_annotations["solexa_quality"])
    [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
    Now for the features, we need a different example. Parsing a GenBank
    file is probably the easiest way to get an nice example with features
    in it...
    >>> from Bio import SeqIO
    >>> with open("GenBank/pBAD30.gb") as handle:
    ...     plasmid = SeqIO.read(handle, "gb")
    >>> print("%s %i" % (plasmid.id, len(plasmid)))
    pBAD30 4923
    >>> plasmid.seq
    Seq('GCTAGCGGAGTGTATACTGGCTTACTATGTTGGCACTGATGAGGGTGTCAGTGA...ATG', IUPACAmbiguousDNA())
    >>> len(plasmid.features)
    13
    Now, let's take the reverse complement of this whole plasmid:
    >>> rc_plasmid = plasmid.reverse_complement(id=plasmid.id+"_rc")
    >>> print("%s %i" % (rc_plasmid.id, len(rc_plasmid)))
    pBAD30_rc 4923
    >>> rc_plasmid.seq
    Seq('CATGGGCAAATATTATACGCAAGGCGACAAGGTGCTGATGCCGCTGGCGATTCA...AGC', IUPACAmbiguousDNA())
    >>> len(rc_plasmid.features)
    13
    Let's compare the first CDS feature - it has gone from being the
    second feature (index 1) to the second last feature (index -2), its
    strand has changed, and the location switched round.
    >>> print(plasmid.features[1])
    type: CDS
    location: [1081:1960](-)
    qualifiers:
        Key: label, Value: ['araC']
        Key: note, Value: ['araC regulator of the arabinose BAD promoter']
        Key: vntifkey, Value: ['4']
    <BLANKLINE>
    >>> print(rc_plasmid.features[-2])
    type: CDS
    location: [2963:3842](+)
    qualifiers:
        Key: label, Value: ['araC']
        Key: note, Value: ['araC regulator of the arabinose BAD promoter']
        Key: vntifkey, Value: ['4']
    <BLANKLINE>
    You can check this new location, based on the length of the plasmid:
    >>> len(plasmid) - 1081
    3842
    >>> len(plasmid) - 1960
    2963
    Note that if the SeqFeature annotation includes any strand specific
    information (e.g. base changes for a SNP), this information is not
    ammended, and would need correction after the reverse complement.
    Note trying to reverse complement a protein SeqRecord raises an
    exception:
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> protein_rec = SeqRecord(Seq("MAIVMGR", IUPAC.protein), id="Test")
    >>> protein_rec.reverse_complement()
    Traceback (most recent call last):
    ...
    ValueError: Proteins do not have complements!
    Also note you can reverse complement a SeqRecord using a MutableSeq:
    >>> from Bio.SeqRecord import SeqRecord
    >>> from Bio.Seq import MutableSeq
    >>> from Bio.Alphabet import generic_dna
    >>> rec = SeqRecord(MutableSeq("ACGT", generic_dna), id="Test")
    >>> rec.seq[0] = "T"
    >>> print("%s %s" % (rec.id, rec.seq))
    Test TCGT
    >>> rc = rec.reverse_complement(id=True)
    >>> print("%s %s" % (rc.id, rc.seq))
    Test ACGA
    """
    from Bio.Seq import MutableSeq  # Lazy to avoid circular imports
    if isinstance(self.seq, MutableSeq):
        # Currently the MutableSeq reverse complement is in situ
        # so convert to an immutable Seq first to avoid mutating self.
        answer = SeqRecord(self.seq.toseq().reverse_complement())
    else:
        answer = SeqRecord(self.seq.reverse_complement())
    # For each of id/name/description: a string replaces the value, True
    # copies the parent's value, False leaves the constructor default.
    # NOTE(review): ``basestring`` is presumably supplied by this module's
    # Python 2/3 compatibility imports (not visible here) -- confirm.
    if isinstance(id, basestring):
        answer.id = id
    elif id:
        answer.id = self.id
    if isinstance(name, basestring):
        answer.name = name
    elif name:
        answer.name = self.name
    if isinstance(description, basestring):
        answer.description = description
    elif description:
        answer.description = self.description
    if isinstance(dbxrefs, list):
        answer.dbxrefs = dbxrefs
    elif dbxrefs:
        # Copy the old dbxrefs
        answer.dbxrefs = self.dbxrefs[:]
    if isinstance(features, list):
        answer.features = features
    elif features:
        # Copy the old features, adjusting location and string
        l = len(answer)
        answer.features = [f._flip(l) for f in self.features]
        # The old list should have been sorted by start location,
        # reversing it will leave it sorted by what is now the end position,
        # so we need to resort in case of overlapping features.
        # NOTE - In the common case of gene before CDS (and similar) with
        # the exact same locations, this will still maintain gene before CDS
        answer.features.sort(key=lambda x: x.location.start.position)
    if isinstance(annotations, dict):
        answer.annotations = annotations
    elif annotations:
        # Copy the old annotations,
        answer.annotations = self.annotations.copy()
    if isinstance(letter_annotations, dict):
        answer.letter_annotations = letter_annotations
    elif letter_annotations:
        # Copy the old per letter annotations, reversing them
        # (assign via the private dict as the lengths already match).
        for key, value in self.letter_annotations.items():
            answer._per_letter_annotations[key] = value[::-1]
    return answer
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    from Bio._utils import run_doctest
    run_doctest()
| Ambuj-UF/ConCat-1.0 | src/Utils/Bio/SeqRecord.py | Python | gpl-2.0 | 46,653 | [
"BioPerl",
"Biopython"
] | 987326822497c6e694b917cb4d8a54609917b5bd834b48371e329ca64a19783e |
import os

# Absolute directory of this package, with a trailing "/" so that the
# file names below can be appended by plain string concatenation.
output_path = os.path.dirname(os.path.abspath(__file__)) + "/"
# Pre-built paths to the test output files stored next to this module.
output_test_flow = output_path + "test.hdf5"
output_test_fib = output_path + "test.fib"
output_test_vtk = output_path + "test.vtk"
| StongeEtienne/trimeshpy | trimeshpy/data/out/__init__.py | Python | mit | 205 | [
"VTK"
] | c6d0fb8fd67aa723ff9297b8b7bc2663b9050ddb39006118ea785841d1698413 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developpers.
# =============================================================================
# @file ostap/math/ve.py
# Simple ``value-with-error'' concept
# @code
# a = VE (1,0.1**2)
# b = VE (2,0.2**2)
# c = a + b
# @endcode
# @see Ostap::Math::ValueWithError
# =============================================================================
r""" Simple `value-with-error' concept
>>> a = VE (1,0.1**2)
>>> b = VE (2,0.2**2)
>>> c = a + b
"""
# =============================================================================
__all__ = (
'VE' , # Value with error
'VVE' , # vector of values with errors
)
# =============================================================================
import ROOT
from ostap.math.base import Ostap, std , iszero, isequal
from builtins import range
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
# Module logger: use an explicit name when run as a script.
if '__main__' == __name__ : logger = getLogger ( 'ostap.math.ve' )
else                       : logger = getLogger ( __name__ )
# =============================================================================
# Shorthands for the C++ value-with-error type and std::vector's of it.
VE = Ostap.Math.ValueWithError
VVE = std.vector ( VE )
VE.Vector = VVE
# Print the C++ vectors as plain python lists for readability.
VE.Vector .__str__ = lambda s : str( [ i for i in s ])
VE.Vector .__repr__ = lambda s : str( [ i for i in s ])
VVVE = std.vector( VVE )
VVE.Vector = VVVE
VVE.Vector . __str__ = lambda s : str( [ i for i in s ] )
VVE.Vector . __repr__ = lambda s : str( [ i for i in s ] )
VVE.Vector . __len__ = lambda s : s.size ()
## Sum the contents of the vector
def _ve_sum_ ( s ) :
    """Sum the contents of the vector.
    >>> v = ...
    >>> s = v.sum()
    """
    # Delegate to the C++ implementation Ostap::Math::sum.
    return Ostap.Math.sum ( s )
# =============================================================================
## Sum the contents of the vector
def _ve_asum_ ( s ) :
    """Sum the contents of the vector.
    >>> v = ...
    >>> s = v.abssum()
    """
    # Delegate to the C++ implementation Ostap::Math::abssum
    # (sum of absolute values).
    return Ostap.Math.abssum ( s )
# Append the C++ documentation to the python docstrings.
_ve_sum_  . __doc__ += '\n' + Ostap.Math.sum    .__doc__
_ve_asum_ . __doc__ += '\n' + Ostap.Math.abssum .__doc__
# Use the C++ toString() for printing all "with error" value types,
# keeping the original under ``_new_str_`` (installed only once).
for t in ( Ostap.Math.ValueWithError         ,
           Ostap.Math.Point3DWithError       ,
           Ostap.Math.Vector3DWithError      ,
           Ostap.Math.LorentzVectorWithError ) :
    if not hasattr ( t , '_new_str_' ) :
        t._new_str_ = t.toString
        t.__str__   = t.toString
        t.__repr__  = t.toString
# =============================================================================
## Calculate the "effective" background-to-signal ratio from the valeu
# and its uncertainty using the identity
# \f$ \frac{\sigma(S)}{S} = \frac{\sqrt{S}}{S}\sqrt{1+\frac{B}{S}}\f$.
# From this identity one gets
# \f$ \left.\frac{B}{S}\right|_{\mathrm{eff}} \equiv \frac{\sigma^2(S)}{S} -1 \f$
# @param v the value
# @return the effective backround-to-signal ratio or -1
# @code
# v = VE( ... )
# print 'B/S=', v.b2s()
# @endcode
def _ve_b2s_(s):
    """Get background-over-signal ratio B/S estimate from the equation:
    error(S) = 1/sqrt(S) * sqrt ( 1 + B/S).
    >>> v = ...
    >>> b2s = v.b2s() ## get B/S estimate
    """
    invalid = VE(-1, 0)
    # A non-positive (or numerically zero) value gives no estimate.
    value = s.value()
    if value <= 0 or iszero(value):
        return invalid
    # Likewise for the variance.
    cov2 = s.cov2()
    if cov2 <= 0 or iszero(cov2):
        return invalid
    if isequal(value, cov2):
        return VE(1, 0)
    if cov2 < value:
        # Variance below the Poisson expectation: inconsistent input.
        return invalid
    # B/S = sigma^2(S)/S - 1, with error propagation via VE arithmetic.
    return cov2 / s - 1.0
# =============================================================================
## Calculate the "effective purity" ratio using the identity
# \f$ p_{\mathrm{eff}} = \frac{S}{S+B} = \frac{1}{1+\frac{B}{S}}\f$
# and the effective "background-to-signal" ratio is estimated as
# \f$ \left.\frac{B}{S}\right|_{\mathrm{eff}} = \frac{\sigma^2(S)}{S} -1 \f$,
# finally one gets
# \f$ p_{\mathrm{eff}} \equiv \frac{S}{\sigma^2(S)}\f$
# @see Ostap::Math::b2s
# @see Ostap::Math::purity
# @param v the value
# @return the effective purity or -1
def _ve_purity_(s):
    """Calculate the ``effective purity'' ratio using the identity
    p = S/(S+B) = 1/( 1 + B/S ),
    - and the effective ``background-to-signal'' ratio B/S is estimated as
    B/S = sigma^2(S)/S - 1
    - Finally one gets
    p = S / sigma^2(S)
    - see Ostap::Math::b2s
    - see Ostap::Math::purity
    """
    invalid = VE(-1, 0)
    # A non-positive (or numerically zero) value gives no estimate.
    value = s.value()
    if value <= 0 or iszero(value):
        return invalid
    # Likewise for the variance.
    cov2 = s.cov2()
    if cov2 <= 0 or iszero(cov2):
        return invalid
    if isequal(value, cov2):
        return VE(1, 0)
    if cov2 < value:
        # Variance below the Poisson expectation: inconsistent input.
        return invalid
    # p = S / sigma^2(S), with error propagation via VE arithmetic.
    return s / cov2
# =============================================================================
## Get precision with ``some'' error estimate.
def _ve_prec2_ ( s ) :
    """Get precision with ``some'' error estimate.
    >>> v = ...
    >>> p = v.prec()
    """
    if not hasattr ( s , 'value' ) :
        # Promote a plain number to a VE with zero error and re-dispatch.
        # FIX: the original called the undefined name ``_prec_`` here,
        # raising NameError for any non-VE input.
        return _ve_prec2_ ( VE ( s , 0 ) )
    #
    c = s.error ()
    #
    # Negative error or zero value: precision is undefined.
    if   c < 0 or s.value() == 0 : return VE(-1,0)
    elif c == 0                  : return VE( 0,0)
    #
    # Relative precision: |error / value| (propagated via VE arithmetic).
    return c / abs ( s )
# Attach the statistics helpers as methods of the ValueWithError class.
VE . b2s       = _ve_b2s_
VE . prec      = _ve_prec2_
VE . precision = _ve_prec2_
VE . purity    = _ve_purity_
# Fuzzy "less or equal" comparator for doubles (C++ functor instance).
_is_le_ = Ostap.Math.LessOrEqual ( 'double' )()
# =============================================================================
## Comparison of ValueWithError object with other objects
# @attention it is comparison by value only, errors are ignored
def _ve_lt_(self, other):
    """Comparison of ValueWithError object with other objects
    >>> a = VE( ... )
    >>> print a < b
    Attention: comparison by value only!
    """
    # Errors are deliberately ignored: only the central values compare.
    lhs = float(self)
    rhs = float(other)
    return lhs < rhs
# =============================================================================
## Comparison of ValueWithError object with other objects
# @attention it is comparison by value only, errors are ignored
def _ve_le_ ( self , other ) :
    """Comparison of ValueWithError object with other objects
    >>> a = VE( ... )
    >>> print a <= b
    Attention: comparison by value only!
    """
    # Uses the fuzzy C++ comparator so nearly-equal doubles count as equal.
    return _is_le_ ( float(self) , float(other) )
# =============================================================================
## Comparison of ValueWithError object with other objects
# @attention it is comparison by value only, errors are ignored
def _ve_gt_(self, other):
    """Comparison of ValueWithError object with other objects
    >>> a = VE( ... )
    >>> print a > b
    Attention: comparison by value only!
    """
    # Errors are deliberately ignored: only the central values compare.
    lhs = float(self)
    rhs = float(other)
    return rhs < lhs
# =============================================================================
## Comparison of ValueWithError object with other objects
# @attention it is comparison by value only, errors are ignored
def _ve_ge_ ( self , other ) :
    """Comparison of ValueWithError object with other objects
    >>> a = VE( ... )
    >>> print a >= b
    Attention: comparison by value only!
    """
    # a >= b is expressed as b <= a through the fuzzy C++ comparator.
    return _is_le_ ( float(other) , float(self) )
# Install the ordering operators on the ValueWithError class.
VE.__lt__ = _ve_lt_
VE.__le__ = _ve_le_
VE.__gt__ = _ve_gt_
VE.__ge__ = _ve_ge_
# Fuzzy equality / zero-test functors for doubles (C++ functor instances).
_is_equal_ = Ostap.Math.Equal_To ( 'double' )()
_is_zero_  = Ostap.Math.Zero     ( 'double' )()
# The ROOT bindings may only provide Python-2 style __div__ and friends;
# alias them to the Python-3 "true division" slots when missing.
if not hasattr ( VE , '__truediv__'  ) : VE. __truediv__ = VE. __div__
if not hasattr ( VE , '__itruediv__' ) : VE.__itruediv__ = VE.__idiv__
if not hasattr ( VE , '__rtruediv__' ) : VE.__rtruediv__ = VE.__rdiv__
# =============================================================================
## Equality for ValueWithError objects
def _ve_eq_ ( self , other ) :
    """Equality for ValueWithError objects
    >>> a = VE( ... )
    >>> b = VE( ... )
    >>> print a == b
    """
    if isinstance ( other , VE ) :
        # VE vs VE: both values and covariances must (fuzzily) match.
        v1 = self .value()
        v2 = other.value()
        return _is_equal_ ( v1 , v2 ) and _is_equal_ ( self.cov2() , other.cov2() )
    elif _is_zero_ ( self.cov2() ) :
        # A VE with zero uncertainty degrades to a plain number comparison.
        return _is_equal_ ( float ( self ) , float ( other ) )
    else :
        # Comparing an uncertain value with a plain number is ill-defined.
        raise NotImplementedError ( ' Equality for %s and %s is not implemented' % ( self , other ) )
# =============================================================================
## inequality for ValueWithError objects
def _ve_ne_ ( self , other ) :
    """Inequality for ValueWithError objects
    >>> a = VE( ... )
    >>> b = VE( ... )
    >>> print a != b
    """
    try:
        # Defined as the negation of equality (see _ve_eq_).
        return not self == other
    except NotImplementedError :
        # FIX: the original caught ``NotImplemented``, which is a value,
        # not an exception class -- in Python 3 that itself raises
        # TypeError.  _ve_eq_ raises NotImplementedError, so catch that.
        raise NotImplementedError ( ' Inequality for %s and %s is not implemented' % ( self , other ) )
# Install the (in)equality operators on the ValueWithError class.
VE . __eq__ = _ve_eq_
VE . __ne__ = _ve_ne_
# ==============================================================================
## get easy (and coherent) way to access min/max for
# the value with error object: (value-n*error,value+n*error)
# @code
# ve = VE(2,2)
# print ve.minmax()
# print ve.minmax(2)
# @endcode
def _ve_minmax_(s, n=1):
    """Get an easy and coherent way to access ``min/max'' for
    the value with error object: (value-n*error,value+n*error)
    >>> ve = VE(2,2)
    >>> print ve.minmax()
    >>> print ve.minmax(2)
    """
    centre = s.value()
    err = s.error()
    # Non-positive error: the interval degenerates to a single point.
    if err <= 0:
        return centre, centre
    lo = centre - err * n
    hi = centre + err * n
    # A negative ``n`` flips the bounds; return them in ascending order.
    if hi < lo:
        lo, hi = hi, lo
    return lo, hi
# Expose minmax() as a method on ValueWithError.
VE.minmax = _ve_minmax_
# =============================================================================
## hashing for VE object
# @code
# v = VE ( ... )
# h = hash ( v )
# @endcode
def _ve_hash_(v):
    """Hashing function for VE objects
    >>> v = VE ( ... )
    >>> h = hash ( v )
    """
    # Hash the (value, covariance) pair so equal VEs hash alike.
    state = (v.value(), v.cov2())
    return hash(state)
# Make VE objects usable as dictionary keys / set members.
VE.__hash__ = _ve_hash_
# =============================================================================
from random import gauss as _gauss
# =============================================================================
## get the (gaussian) random number according to parameters
#
# @code
# >>> v = ... ## the number with error
#
# ## get 100 random numbers
# >>> for i in range ( 0, 100 ) : print v.gauss()
#
# ## get only non-negative numbers
# >>> for j in range ( 0, 100 ) : print v.gauss( lambda s : s > 0 )
#
# @endcode
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2013-08-10
#
def _ve_gauss_ ( s , accept = lambda a : True , nmax = 1000 ) :
""" Get the gaussian random number
>>> v = ... ## the number with error
## get 100 random numbers
>>> for i in range ( 0, 100 ) : print v.gauss()
## get only non-negative numbers
>>> for j in range ( 0, 100 ) : print v.gauss( lambda s : s > 0 )
"""
#
if 0 >= s.cov2() or iszero ( s.cov2 () ) : return s.value() ## return
#
v = s.value ()
e = s.error ()
#
for i in range ( nmax ) :
r = _gauss ( v , e )
if accept ( r ) : return r
logger.warning("Can'n generate proper random number %s" % s )
return v
# =============================================================================
from ostap.math.random_ext import poisson as _poisson
# =============================================================================
## generate poisson random number according to parameters
# @code
# >>> v = ... ## the number with error
#
# ## get 100 random numbers
# >>> for i in range ( 0, 100 ) : print v.poisson ( fluctuate = True )
#
# ## get only odd numbers
# >>> for j in range ( 0, 100 ) : print v.poisson ( fluctuate = True , accept = lambda s : 1 ==s%2 )
#
# ## do not fluctuate the mean of poisson:
# >>> for j in range ( 0, 100 ) : print v.poisson ( fluctuate = False )
#
# @endcode
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2013-08-10
def _ve_poisson_ ( s , fluctuate , accept = lambda s : True ) :
    """Generate a Poisson random number according to the parameters of ``s``.
    >>> v = ... ## the number with error
    ## get 100 random numbers
    >>> for i in range ( 0, 100 ) : print v.poisson()
    ## get only odd numbers
    >>> for j in range ( 0, 100 ) : print v.poisson ( accept = lambda s : 1 ==s%2 )
    ## do not fluctuate the mean of poisson:
    >>> for j in range ( 0, 100 ) : print v.poisson ( fluctuate = False )
    """
    ## NOTE(review): ``accept`` is currently unused - kept for interface
    ## compatibility; confirm intended semantics upstream.
    s    = VE ( s )
    mean = s.value ()
    if mean < 0 :
        ## a negative mean is only meaningful if it can fluctuate upwards
        if not fluctuate :
            raise TypeError ( 'Negative mean without fluctuations (1)' )
        if s.cov2 () <= 0 :
            raise TypeError ( 'Negative mean without fluctuations (2)' )
        if abs ( mean ) > 3 * s.error () :
            logger.warning ( "Very inefficient mean fluctuations: %s" % s )
    mu = mean
    if fluctuate :
        ## redraw until the fluctuated mean is non-negative
        mu = s.gauss ()
        while mu < 0 : mu = s.gauss ()
    return _poisson ( mu )
## install the random-number generators on the ValueWithError class
VE.gauss   = _ve_gauss_
VE.poisson = _ve_poisson_
# ==============================================================================
## factory for unpickling of <code>Ostap::Math::ValueWithError</code>
# @see Ostap::Math::ValueWithError
def ve_factory ( value , cov2 ) :
    """Factory for unpickling of <code>Ostap::Math::ValueWithError</code>:
    reconstruct a VE object from its (value, cov2) state.
    - see Ostap::Math::ValueWithError
    """
    ve = VE ( value , cov2 )
    return ve
# =============================================================================
## reduce <code>Ostap::Math::ValueWithError</code>
# @see Ostap::Math::ValueWithError
def ve_reduce ( v ) :
    """Reduce `Ostap.Math.ValueWithError` for pickling: the factory
    plus the (value, cov2) state tuple.
    - see Ostap.Math.ValueWithError
    """
    state = ( v.value () , v.cov2 () )
    return ve_factory , state
Ostap.Math.ValueWithError.__reduce__ = ve_reduce
# =============================================================================
## decorated classes
## classes whose interface is decorated by this module
_decorated_classes_ = (
    Ostap.Math.ValueWithError         ,
    Ostap.Math.ValueWithError.Vector  ,
    Ostap.Math.Point3DWithError       ,
    Ostap.Math.Vector3DWithError      ,
    Ostap.Math.LorentzVectorWithError )
# =============================================================================
## decorated methods
## methods added/installed by this module (book-keeping for introspection)
_new_methods_ = (
    VE.Vector  . __str__   ,
    VE.Vector  . __repr__  ,
    # NOTE(review): the three VVE.Vector entries below may duplicate the
    # VE.Vector ones above if VVE is an alias of VE.Vector - confirm upstream
    VVE.Vector . __str__   ,
    VVE.Vector . __repr__  ,
    VVE.Vector . __len__   ,
    VE . b2s         ,
    VE . purity      ,
    VE . prec        ,
    VE . precision   ,
    VE . __lt__      ,
    VE . __le__      ,
    VE . __gt__      ,
    VE . __ge__      ,
    VE . __eq__      ,
    VE . __ne__      ,
    VE . __hash__    ,
    VE . minmax      ,
    VE . gauss       ,
    VE . poisson     ,
    VE . __reduce__  ,
    )
# =============================================================================
if '__main__' == __name__ :
    ## self-documentation + a tiny smoke demo of VE arithmetic
    from ostap.utils.docme import docme
    docme ( __name__ , logger = logger )
    a = VE(100,100)
    b = VE(400,400)
    logger.info ( 'a=%s, b=%s' % ( a , b ) )
    logger.info ( 'a+b         %s' % ( a + b ) )
    logger.info ( 'a-b         %s' % ( a - b ) )
    logger.info ( 'a*b         %s' % ( a * b ) )
    logger.info ( 'a/b         %s' % ( a / b ) )
    ## binomial-style fraction and asymmetry with correlated errors
    logger.info ( 'a/(a+b)     %s' % ( a.frac ( b ) ) )
    logger.info ( '(a-b)/(a+b) %s' % ( a.asym ( b ) ) )
    logger.info ( 80*'*')
# =============================================================================
## The END
# =============================================================================
| OstapHEP/ostap | ostap/math/ve.py | Python | bsd-3-clause | 15,818 | [
"Gaussian"
] | 46d82be499e588ae7d88ac65c8e21cf01cd202f51f4020a88a112b45a4cda050 |
# -*- coding: utf-8 -*-
import re
from modules import client,webutils,cloudflare,decryptionUtils,cache
from modules.constants import resolver_dict
from modules.log_utils import log
from modules.liveresolver_utils import *
import urlparse,urllib,base64
from BeautifulSoup import BeautifulSoup as bs
global limit
limit=0
from modules import constants
FLASH = constants.flash_ver()
'''
Pass any url containing video to this function.
It will try to find the embedded video and resolve it, returning the resolved
and playable video link.
cache_timeout (in hours) - how long to cache the resolved video for the given page.
html - pass html content to resolver and it will search for embedded links from it, instead
of requesting the given url and searching from there.
'''
def resolve(url, cache_timeout=3, html=None):
    """Resolve a page URL (or pre-fetched ``html``) to a playable video link.
    Tries a direct resolve first; otherwise finds the embedded link
    (optionally through the cache) and resolves that.
    Returns the resolved link, or the last-known URL on failure.
    """
    try:
        log("Resolver called with url: " + url)
        resolved=None
        if html==None:
            ## the URL itself may already be directly resolvable
            resolved=resolve_it(url)
        if resolved==None:
            if html==None:
                #semi-cached resolving
                url=cache.get(find_link,cache_timeout,url)
            else:
                ## caller supplied the page content - search it directly
                url = find_link(url,html=html)
            resolved=url
            ## the found embed link may itself need a resolver pass
            url=resolve_it(url)
            if url!=None:
                resolved=url
        log("Resolved url: " + resolved)
        return resolved
    except:
        ## best-effort: on any failure fall back to whatever we last had
        log("Failed to find link.")
        return url
'''
Check if your video link is resolvable through the liveresolver module.
'''
def isValid(url):
    """Return True if the URL's (normalised) host is handled by one of the
    known resolvers.
    Membership is tested directly against the dict - O(1) - instead of
    materialising ``resolver_dict.keys()`` into a list first (Python 2).
    """
    return prepare(urlparse.urlparse(url).netloc) in resolver_dict
'''
Flush the liveresolver cache.
'''
def delete_cache():
    """Flush the liveresolver cache (delegates to modules.cache)."""
    cache.clear()
'''
Not intended for external use.
This method is used internally for resolving the found link.
'''
def resolve_it(url):
    """Resolve a single video link (not intended for external use).
    Direct stream URLs are returned (m3u8 gets User-Agent headers
    appended), f4m/acestream/sopcast go to dedicated resolvers, and
    everything else is dispatched by hostname via ``resolver_dict``.
    Returns None when no resolver matches.
    """
    ## already-playable stream URLs: pass through (adding headers for HLS)
    if '.m3u8' in url or 'rtmp:' in url or '.flv' in url or '.mp4' in url or '.ts' in url or url.startswith('plugin://'):
        if '.m3u8' in url and '|' not in url:
            url += '|%s' % urllib.urlencode({'User-Agent': client.agent()})
        return url
    if '.f4m' in url:
        from resolvers import f4m
        resolved = f4m.resolve(url)
        return resolved
    ## P2P protocols handled by the sop/ace resolver
    if url.startswith('acestream://') or url.startswith('sop://') or '.acelive' in url:
        from resolvers import sop_ace
        resolved = sop_ace.resolve(url, 'Video')
        return resolved
    netloc = prepare(urlparse.urlparse(url).netloc)
    if netloc in resolver_dict.keys():
        resolver = resolver_dict[netloc]
        log("Calling resolver: " + resolver)
        ## dynamic dispatch by module name (Python 2 exec statement)
        exec "from resolvers import %s"%resolver
        resolved = eval(resolver+".resolve(url)")
        return resolved
    else:
        return
def find_link(url, html=''):
    """Search a page for an embedded video link by running the ``finder*``
    helpers in this module against its HTML.
    The referer is taken from the URL's ``referer`` query argument when
    present, otherwise from its own host. Returns the first finder hit,
    or None.
    """
    log('Finding in : %s'%url)
    try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
    except: referer = 'http://' + urlparse.urlparse(url).netloc
    host = urlparse.urlparse(url).netloc
    headers = {'Referer':referer, 'Host':host, 'User-Agent' : client.agent(), 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.5'}
    ## fetch the page only when the caller did not hand us the HTML
    if html=='':
        url = manual_url_fix(url)
        html = client.request(url, headers=headers)
        html = manual_html_fix(url,html,headers)
    ref=url
    ## every module-level name containing 'finder' is a candidate detector
    fs=list(globals().copy())
    for f in fs:
        if 'finder' in f:
            resolved = eval (f+"(html,ref)")
            if resolved:
                log('Resolved with %s: %s'%(f,resolved))
                return resolved
            # NOTE(review): this ``break`` stops after the FIRST finder name
            # encountered (globals() order is arbitrary in Python 2) - it
            # looks suspicious; confirm intended loop behaviour upstream.
            break
    return
#embeded iframes
#embeded iframes
def finder1(html,url):
    """Follow embedded iframes/anchors recursively (depth-limited via the
    module-global ``limit``) and resolve whatever they lead to."""
    global limit
    limit+=1
    ref=url
    try:
        ## collect candidate embed URLs: iframes, playStream calls, live-links
        urls = re.findall('<i?frame.+?src=(?:\'|\")(.+?)(?:\'|\")',html)
        try:
            urls.append(re.findall("playStream\('iframe', '(.+?)'\)",html)[0])
        except: pass
        urls += re.findall('<a.+?href=[\'\"](/live-.+?stream.+?)[\'\"]',html)
        ## randomise visiting order between candidates
        from random import shuffle
        shuffle(urls)
        for url in urls:
            ## skip known ad frames
            if 'c4.zedo' in url:
                continue
            if "micast" in url or 'turbocast' in url:
                return finder47(html,ref)
            ## directly resolvable? done
            rr = resolve_it(url)
            if rr:
                return rr
            uri = manual_fix(url,ref)
            if limit>=25:
                log("Exiting - iframe visit limit reached")
                return
            ## recurse into the iframe's page
            resolved = find_link(uri)
            if resolved:
                break
        # NOTE(review): if ``urls`` is empty, ``resolved`` is unbound here -
        # the resulting NameError is swallowed by the except below.
        headers = {'User-Agent': client.agent(), 'Referer': ref}
        if '.m3u8' in resolved and '|' not in resolved:
            headers.update({'X-Requested-With':'ShockwaveFlash/20.0.0.286', 'Host':urlparse.urlparse(resolved).netloc, 'Connection':'keep-alive'})
            resolved += '|%s' % urllib.urlencode(headers)
        return resolved
    except:
        return
#lsh stream
#lsh stream
def finder2(html,url):
    """Find lshstream.com embeds: a bare link first, then a fid-based one."""
    try:
        ## a direct lshstream URL anywhere in the page (not inside quotes)
        pattern = re.compile('[^\"\'](http://www.lshstream.com[^\"\']*)')
        return pattern.findall(html)[0]
    except:
        try:
            ## fall back to the fid-script form and build the embed URL
            pattern = re.compile('<script type="text/javascript"> fid="(.+?)"; v_width=.+?;\s*v_height=.+?;</script><script type="text/javascript" src="http://cdn.lshstream.com/embed.js">')
            fid = pattern.findall(html)[0]
            return 'http://www.lshstream.com/embed.php?u=%s&vw=720&vh=420&live.realstreamunited.com=%s'%(fid,url)
        except:
            return
#castalba
#castalba
def finder3(html,url):
    """Find a castalba.tv player id and build the embed URL for it."""
    try:
        match = re.search('id="(.+?)";.+?src="http://www.castalba.tv/.+?.js', html)
        return 'http://castalba.tv/embed.php?cid=%s&wh=600&ht=380' % match.group(1)
    except:
        ## no match (or bad input): behave like the other finders - None
        return
#jw_config
#jw_config
def finder4(html,url):
    """Extract a JWPlayer ``file:`` source; decorate f4m/m3u8 links."""
    ref = url
    try:
        ## the source may be quoted with either double or single quotes
        try:
            link = re.findall('file\s*:\s*"(.+?)"', html)[0]
        except:
            link = re.findall("file\s*:\s*'(.+?)'", html)[0]
        ## poster images / bare flv markers are not playable links
        if '.png' in link or link == '.flv':
            return
        if '.f4m' in link:
            link = '%s?referer=%s' % (link, url)
        if '.m3u8' in link and '|' not in link:
            hdrs = {'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/20.0.0.228', 'Host':urlparse.urlparse(link).netloc, 'Connection':'keep-alive','Accept':'*/*'}
            link = link + '|%s' % urllib.urlencode(hdrs)
        return link
    except:
        return
#vlc_config
def finder5(html,url):
try:
soup=bs(html)
try:
link=soup.find('embed',{'id':'vlc'})
link=link['target']
except:
link=soup.find('embed',{'name':'vlc'})
link=link['target']
return link
except:
return
#sawlive
def finder6(html,url):
try:
uri = re.compile("[\"']([^\"\']*sawlive.tv\/embed\/[^\"'\/]+)\"").findall(html)[0]
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(uri)[0]
host = urlparse.urlparse(uri).netloc
uri = 'http://sawlive.tv/embed/%s?referer=%s&host=%s' % (page,url,host)
return uri
except:
try:
uri = re.compile("src=(?:\'|\")(http:\/\/(?:www\.)?sawlive.tv\/embed\/.+?)(?:\'|\")").findall(html)[0]
page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(uri)[0]
host = urlparse.urlparse(uri).netloc
uri = 'http://sawlive.tv/embed/%s?referer=%s&host=%s' % (page,url,host)
return uri
except:
return
#yocast
#yocast
def finder7(html,url):
    """Find a yocast.tv stream id and build the matching embed URL."""
    try:
        stream_id = re.findall('<script>fid\s*=\s*(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://www.yocast.tv/.+?.js(?:\'|\")', html)[0]
        return 'http://www.yocast.tv/embed.php?live=%s&vw=600&vh=450' % stream_id
    except:
        return
#miplayer
def finder8(html,url):
try:
reg = re.compile("(http://(?:www\.)?miplayer.net/embed[^'\"]+)")
url = re.findall(reg,html)[0]
return url
except:
return
#castamp
def finder9(html,url):
try:
reg = re.compile("(http://(?:www.)?castamp.com/embed.php\?c=[^\"&]+)")
url = re.findall(reg,html)[0]
return url
except:
return
#04 stream
def finder10(html,url):
try:
reg = re.compile('04stream.com/\w+\.js\?stream=([^ "\'&]+)')
url = re.findall(reg,html)[0]
url = 'http://www.04stream.com/weed.js?stream=%s&width=600&height=460&str=is&link=1&cat=3'%url
return url
except:
return
#leton
def finder11(html,url):
try:
html = urllib.unquote(html)
reg = re.compile('leton.tv/player.php\?streampage=([^&]+)&')
url = re.findall(reg,html)[0]
url = 'http://leton.tv/player.php?streampage=%s&width=600&height=450'%url
return url
except:
return
#yotv.co
def finder12(html,url):
try:
ref=url
reg = re.compile("<script type='text/javascript'>\s*fid=(?:\'|\")(.+?)(?:\'|\");\s*v_width=.+?;\s*v_height=.+?;</script><script type='text/javascript' src='http://www.yotv.co/player.js'></script>")
url = re.findall(reg,html)[0]
url = 'http://www.yotv.co/embed.php?live=%s&vw=620&vh=490&referer=%s'%(url,ref)
return url
except:
return
#hdcast
def finder13(html,url):
try:
url = re.compile('src="(http://(?:www\.)?hdcast.me/embed[^\'"]+)').findall(html)[0]
return url
except:
pass
#zerocast
def finder14(html,url):
try:
ref=url
url = re.compile('zerocast\.(?:tv|in)/(?:channel|embed)?\.php\?a=(\d+)').findall(html)[0]
url = 'http://zerocast.tv/channel.php?a=%s&width=640&height=480&autostart=true'%url
return url
except:
pass
#castup
def finder15(html,url):
try:
ref = url
reg = '<script type="text/javascript">\s*fid=(?:\'|\")(.+?)(?:\'|\");.+?src="http://www.castup.tv/js/.+?.js">'
url = re.findall(reg,html)[0]
url = 'http://www.castup.tv/embed_2.php?channel=%s&vw=650&vh=410&referer=%s'%(url,ref)
return url
except:
return
#mybeststream(not implemented)
def finder16(html,url):
try:
ref=url
id = re.findall('id=(?:\'|\")(\d+)(?:\'|\");width=.*?pt987.googlecode.com',html)[0]
url = 'http://mybeststream.xyz/?id=%s&width=640&height=385&referer=%s'%(id,ref)
return url
except:
pass
#sunhd(not implemented)
def finder17(html,url):
try:
ref=url
url = re.findall('src="(http://www.sunhd.info/channel.php\?file=.+?)"',html)[0]
return url+'&referer=%s'%ref
except:
pass
#youtube
def finder18(html,url):
try:
url = re.findall('src="?(https?://(?:www.|)youtube(?:-nocookie)?.com.+?[^\'\"]+)',html)[0]
return url.replace('amp;','').replace('-nocookie','')
except:
return
#livestream
def finder19(html,url):
try:
url = re.findall('(http://(?:new\.)?livestream.com[^"]+)',html)[0]
if 'player' in url:
return url
except:
return
#privatestream
def finder20(html,url):
try:
try:
id = re.findall('privatestream.tv/player\?streamname=([^&]+)&', html)[0]
except:
id = re.findall('privatestream.tv/((?!player)[^\.&\?\=]+)',html)[0]
if id != 'js/jquery-1':
url = 'http://privatestream.tv/player?streamname=%s&width=640&height=490'%id
return url
else:
return
except:
return
#airq.tv
def finder21(html,url):
try:
id = re.findall('(?:SRC|src)="http://airq.tv/(\w+)',html)[0]
url = 'http://airq.tv/%s/'%id
return url
except:
return
#aliez
def finder22(html,url):
try:
ref = url
try:
id = re.findall('emb.aliez[\w\.]+?/player/live.php\?id=([^&"]+)',html)[0]
return 'http://emb.aliez.me/player/live.php?id=%s&w=728&h=480&referer=%s'%(id,ref)
except:
try:
id = re.findall('(?:94.242.255.35|195.154.44.194|aliez\.\w+)/player/(?:live|embed).php\?id=(\d+)',html)[0]
except:
id = re.findall('http://aliez.(?:me|tv)/live/(.+?)(?:/|"|\')',html)[0]
return 'http://emb.aliez.me/player/live.php?id=%s&w=728&h=480&referer=%s'%(id,ref)
return
except:
return
#p3g
def finder23(html,url):
try:
id = re.findall("channel='(.+?)',\s*g='.+?';</script><script type='text/javascript' src='http://p3g.tv/resources/scripts/p3g.js'",html)[0]
url = 'http://www.p3g.tv/embedplayer/%s/2/600/420'%id
return url
except:
return
#dinozap (not implemented)
def finder24(html,url):
try:
url = re.findall('(http://(?:www\.)?dinozap.info/redirect/channel.php\?id=[^"\']+)',html)[0]
return url
except:
return
#liveflashplayer
def finder25(html,url):
try:
id = re.findall("channel='(.+?)', g='.+?';</script><script type='text/javascript' src='http://www.liveflashplayer.net/resources/scripts/liveFlashEmbed.js'>",html)[0]
url = 'http://www.liveflashplayer.net/membedplayer/%s/1/620/430'%id
return url
except:
return
#laola1
def finder26(html,url):
try:
url = re.findall('(http://www.laola1.tv[^"]+)', html)[0]
return url
except:
pass
#ehftv
def finder27(html,url):
try:
url = re.findall('src=(?:\'|\")(http:\/\/(?:www\.)?ehftv.com(?:/|//)player\.php[^\'\"]+)',html)[0]
return url
except:
return
#zoomtv
def finder28(html,url):
try:
ref=url
fid = re.findall('fid="(.+?)".+?</script><script type="text/javascript" src="http://zome.zoomtv.me/.+?.js',html)[0]
pid = re.findall('pid\s*=\s*(.+?);',html)[0]
url = 'http://www.zoomtv.me/embed.php?v=' + fid + '&vw=660&vh=450&referer=%s&pid=%s'%(ref,pid)
return url
except:
return
#streamlive
def finder29(html,url):
try:
ref = url
url = re.findall('src="(http://(?:www.)?streamlive.to/embed/[^"]+)"',html)[0]
url = url + '&referer=%s'%ref
return url
except:
return
#roja redirect links
def finder30(html,url):
try:
html = client.request(url, referer=urlparse.urlparse(url).netloc)
url = re.findall('href="(.+?)">click here...',html)[0]
resolved = find_link(url+'&referer=http://rojedirecta.me')
return resolved
except:
return
#iguide
def finder31(html,url):
try:
ref=url
url = re.findall('(http://(?:www.)?iguide.to/embed/[^"\']+)"',html)[0]
return url+'&referer='+ref
except:
return
#letgo
def finder32(html,url):
try:
id = re.findall('fid="(.+?)"; v_width=.+?; v_height=.+?;</script><script type="text/javascript" src="http://www.letgo.tv/js/embed.js"',html)[0]
url = 'http://www.letgo.tv/embed.php?channel=%s&vw=630&vh=450'%id
return url
except:
return
#streamup
def finder33(html,url):
ref = url
try:
id = re.findall("streamup.com/rooms/([^/\'\"?&\s]+)",html)[0]
url = 'http://streamup.com/%s'%id
return url
except:
try:
id = re.findall('streamup.com/([^/\'\"?&\s]+)/embed',html)[0]
url = 'http://streamup.com/%s'%(id)
return url
except:
return
#p2pcast
def finder34(html,url):
try:
ref = url
try:
id = re.findall('http://p2pcast.tv/(?:p2pembed|stream).php\?id=([^&]+)',html)[0]
except:
id = re.findall("id=(?:\'|\")(.+?)(?:\'|\");.+?src='http://js.p2pcast.+?.js'>",html)[0]
url = 'http://p2pcast.tv/stream.php?id=%s&referer=%s'%(id,ref)
return url
except:
return
def finder35(html,url):
try:
try:
id = re.findall('cast3d.tv/embed.php\?(?:u|channel)=([^&]+)&',html)[0]
except:
id = re.findall('fid\s*=\s*(?:\'|\")(.+?)(?:\'|\");.*\s*.+?src=(?:\'|\")http://www.cast3d.tv/js/.+?.js',html)[0]
url = 'http://www.cast3d.tv/embed.php?channel=%s&vw=600&vh=400'%id
return url
except:
return
#xvtr
def finder36(html,url):
try:
ref = url
id = re.findall("fid=\"(.+?)\".+?</script><script type='text/javascript' src='http://www.xvtr.pw/embed.js'></script>",html)[0]
url = 'http://www.xvtr.pw/channel/%s.htm?referer=%s'%(id,ref)
return url
except:
return
#acestream
def finder37(html,url):
try:
try:
ace = re.findall('this.load(?:Player|Torrent)\((?:\'|\")(.+?)(?:\'|\")',html)[0]
except:
ace = re.findall('"http://torrentstream.net/p/(.+?)"',html)[0]
url = 'plugin://program.plexus/?mode=1&url=%s&name=Video'%(ace)
return url
except:
return
#sopcast
def finder38(html,url):
try:
sop = re.findall("(sop://[^\"\']+)['\"]",html)[0]
url = 'plugin://program.plexus/?mode=2&url=%s&name=Video'%(sop)
return url
except:
return
#turbocast
def finder39(html,url):
try:
url = re.findall('(http://www.turbocast.tv[^\'\"]+)',html)[0]
return url
except:
try:
url = re.findall('(.+?turbocast.tv.+?)',url)[0]
return url
except:
return
#directstream
def finder40(html,url):
try:
ref=url
fid = re.findall('fid=(?:\'|\")(.+?)(?:\'|\").+?</script><script type="text/javascript" src="http://direct-stream.org/embedStream.js"',html)[0]
url = 'http://direct-stream.org/e.php?id=%s&vw=740&vh=490&referer=%s'%(fid,ref)
return url
except:
return
#pxstream
def finder42(html,url):
try:
ref=url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\");.+?src='http://pxstream.tv/.+?.js",html)[0]
url = 'http://pxstream.tv/embedrouter.php?file=%s&width=730&height=430&jwplayer=flash&referer=%s'%(id,ref)
return url
except:
return
#publishpublish
def finder43(html,url):
try:
ref=url
id = re.findall('fid="(.+?)";.+?</script><script type="text/javascript" src="http://www.pushpublish.tv/js/embed.js"',html)[0]
loc = (urlparse.urlparse(url).netloc).replace('www.','')
url ='http://www.pushpublish.tv/player.php?channel=%s&vw=650&vh=400&domain=%s&referer=%s'%(id,loc,ref)
return url
except:
return
#ucaster
def finder44(html,url):
try:
ref=url
id = re.findall('channel=[\'"]([^\'"]+)[\'"].*?ucaster.(?:eu|com)', html)[0]
url = 'http://www.embeducaster.com/embedplayer/%s/1/595/500?referer=%s'%(id,ref)
return url
except:
return
#rocktv
def finder45(html,url):
try:
ref=url
id = re.findall("fid=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://www.rocktv.co/players?.js(?:\'|\")",html)[0]
url = 'http://rocktv.co/embed.php?live=%s&vw=620&vh=490&referer=%s'%(id,ref)
return url
except:
return
#ezcast
def finder46(html,url):
try:
ref=url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://www.ezcast.tv/static/scripts/ezcast.js(?:\'|\")>",html)[0]
url = 'http://www.embedezcast.com/embedplayer/%s/1/790/420?referer=%s'%(id,ref)
return url
except:
return
#micast
def finder47(html,url):
try:
ref=url
try:
id = re.findall('micast.tv/.*?\.php\?ch=([^"\']+)',html)[0]
except:
try:
id = re.findall('turbocast.tv/.*?\.php\?ch=([^"]+)',html)[0]
except:
id = re.findall('(?:ca|ch)=(?:\'|\")(.+?)(?:\'|\").+?micast.tv/embed.js(?:\'|\")',html)[0]
url = 'http://micast.tv/iframe.php?ch=%s&referer=%s'%(id,ref)
return url
except:
return
#openlive
def finder48(html,url):
try:
ref=url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://openlive.org/live.js(?:\'|\")>",html)[0]
url = 'http://openlive.org/embed.php?file=%s&width=640&height=380&referer=%s'%(id,ref)
return url
except:
return
#helper
def finder49(html,url):
try:
ch = re.findall('fid=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://www.webspor.pw/HD/TV/info/channel.js(?:\'|\")>',html)[0]
url = 'http://worldsport.me/%s'%ch
return find_link(url)
except:
return
#sostart
def finder50(html,url):
try:
ref=url
id = re.findall("id=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://sostart.org/.+?.js(?:\'|\")>",html)[0]
url = 'http://sostart.org/stream.php?id=%s&width=630&height=450&referer=%s'%(id,ref)
return url
except:
return
#hdmyt
def finder51(html,url):
try:
ref = url
url = re.findall('(http://(?:www.)?hdmyt.info/channel.php\?file=[^\"\']+)',html)[0]
url+='&referer=%s'%ref
return url
except:
return
#lsh
#lsh
def finder52(html,url):
    """Find a cdn.lshstream.com fid embed and build its embed URL.
    BUG FIX: the original called ``re.findall`` without the ``html``
    argument (always raising TypeError, so the finder always returned
    None) and never substituted the fid into the ``%s`` placeholder of
    the embed URL.
    """
    try:
        ref=url
        fid = re.findall('fid=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://cdn.lshstream.com/embed.js(?:\'|\")>', html)[0]
        url = 'http://cdn.lshstream.com/embed.php?u=%s&referer=%s' % (fid, ref)
        return url
    except:
        return
#hqstream
def finder53(html,url):
try:
ref=url
id = re.findall('http://hqstream.tv/.+?\?streampage=([^&/ ]+)',html)[0]
url = 'http://hqstream.tv/player.php?streampage=%s&height=480&width=700&referer=%s'%(id,ref)
return url
except:
return
#jw rtmp
def finder54(html,url):
try:
rtmp = re.findall('jwplayer("player").setup({\s*file: "(rtmp://.+?)"',html)[0]
return rtmp
except:
return
#tutele
def finder55(html,url):
try:
ref = url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src='http://tutelehd.com/embedPlayer.js'>",html)[0]
url = 'http://tutelehd.com/embed/embed.php?channel=%s&referer=%s'%(id,ref)
return url
except:
return
#janjua
def finder56(html,url):
try:
ref = url
id = re.findall("channel=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://www.janjua.tv/resources/scripts/janjua.js(?:\'|\")>",html)[0]
url = 'http://www.janjua.tv/embedplayer/%s/1/500/400?referer=%s'%(id,ref)
return url
except:
return
#abcast
def finder57(html,url):
try:
ref = url
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\").+?src=(?:\'|\")http://abcast.net/simple.js(?:\'|\")",html)[0]
url = 'http://abcast.net/embed.php?file=%s&referer=%s'%(id,ref)
return url
except:
return
#castfree
def finder58(html,url):
try:
ref = url
id = re.findall('castfree.me/channel.php\?a=(\d+)',html)[0]
url = 'http://www.castfree.me/embed.php?a=%s&id=&width=640&height=460&autostart=true&referer=%s'%(id,ref)
return url
except:
return
#dinozap
def finder59(html,url):
try:
ref = url
import requests
html = requests.get(url).text
chars = re.findall('&#(\d+)',html)
for c in chars:
html = html.replace('&#%s'%c, chr(int(c)))
html = html.replace(';','')
url = re.findall('(http://.+?.(?:pw|info|org|com|me)/(?:redirect/)?channel(?:fr|\w+)?.php\?file=[^"\']+)',html)[0]
return url + '&referer=' + ref
except:
return
#dinozap
#dinozap
def finder60(html,url):
    # NOTE(review): this finder looks broken: ``id`` is a 2-group tuple,
    # the format string has one ``%s`` but two arguments (TypeError,
    # swallowed below), ``&referer=s`` is missing a ``%``, and the result
    # is concatenated onto ``url``. As written it always returns None;
    # confirm the intended URL shape upstream before fixing.
    try:
        ref = url
        id = re.findall('(livesports15.ucoz.com/new.html\?id=(\d+))',html)[0]
        return url + 'http://www.sitenow.me/channel.php?file=%s&width=670&height=470&autostart=true&referer=s'%(id,ref)
    except:
        return
#streamcasttv
def finder61(html,url):
try:
id = re.findall("file=(?:\'|\")(.+?)(?:\'|\");.+?src=(?:\'|\")http://streamcasttv.biz/.+?.js",html)[0]
url ='http://streamcasttv.biz/embed.php?file=%s&referer=%s'%(id,url)
return url
except:
return
#dinozap
def finder62(html,url):
try:
ref = url
url = re.findall('(http://.+?.(?:pw)/(?:public/)?embed(?:fr|\w+)?.php\?id=[^"\']+)',html)[0]
url = url.replace(';&#','&#').replace('&#','\\x').replace(';&','&')
url= url.encode("string-escape")
url = urllib.unquote(url)
return url + '&referer=' + ref
except:
return
#rtmp
def finder63(html,url):
try:
swf = re.findall('src=(?:\'|\")(.+?.swf)',html)[0]
file, rtmp = re.findall('flashvars=(?:\'|\")file=(.+?)&.+?streamer=(.+?)&',html)[0]
url = rtmp + ' playpath=' + file +' swfUrl=' + swf + ' flashver=WIN\\2019,0,0,226 live=true timeout=15 swfVfy=true pageUrl=' + url
return url
except:
return
def finder64(html,url):
try:
url = re.findall('(http://vaughnlive.tv/embed/video/[^/\'"?&\s]+)',html)[0]
return url
except:
return
def finder65(html,url):
try:
referer = url
url = re.findall('src=(?:\'|\")(.+?)(?:\'|\").+?type="video/mp4"',html)[0]
if len(url)<10:
raise
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': referer})
return url
except:
return
#hdcast.org
def finder66(html,url):
try:
ref = url
id,id2 = re.findall('fid="(.+?)";.+?src="http://hdcast.org/(.+?).js">',html)[0]
url = 'http://www.hdcast.org/%s.php?u=%s&vw=854&vh=480&domain=%s&referer=%s'%(id2,id,urlparse.urlparse(ref).netloc,ref)
return url
except:
return
#serbiaplus
def finder67(html,url):
try:
if 'serbiaplus' not in url:
return
id = re.findall('fid="(.+?)";.+?src="/live.js"',html)[0]
url = 'http://serbiaplus.com/' + id
resolved = find_link(url)
return resolved
except:
pass
#streamking
def finder68(html,url):
try:
ref = url
url = re.findall('(http://streamking.cc/[^"\']+)(?:\'|\")',html)[0]
return url+'&referer=%s'%ref
except:
return
#beba
def finder69(html,url):
try:
url = re.findall('http://beba.ucoz.com/playerlive.html\?id=(.+?)$',url)[0]
return find_link(url)
except:
return
#stream-sports
def finder70(html,url):
try:
ref = url
url = re.findall('http://www.stream\-sports.eu/uploads/video.html\?id=(.+?)$',url)[0]
return url+'&referer=%s'%ref
except:
return
#ustream
def finder71(html,url):
try:
ref=url
url=re.findall('(https?://(?:www.)?ustream.tv/embed/.+?[^\'\"]+)',html)[0]
url+='&referer='+ref
return url
except:
return
#config finder
def finder72(html,ref):
try:
url = re.findall('src\s*:\s*\'(.+?(?:.m3u8)?)\'',html)[0]
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
return url
except:
pass
#config finder
def finder73(html,url):
try:
ref = url
url = re.findall('Player\(\{\n\s*source\:\s*\'(.+?)\'\,',html)[0]
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref})
return url
except:
return
#cast4u
def finder74(html,url):
try:
ref = url
id = re.findall('fid="(.+?)";.+?src="http://www.cast4u.tv/.+?.js">',html)[0]
url = 'http://www.cast4u.tv/embed.php?live=%s&vw=620&vh=490&referer=%s'%(id,ref)
return url
except:
return
#m3u8 config finder
def finder75(html,url):
try:
ref = url
url = re.findall('file: window.atob\(\'(.+?)\'\),', html)[0]
file = base64.b64decode(url)
file += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/19.0.0.245', 'Host':urlparse.urlparse(file).netloc, 'Connection':'keep-alive','Accept':'*/*'})
return file
except:
return
#direct stream 2nd finder
def finder76(html,url):
ref = url
try:
id = re.findall('fid=[\"\'](.+?)[\"\'];.+?data-rocketsrc="http://direct-stream.org/.+?.js',html)[0]
url ="http://direct-stream.org/e.php?id=%s&vw=700&vh=400&referer=%s"%(id,ref)
return url
except:
return
#zona priority
def finder77(html,url):
try:
html = urllib.unquote(html)
url = finder4(html,url)
if client.request(url) != None:
return url
return
except:
return
#weplayer
def finder78(html,url):
try:
id = re.findall("id=['\"](.+?)['\"];.+?src=['\"]http://weplayer.pw/.+?.js([^$]+)",html)[0]
url = 'http://weplayer.pw/stream.php?id=%s&width=640&height=480&stretching=&referer=%s'%(id[0],url)
if '-->' in id[1]:
return
return find_link(url)
except:
return
def finder79(html,url):
try:
ref = url
url = re.findall("playStream\('hls', '(.+?)'",html)[0]
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/19.0.0.245', 'Host':urlparse.urlparse(url).netloc, 'Connection':'keep-alive','Accept':'*/*'})
return url
except:
return
#tvope
def finder80(html,ref):
try:
id = re.findall('c="(.+?)";.+?</script>\s*<script.+?src="http://i.tvope.com/js/.+?.js',html)[0]
url = 'http://tvope.com/emb/player.php?c=%s&w=700&h=480&referer=%s&d=www.popofthestreams.xyz'%(id,ref)
return url
except:
return
#acestream
#acestream
def finder90(html,ref):
    """Return the first quoted acestream:// link found in the page."""
    try:
        return re.findall('(acestream://[^"\']+)["\']', html)[0]
    except:
        return
#sopcast
def finder91(html,ref):
try:
url = re.findall('(sop://[^"\']+)["\']',html)[0]
return url
except:
return
#shadownet
def finder92(html,ref):
try:
url = re.findall('source\s*src=\s*"\s*(.+?)\s*"\s*type=\s*"\s*application/x-mpegURL\s*"\s*/>',html)[0]
if 'rtmp' in url:
url+=' swfUrl=http://www.shadow-net.biz/javascript/videojs/flashls/video-js.swf flashver=%s live=true timeout=18 swfVfy=1 pageUrl=http://www.shadow-net.biz/'%FLASH
elif 'm3u8' in url:
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': ref, 'X-Requested-With':'ShockwaveFlash/20.0.0.286', 'Host':urlparse.urlparse(url).netloc, 'Connection':'keep-alive','Accept':'*/*', 'Origin':'http://shadow.go.ro'})
return url
except:
return
#filmon
#filmon
def finder93(html,ref):
    """Turn a filmon channel-export reference into a filmon channel URL."""
    try:
        channel = re.search('filmon.(?:com|tv)/tv/channel/export\?channel_id=(\d+)', html).group(1)
        return 'http://www.filmon.com/channel/' + channel
    except:
        return
#castto
#castto
def finder94(html,ref):
    """Find a castto.me fid + player-script pair and build the player URL."""
    try:
        fid, script = re.findall('fid=["\'](.+?)["\'];.+?src=["\'](http://static.castto.me/js/.+?.js)', html)[0]
        return script + '?id=%s&referer=%s' % (fid, ref)
    except:
        return
#redirect
def finder95(html,url):
try:
url = re.findall('<meta http-equiv="refresh".+?; url=(.+?)"',html)[0]
return find_link(url)
except:
return
#acelive
def finder96(html,url):
try:
url = re.findall('[\"\'](.+?.acelive.+?)[\"\']',html)[0]
return url
except:
return
#castasap
def finder97(html,url):
try:
ref = url
import requests
html = requests.get(url).text
chars = re.findall('&#(\d+)',html)
for c in chars:
html = html.replace('&#%s'%c, chr(int(c)))
html = html.replace(';','')
try:
url = re.findall('src="(http://www.(?:castasap|castflash|flashlive|fastflash).pw/.+?embed.php\?id=.+?)"',html)[0]
except:
url = re.findall('src="(http://www.(?:castasap|castflash|flashlive|fastflash).pw/embed.+?)"',html)[0]
url = add_args(url,{'referer':ref})
return url
except:
return
#deltatv
def finder98(html,ref):
try:
x = re.findall('id=[\'\"](.+?)[\'\"].+?src=[\'\"]http://deltatv.xyz/.+?.js',html)[0]
url = 'http://deltatv.xyz/stream.php?id=%s&width=640&height=480&referer=%s'%(x,ref)
return url
except:
return
#hdcast.info
def finder99(html,ref):
try:
id,rr = re.findall('fid=[\'\"](.+?)[\'\"].+?src=[\'\"]http://(?:www.)?hdcast.info/(embed.+?).js',html)[0]
url = 'http://www.hdcast.info/%s.php?live=%s&vw=620&vh=490&referer=%s'%(rr,id,ref)
return url
except:
return
#blowfish decrypt
def finder100(html,ref):
#try:
if 'Blowfish' not in html:
return
if 'wib' in ref:
log('yes')
key = re.findall('new Blowfish\([\"\'](.+?)[\"\']\)',html)[0]
if len(key)>56:
key=key[:56]
crypted = re.findall('.decrypt\([\"\'](.+?)[\"\']\)',html)[0]
from modules import blowfish
cipher = blowfish.Blowfish(key)
decrypted = cipher.decrypt(crypted)
log(decrypted)
return find_link(ref,html=decrypted)
def finder101(html,ref):
try:
id = re.findall('id=[\"\'](.+?)[\"\'];.+?src=[\"\']http://theactionlive.com.+?.js',html)[0]
url = 'http://theactionlive.com?id=%s&referer=%s'%(id,ref)
return url
except:
return
| azumimuo/family-xbmc-addon | script.module.liveresolver/lib/liveresolver/__init__.py | Python | gpl-2.0 | 33,380 | [
"VisIt"
] | 8d5cc0738637c6ca9b5a81990ca6657b1572408b801301d5dc64be7b51888ab8 |
# -*- coding: utf-8 -*-
""" GIS Module
@requires: U{B{I{gluon}} <http://web2py.com>}
@requires: U{B{I{shapely}} <http://trac.gispython.org/lab/wiki/Shapely>}
@copyright: (c) 2010-2013 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["GIS",
"S3Map",
"S3ExportPOI",
"S3ImportPOI",
]
import os
import re
import sys
#import logging
import urllib # Needed for urlencoding
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from datetime import timedelta # Needed for Feed Refresh checks
try:
from lxml import etree # Needed to follow NetworkLinks
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.http import HTTP, redirect
from gluon.dal import Rows
from gluon.storage import Storage, Messages
from s3fields import s3_all_meta_field_names
from s3rest import S3Method
from s3track import S3Trackable
from s3track import S3Trackable
from s3utils import s3_debug, s3_fullname, s3_fullname_bulk, s3_has_foreign_key, s3_include_ext
# Module-level debug switch: when True, messages are written to stderr
# via _debug() (Python 2 print-chevron syntax -- leave as-is)
DEBUG = False
if DEBUG:
    import datetime
    print >> sys.stderr, "S3GIS: DEBUG MODE"
    def _debug(m):
        # Write a debug message to stderr
        print >> sys.stderr, m
else:
    # No-op when debugging is off
    _debug = lambda m: None
# Map WKT types to db types
# (lower-cased WKT geometry name -> gis_location.gis_feature_type code)
GEOM_TYPES = {"point": 1,
              "linestring": 2,
              "polygon": 3,
              "multipoint": 4,
              "multilinestring": 5,
              "multipolygon": 6,
              "geometrycollection": 7,
              }
# km -- presumably the mean Earth radius for great-circle maths; confirm usage
RADIUS_EARTH = 6371.01
# Compact JSON encoding (no whitespace after separators)
SEPARATORS = (",", ":")
# Map Defaults
# Also in static/S3/s3.gis.js
# http://dev.openlayers.org/docs/files/OpenLayers/Strategy/Cluster-js.html
CLUSTER_ATTRIBUTE = "colour"
CLUSTER_DISTANCE = 20   # pixels
CLUSTER_THRESHOLD = 2   # minimum # of features to form a cluster
# Garmin GPS Symbols
# Names of the marker symbols supported by Garmin GPS units.
# Fix: "Amusement Park" was missing its trailing comma, so Python's implicit
# string concatenation silently merged it with "Ball Park" into one bogus
# entry ("Amusement ParkBall Park"), dropping both real symbols.
GPS_SYMBOLS = ["Airport",
               "Amusement Park",
               "Ball Park",
               "Bank",
               "Bar",
               "Beach",
               "Bell",
               "Boat Ramp",
               "Bowling",
               "Bridge",
               "Building",
               "Campground",
               "Car",
               "Car Rental",
               "Car Repair",
               "Cemetery",
               "Church",
               "Circle with X",
               "City (Capitol)",
               "City (Large)",
               "City (Medium)",
               "City (Small)",
               "Civil",
               "Contact, Dreadlocks",
               "Controlled Area",
               "Convenience Store",
               "Crossing",
               "Dam",
               "Danger Area",
               "Department Store",
               "Diver Down Flag 1",
               "Diver Down Flag 2",
               "Drinking Water",
               "Exit",
               "Fast Food",
               "Fishing Area",
               "Fitness Center",
               "Flag",
               "Forest",
               "Gas Station",
               "Geocache",
               "Geocache Found",
               "Ghost Town",
               "Glider Area",
               "Golf Course",
               "Green Diamond",
               "Green Square",
               "Heliport",
               "Horn",
               "Hunting Area",
               "Information",
               "Levee",
               "Light",
               "Live Theater",
               "Lodging",
               "Man Overboard",
               "Marina",
               "Medical Facility",
               "Mile Marker",
               "Military",
               "Mine",
               "Movie Theater",
               "Museum",
               "Navaid, Amber",
               "Navaid, Black",
               "Navaid, Blue",
               "Navaid, Green",
               "Navaid, Green/Red",
               "Navaid, Green/White",
               "Navaid, Orange",
               "Navaid, Red",
               "Navaid, Red/Green",
               "Navaid, Red/White",
               "Navaid, Violet",
               "Navaid, White",
               "Navaid, White/Green",
               "Navaid, White/Red",
               "Oil Field",
               "Parachute Area",
               "Park",
               "Parking Area",
               "Pharmacy",
               "Picnic Area",
               "Pizza",
               "Post Office",
               "Private Field",
               "Radio Beacon",
               "Red Diamond",
               "Red Square",
               "Residence",
               "Restaurant",
               "Restricted Area",
               "Restroom",
               "RV Park",
               "Scales",
               "Scenic Area",
               "School",
               "Seaplane Base",
               "Shipwreck",
               "Shopping Center",
               "Short Tower",
               "Shower",
               "Skiing Area",
               "Skull and Crossbones",
               "Soft Field",
               "Stadium",
               "Summit",
               "Swimming Area",
               "Tall Tower",
               "Telephone",
               "Toll Booth",
               "TracBack Point",
               "Trail Head",
               "Truck Stop",
               "Tunnel",
               "Ultralight Area",
               "Water Hydrant",
               "Waypoint",
               "White Buoy",
               "White Dot",
               "Zoo"
               ]
# -----------------------------------------------------------------------------
class GIS(object):
"""
GeoSpatial functions
"""
    def __init__(self):
        """ Set up GIS-related validation messages & hierarchy defaults """
        messages = current.messages
        #messages.centroid_error = str(A("Shapely", _href="http://pypi.python.org/pypi/Shapely/", _target="_blank")) + " library not found, so can't find centroid!"
        messages.centroid_error = "Shapely library not functional, so can't find centroid! Install Geos & Shapely for Line/Polygon support"
        messages.unknown_type = "Unknown Type!"
        messages.invalid_wkt_point = "Invalid WKT: must be like POINT(3 4)"
        messages.invalid_wkt = "Invalid WKT: see http://en.wikipedia.org/wiki/Well-known_text"
        messages.lon_empty = "Invalid: Longitude can't be empty if Latitude specified!"
        messages.lat_empty = "Invalid: Latitude can't be empty if Longitude specified!"
        messages.unknown_parent = "Invalid: %(parent_id)s is not a known Location"
        # Fallback marker symbol name (must be one of GPS_SYMBOLS)
        self.DEFAULT_SYMBOL = "White Dot"
        # Keys used when walking the location hierarchy (L0=country .. L5)
        self.hierarchy_level_keys = ["L0", "L1", "L2", "L3", "L4", "L5"]
        # Cache for get_location_hierarchy() labels
        self.hierarchy_levels = {}
        self.max_allowed_level_num = 4
# -------------------------------------------------------------------------
    @staticmethod
    def gps_symbols():
        """ Return the list of Garmin GPS marker symbol names """
        return GPS_SYMBOLS
# -------------------------------------------------------------------------
    def download_kml(self, record_id, filename, session_id_name, session_id):
        """
            Download a KML file:
                - unzip it if-required
                - follow NetworkLinks recursively if-required

            Save the file to the /uploads folder

            Designed to be called asynchronously using:
                current.s3task.async("download_kml", [record_id, filename])

            @param record_id: id of the record in db.gis_layer_kml
            @param filename: name to save the file as
            @param session_id_name: name of the session
            @param session_id: id of the session

            @ToDo: Pass error messages to Result & have JavaScript listen for these
        """
        request = current.request
        # Look up the source URL from the KML layer record
        table = current.s3db.gis_layer_kml
        record = current.db(table.id == record_id).select(table.url,
                                                          limitby=(0, 1)
                                                          ).first()
        url = record.url
        # Downloads are cached under uploads/gis_cache/<filename>
        filepath = os.path.join(request.global_settings.applications_parent,
                                request.folder,
                                "uploads",
                                "gis_cache",
                                filename)
        # fetch_kml() returns a string of warning flags (may be empty)
        warning = self.fetch_kml(url, filepath, session_id_name, session_id)
        # @ToDo: Handle errors
        #query = (cachetable.name == name)
        if "URLError" in warning or "HTTPError" in warning:
            # URL inaccessible
            if os.access(filepath, os.R_OK):
                statinfo = os.stat(filepath)
                if statinfo.st_size:
                    # Use cached version
                    #date = db(query).select(cachetable.modified_on,
                    #                        limitby=(0, 1)).first().modified_on
                    #response.warning += "%s %s %s\n" % (url,
                    #                                    T("not accessible - using cached version from"),
                    #                                    str(date))
                    #url = URL(c="default", f="download",
                    #          args=[filename])
                    pass
                else:
                    # 0k file is all that is available
                    #response.warning += "%s %s\n" % (url,
                    #                                 T("not accessible - no cached version available!"))
                    # skip layer
                    return
            else:
                # No cached version available
                #response.warning += "%s %s\n" % (url,
                #                                 T("not accessible - no cached version available!"))
                # skip layer
                return
        else:
            # Download was succesful
            #db(query).update(modified_on=request.utcnow)
            if "ParseError" in warning:
                # @ToDo Parse detail
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("couldn't be parsed so NetworkLinks not followed."))
                pass
            if "GroundOverlay" in warning or "ScreenOverlay" in warning:
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
                # Code to support GroundOverlay:
                # https://github.com/openlayers/openlayers/pull/759
                pass
# -------------------------------------------------------------------------
def fetch_kml(self, url, filepath, session_id_name, session_id):
"""
Fetch a KML file:
- unzip it if-required
- follow NetworkLinks recursively if-required
Returns a file object
Designed as a helper function for download_kml()
"""
from gluon.tools import fetch
response = current.response
public_url = current.deployment_settings.get_base_public_url()
warning = ""
local = False
if not url.startswith("http"):
local = True
url = "%s%s" % (public_url, url)
elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
local = True
if local:
# Keep Session for local URLs
import Cookie
cookie = Cookie.SimpleCookie()
cookie[session_id_name] = session_id
# For sync connections
current.session._unlock(response)
try:
file = fetch(url, cookie=cookie)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
else:
try:
file = fetch(url)
except urllib2.URLError:
warning = "URLError"
return warning
except urllib2.HTTPError:
warning = "HTTPError"
return warning
filenames = []
if file[:2] == "PK":
# Unzip
fp = StringIO(file)
import zipfile
myfile = zipfile.ZipFile(fp)
files = myfile.infolist()
main = None
candidates = []
for _file in files:
filename = _file.filename
if filename == "doc.kml":
main = filename
elif filename[-4:] == ".kml":
candidates.append(filename)
if not main:
if candidates:
# Any better way than this to guess which KML file is the main one?
main = candidates[0]
else:
response.error = "KMZ contains no KML Files!"
return ""
# Write files to cache (other than the main one)
request = current.request
path = os.path.join(request.folder, "static", "cache", "kml")
if not os.path.exists(path):
os.makedirs(path)
for _file in files:
filename = _file.filename
if filename != main:
if "/" in filename:
_filename = filename.split("/")
dir = os.path.join(path, _filename[0])
if not os.path.exists(dir):
os.mkdir(dir)
_filepath = os.path.join(path, *_filename)
else:
_filepath = os.path.join(path, filename)
try:
f = open(_filepath, "wb")
except:
# Trying to write the Folder
pass
else:
filenames.append(filename)
__file = myfile.read(filename)
f.write(__file)
f.close()
# Now read the main one (to parse)
file = myfile.read(main)
myfile.close()
# Check for NetworkLink
if "<NetworkLink>" in file:
try:
# Remove extraneous whitespace
parser = etree.XMLParser(recover=True, remove_blank_text=True)
tree = etree.XML(file, parser)
# Find contents of href tag (must be a better way?)
url = ""
for element in tree.iter():
if element.tag == "{%s}href" % KML_NAMESPACE:
url = element.text
if url:
# Follow NetworkLink (synchronously)
warning2 = self.fetch_kml(url, filepath)
warning += warning2
except (etree.XMLSyntaxError,):
e = sys.exc_info()[1]
warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)
# Check for Overlays
if "<GroundOverlay>" in file:
warning += "GroundOverlay"
if "<ScreenOverlay>" in file:
warning += "ScreenOverlay"
for filename in filenames:
replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
filename)
# Rewrite all references to point to the correct place
# need to catch <Icon><href> (which could be done via lxml)
# & also <description><![CDATA[<img src=" (which can't)
file = file.replace(filename, replace)
# Write main file to cache
f = open(filepath, "w")
f.write(file)
f.close()
return warning
# -------------------------------------------------------------------------
    @staticmethod
    def geocode(address, postcode=None, Lx_ids=None, geocoder="google"):
        """
            Geocode an Address
            - used by S3LocationSelectorWidget2
              settings.get_gis_geocode_imported_addresses

            @param address: street address
            @param postcode: postcode
            @param Lx_ids: list of ancestor IDs
            @param geocoder: which geocoder service to use

            @return: dict(lat=..., lon=...) on success, else an error string
        """
        from geopy import geocoders

        if geocoder == "google":
            g = geocoders.GoogleV3()
        elif geocoder == "yahoo":
            apikey = current.deployment_settings.get_gis_api_yahoo()
            g = geocoders.Yahoo(apikey)
        else:
            # @ToDo
            raise NotImplementedError

        location = address
        if postcode:
            location = "%s,%s" % (location, postcode)

        # NOTE(review): Lx is never initialised when Lx_ids is not supplied,
        # so the 'if Lx:' below raises NameError in that case, which the
        # outer except turns into an error-string result -- verify & fix
        L5 = L4 = L3 = L2 = L1 = L0 = None
        if Lx_ids:
            # Convert Lx IDs to Names
            table = current.s3db.gis_location
            limit = len(Lx_ids)
            if limit > 1:
                query = (table.id.belongs(Lx_ids))
            else:
                query = (table.id == Lx_ids[0])
            db = current.db
            Lx = db(query).select(table.id,
                                  table.name,
                                  table.level,
                                  table.gis_feature_type,
                                  # Better as separate query
                                  #table.lon_min,
                                  #table.lat_min,
                                  #table.lon_max,
                                  #table.lat_max,
                                  # Better as separate query
                                  #table.wkt,
                                  limitby=(0, limit),
                                  orderby=~table.level
                                  )
            if Lx:
                Lx_names = ",".join([l.name for l in Lx])
                location = "%s,%s" % (location, Lx_names)
                # Remember the id of each hierarchy level present
                for l in Lx:
                    if l.level == "L0":
                        L0 = l.id
                        continue
                    elif l.level == "L1":
                        L1 = l.id
                        continue
                    elif l.level == "L2":
                        L2 = l.id
                        continue
                    elif l.level == "L3":
                        L3 = l.id
                        continue
                    elif l.level == "L4":
                        L4 = l.id
                        continue
                    elif l.level == "L5":
                        L5 = l.id
                # Keyed by location id after this point
                Lx = Lx.as_dict()

        try:
            results = g.geocode(location, exactly_one=False)
            if len(results) == 1:
                place, (lat, lon) = results[0]
                if Lx:
                    # Check Results are for a specific address & not just that for the City
                    results = g.geocode(Lx_names, exactly_one=False)
                    if len(results) == 1:
                        place2, (lat2, lon2) = results[0]
                        if place == place2:
                            results = "We can only geocode to the Lx"
                        else:
                            if Lx:
                                # Check Results are within relevant bounds
                                L0_row = None
                                wkt = None
                                # Use the most specific ancestor that has a real polygon
                                # (gis_feature_type != 1 means not a plain point)
                                if L5 and Lx[L5]["gis_feature_type"] != 1:
                                    wkt = db(table.id == L5).select(table.wkt,
                                                                    limitby=(0, 1)
                                                                    ).first().wkt
                                    used_Lx = "L5"
                                elif L4 and Lx[L4]["gis_feature_type"] != 1:
                                    wkt = db(table.id == L4).select(table.wkt,
                                                                    limitby=(0, 1)
                                                                    ).first().wkt
                                    used_Lx = "L4"
                                elif L3 and Lx[L3]["gis_feature_type"] != 1:
                                    wkt = db(table.id == L3).select(table.wkt,
                                                                    limitby=(0, 1)
                                                                    ).first().wkt
                                    used_Lx = "L3"
                                elif L2 and Lx[L2]["gis_feature_type"] != 1:
                                    wkt = db(table.id == L2).select(table.wkt,
                                                                    limitby=(0, 1)
                                                                    ).first().wkt
                                    used_Lx = "L2"
                                elif L1 and Lx[L1]["gis_feature_type"] != 1:
                                    wkt = db(table.id == L1).select(table.wkt,
                                                                    limitby=(0, 1)
                                                                    ).first().wkt
                                    used_Lx = "L1"
                                elif L0:
                                    L0_row = db(table.id == L0).select(table.wkt,
                                                                       table.lon_min,
                                                                       table.lat_min,
                                                                       table.lon_max,
                                                                       table.lat_max,
                                                                       limitby=(0, 1)
                                                                       ).first()
                                    wkt = L0_row.wkt
                                    used_Lx = "L0"
                                if wkt:
                                    from shapely.geometry import point
                                    from shapely.wkt import loads as wkt_loads
                                    try:
                                        # Enable C-based speedups available from 1.2.10+
                                        from shapely import speedups
                                        speedups.enable()
                                    except:
                                        s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")
                                    test = point.Point(lon, lat)
                                    shape = wkt_loads(wkt)
                                    ok = test.intersects(shape)
                                    if not ok:
                                        # NOTE(review): Lx is keyed by location id, not by
                                        # level string, so Lx[used_Lx] looks like a KeyError
                                        # waiting to happen -- verify & fix
                                        results = "Returned value not within %s" % Lx[used_Lx].name
                                elif L0:
                                    # Check within country at least
                                    if not L0_row:
                                        L0_row = db(table.id == L0).select(table.lon_min,
                                                                           table.lat_min,
                                                                           table.lon_max,
                                                                           table.lat_max,
                                                                           limitby=(0, 1)
                                                                           ).first()
                                    if lat < L0_row["lat_max"] and \
                                       lat > L0_row["lat_min"] and \
                                       lon < L0_row["lon_max"] and \
                                       lon > L0_row["lon_min"]:
                                        ok = True
                                    else:
                                        ok = False
                                        # NOTE(review): 'check' is undefined here => NameError
                                        # (caught by the outer except) -- should presumably be
                                        # the country's name, e.g. Lx[L0]["name"]
                                        results = "Returned value not within %s" % check.name
                                else:
                                    # We'll just have to trust it!
                                    ok = True
                                if ok:
                                    results = dict(lat=lat, lon=lon)
                                elif not results:
                                    results = "Can't check that these results are specific enough"
                            else:
                                results = "Can't check that these results are specific enough"
                else:
                    # We'll just have to trust it!
                    results = dict(lat=lat, lon=lon)
            elif len(results):
                results = "Multiple results found"
                # @ToDo: Iterate through the results to see if just 1 is within the right bounds
            else:
                results = "No results found"
        except:
            import sys
            error = sys.exc_info()[1]
            results = str(error)
        return results
# -------------------------------------------------------------------------
@staticmethod
def get_bearing(lat_start, lon_start, lat_end, lon_end):
"""
Given a Start & End set of Coordinates, return a Bearing
Formula from: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
delta_lon = lon_start - lon_end
bearing = math.atan2(sin(delta_lon) * cos(lat_end),
(cos(lat_start) * sin(lat_end)) - \
(sin(lat_start) * cos(lat_end) * cos(delta_lon))
)
# Convert to a compass bearing
bearing = (bearing + 360) % 360
return bearing
# -------------------------------------------------------------------------
    def get_bounds(self, features=None, parent=None):
        """
            Calculate the Bounds of a list of Point Features
            e.g. When a map is displayed that focuses on a collection of points,
                 the map is zoomed to show just the region bounding the points.
            e.g. To use in GPX export for correct zooming

            Ensure a minimum size of bounding box, and that the points
            are inset from the border.

            @param features: A list of point features
            @param parent: A location_id to provide a polygonal bounds suitable
                           for validating child locations
                           (returns a 5-tuple lat_min, lon_min, lat_max,
                            lon_max, name instead of a dict)

            @ToDo: Support Polygons (separate function?)
        """
        if parent:
            table = current.s3db.gis_location
            db = current.db
            parent = db(table.id == parent).select(table.id,
                                                   table.level,
                                                   table.name,
                                                   table.parent,
                                                   table.path,
                                                   table.lon,
                                                   table.lat,
                                                   table.lon_min,
                                                   table.lat_min,
                                                   table.lon_max,
                                                   table.lat_max).first()
            # Bounds are "unsuitable" when missing or degenerate
            # (location sits exactly on its own bbox edge)
            if parent.lon_min is None or \
               parent.lon_max is None or \
               parent.lat_min is None or \
               parent.lat_max is None or \
               parent.lon == parent.lon_min or \
               parent.lon == parent.lon_max or \
               parent.lat == parent.lat_min or \
               parent.lat == parent.lat_max:
                # This is unsuitable - try higher parent
                if parent.level == "L1":
                    if parent.parent:
                        # We can trust that L0 should have the data from prepop
                        L0 = db(table.id == parent.parent).select(table.name,
                                                                  table.lon_min,
                                                                  table.lat_min,
                                                                  table.lon_max,
                                                                  table.lat_max).first()
                        return L0.lat_min, L0.lon_min, L0.lat_max, L0.lon_max, L0.name
                if parent.path:
                    path = parent.path
                else:
                    # Lazily materialise the path
                    path = GIS.update_location_tree(dict(id=parent.id))
                path_list = map(int, path.split("/"))
                rows = db(table.id.belongs(path_list)).select(table.level,
                                                              table.name,
                                                              table.lat,
                                                              table.lon,
                                                              table.lon_min,
                                                              table.lat_min,
                                                              table.lon_max,
                                                              table.lat_max,
                                                              orderby=table.level)
                row_list = rows.as_list()
                row_list.reverse()
                # Walk up the ancestry looking for the first usable bbox
                ok = False
                for row in row_list:
                    if row["lon_min"] is not None and row["lon_max"] is not None and \
                       row["lat_min"] is not None and row["lat_max"] is not None and \
                       row["lon"] != row["lon_min"] != row["lon_max"] and \
                       row["lat"] != row["lat_min"] != row["lat_max"]:
                        ok = True
                        break
                if ok:
                    # This level is suitable
                    return row["lat_min"], row["lon_min"], row["lat_max"], row["lon_max"], row["name"]
            else:
                # The parent's own bounds are suitable
                return parent.lat_min, parent.lon_min, parent.lat_max, parent.lon_max, parent.name
            # Nothing usable found: fall back to the whole world
            return -90, -180, 90, 180, None

        # Minimum Bounding Box
        # - gives a minimum width and height in degrees for the region shown.
        # Without this, a map showing a single point would not show any extent around that point.
        bbox_min_size = 0.05
        # Bounding Box Insets
        # - adds a small amount of distance outside the points.
        # Without this, the outermost points would be on the bounding box, and might not be visible.
        bbox_inset = 0.007

        if features:
            lon_min = 180
            lat_min = 90
            lon_max = -180
            lat_max = -90

            # Is this a simple feature set or the result of a join?
            try:
                lon = features[0].lon
                simple = True
            except (AttributeError, KeyError):
                simple = False

            # @ToDo: Optimised Geospatial routines rather than this crude hack
            for feature in features:
                try:
                    if simple:
                        lon = feature.lon
                        lat = feature.lat
                    else:
                        # A Join
                        lon = feature.gis_location.lon
                        lat = feature.gis_location.lat
                except AttributeError:
                    # Skip any rows without the necessary lat/lon fields
                    continue

                # Also skip those set to None. Note must use explicit test,
                # as zero is a legal value.
                if lon is None or lat is None:
                    continue

                lon_min = min(lon, lon_min)
                lat_min = min(lat, lat_min)
                lon_max = max(lon, lon_max)
                lat_max = max(lat, lat_max)

            # Assure a reasonable-sized box.
            delta_lon = (bbox_min_size - (lon_max - lon_min)) / 2.0
            if delta_lon > 0:
                lon_min -= delta_lon
                lon_max += delta_lon
            delta_lat = (bbox_min_size - (lat_max - lat_min)) / 2.0
            if delta_lat > 0:
                lat_min -= delta_lat
                lat_max += delta_lat

            # Move bounds outward by specified inset.
            lon_min -= bbox_inset
            lon_max += bbox_inset
            lat_min -= bbox_inset
            lat_max += bbox_inset

        else:
            # no features: use the configured map extent (or the world)
            config = GIS.get_config()
            if config.lat_min is not None:
                lat_min = config.lat_min
            else:
                lat_min = -90
            if config.lon_min is not None:
                lon_min = config.lon_min
            else:
                lon_min = -180
            if config.lat_max is not None:
                lat_max = config.lat_max
            else:
                lat_max = 90
            if config.lon_max is not None:
                lon_max = config.lon_max
            else:
                lon_max = 180

        return dict(lon_min=lon_min, lat_min=lat_min,
                    lon_max=lon_max, lat_max=lat_max)
# -------------------------------------------------------------------------
@staticmethod
def _lookup_parent_path(feature_id):
"""
Helper that gets parent and path for a location.
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.name,
table.level,
table.path,
table.parent,
limitby=(0, 1)).first()
return feature
# -------------------------------------------------------------------------
@staticmethod
def get_children(id, level=None):
"""
Return a list of IDs of all GIS Features which are children of
the requested feature, using Materialized path for retrieving
the children
This has been chosen over Modified Preorder Tree Traversal for
greater efficiency:
http://eden.sahanafoundation.org/wiki/HaitiGISToDo#HierarchicalTrees
@param: level - optionally filter by level
@return: Rows object containing IDs & Names
Note: This does NOT include the parent location itself
"""
db = current.db
try:
table = db.gis_location
except:
# Being run from CLI for debugging
table = current.s3db.gis_location
query = (table.deleted == False)
if level:
query &= (table.level == level)
term = str(id)
path = table.path
query &= ((path.like(term + "/%")) | \
(path.like("%/" + term + "/%")))
children = db(query).select(table.id,
table.name)
return children
# -------------------------------------------------------------------------
@staticmethod
def get_parents(feature_id, feature=None, ids_only=False):
"""
Returns a list containing ancestors of the requested feature.
If the caller already has the location row, including path and
parent fields, they can supply it via feature to avoid a db lookup.
If ids_only is false, each element in the list is a gluon.sql.Row
containing the gis_location record of an ancestor of the specified
location.
If ids_only is true, just returns a list of ids of the parents.
This avoids a db lookup for the parents if the specified feature
has a path.
List elements are in the opposite order as the location path and
exclude the specified location itself, i.e. element 0 is the parent
and the last element is the most distant ancestor.
Assists lazy update of a database without location paths by calling
update_location_tree to get the path.
"""
if not feature or "path" not in feature or "parent" not in feature:
feature = GIS._lookup_parent_path(feature_id)
if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = GIS.update_location_tree(feature)
path_list = map(int, path.split("/"))
if len(path_list) == 1:
# No parents - path contains only this feature.
return None
# Get path in the desired order, without current feature.
reverse_path = path_list[:-1]
reverse_path.reverse()
# If only ids are wanted, stop here.
if ids_only:
return reverse_path
# Retrieve parents - order in which they're returned is arbitrary.
s3db = current.s3db
table = s3db.gis_location
query = (table.id.belongs(reverse_path))
fields = [table.id, table.name, table.level, table.lat, table.lon]
unordered_parents = current.db(query).select(cache=s3db.cache,
*fields)
# Reorder parents in order of reversed path.
unordered_ids = [row.id for row in unordered_parents]
parents = [unordered_parents[unordered_ids.index(path_id)]
for path_id in reverse_path if path_id in unordered_ids]
return parents
else:
return None
# -------------------------------------------------------------------------
    def get_parent_per_level(self, results, feature_id,
                             feature=None,
                             ids=True,
                             names=True):
        """
            Adds ancestor of requested feature for each level to supplied dict.

            If the caller already has the location row, including path and
            parent fields, they can supply it via feature to avoid a db lookup.

            If a dict is not supplied in results, one is created. The results
            dict is returned in either case.

            If ids=True and names=False (used by old S3LocationSelectorWidget):
            For each ancestor, an entry is added to results, like
            ancestor.level : ancestor.id

            If ids=False and names=True (used by address_onvalidation):
            For each ancestor, an entry is added to results, like
            ancestor.level : ancestor.name

            If ids=True and names=True (used by new S3LocationSelectorWidget):
            For each ancestor, an entry is added to results, like
            ancestor.level : {name : ancestor.name, id: ancestor.id}
        """
        if not results:
            results = {}
        id = feature_id
        # if we don't have a feature or a feature id return the dict as-is
        if not feature_id and not feature:
            return results
        if not feature_id and "path" not in feature and "parent" in feature:
            # gis_location_onvalidation on a Create => no ID yet
            # Read the Parent's path instead
            feature = self._lookup_parent_path(feature.parent)
            id = feature.id
        elif not feature or "path" not in feature or "parent" not in feature:
            feature = self._lookup_parent_path(feature_id)
        if feature and (feature.path or feature.parent):
            if feature.path:
                path = feature.path
            else:
                # Lazily materialise the path
                path = self.update_location_tree(feature)
            # Get ids of ancestors at each level.
            if feature.parent:
                strict = self.get_strict_hierarchy(feature.parent)
            else:
                strict = self.get_strict_hierarchy(id)
            if path and strict and not names:
                # No need to do a db lookup for parents in this case -- we
                # know the levels of the parents from their position in path.
                # Note ids returned from db are ints, not strings, so be
                # consistent with that.
                path_ids = map(int, path.split("/"))
                # This skips the last path element, which is the supplied
                # location.
                for (i, id) in enumerate(path_ids[:-1]):
                    results["L%i" % i] = id
            elif path:
                ancestors = self.get_parents(id, feature=feature)
                if ancestors:
                    for ancestor in ancestors:
                        if ancestor.level and ancestor.level in self.hierarchy_level_keys:
                            if names and ids:
                                results[ancestor.level] = Storage()
                                results[ancestor.level].name = ancestor.name
                                results[ancestor.level].id = ancestor.id
                            elif names:
                                results[ancestor.level] = ancestor.name
                            else:
                                results[ancestor.level] = ancestor.id
            if not feature_id:
                # Add the Parent in (we only need the version required for gis_location onvalidation here)
                results[feature.level] = feature.name
        if names:
            # We need to have entries for all levels
            # (both for address onvalidation & new LocationSelector)
            hierarchy_level_keys = self.hierarchy_level_keys
            for key in hierarchy_level_keys:
                # NOTE: dict.has_key is Python-2-only (file is py2)
                if not results.has_key(key):
                    results[key] = None
        return results
# -------------------------------------------------------------------------
def update_table_hierarchy_labels(self, tablename=None):
"""
Re-set table options that depend on location_hierarchy
Only update tables which are already defined
"""
levels = ["L1", "L2", "L3", "L4"]
labels = self.get_location_hierarchy()
db = current.db
if tablename and tablename in db:
# Update the specific table which has just been defined
table = db[tablename]
if tablename == "gis_location":
labels["L0"] = current.messages["COUNTRY"]
table.level.requires = \
IS_NULL_OR(IS_IN_SET(labels))
else:
for level in levels:
table[level].label = labels[level]
else:
# Do all Tables which are already defined
# gis_location
if "gis_location" in db:
table = db.gis_location
table.level.requires = \
IS_NULL_OR(IS_IN_SET(labels))
# These tables store location hierarchy info for XSLT export.
# Labels are used for PDF & XLS Reports
tables = ["org_office",
#"pr_person",
"pr_address",
"cr_shelter",
"asset_asset",
#"hms_hospital",
]
for tablename in tables:
if tablename in db:
table = db[tablename]
for level in levels:
table[level].label = labels[level]
# -------------------------------------------------------------------------
    @staticmethod
    def set_config(config_id=None, force_update_cache=False):
        """
            Reads the specified GIS config from the DB, caches it in response.

            Passing in a false or non-existent id will cause the personal config,
            if any, to be used, else the site config (uuid SITE_DEFAULT), else
            their fallback values defined in this class.

            If force_update_cache is true, the config will be read and cached in
            response even if the specified config is the same as what's already
            cached. Used when the config was just written.

            The config itself will be available in response.s3.gis.config.
            Scalar fields from the gis_config record and its linked
            gis_projection record have the same names as the fields in their
            tables and can be accessed as response.s3.gis.<fieldname>.

            Returns the id of the config it actually used, if any.

            @param: config_id. use '0' to set the SITE_DEFAULT

            @ToDo: Merge configs for Event
        """
        _gis = current.response.s3.gis
        # If an id has been supplied, try it first. If it matches what's in
        # response, there's no work to do.
        if config_id and not force_update_cache and \
           _gis.config and \
           _gis.config.id == config_id:
            return
        db = current.db
        s3db = current.s3db
        ctable = s3db.gis_config
        mtable = s3db.gis_marker
        ptable = s3db.gis_projection
        stable = s3db.gis_symbology
        ltable = s3db.gis_layer_config
        # Fields pulled from config + linked marker & projection records
        fields = [ctable.id,
                  ctable.default_location_id,
                  ctable.geocoder,
                  ctable.lat_min,
                  ctable.lat_max,
                  ctable.lon_min,
                  ctable.lon_max,
                  ctable.zoom,
                  ctable.lat,
                  ctable.lon,
                  ctable.pe_id,
                  ctable.symbology_id,
                  ctable.wmsbrowser_url,
                  ctable.wmsbrowser_name,
                  ctable.zoom_levels,
                  mtable.image,
                  mtable.height,
                  mtable.width,
                  ptable.epsg,
                  ptable.proj4js,
                  ptable.maxExtent,
                  ptable.units,
                  ]
        cache = Storage()
        row = None
        rows = None
        if config_id:
            # Merge this one with the Site Default
            query = (ctable.id == config_id) | \
                    (ctable.uuid == "SITE_DEFAULT")
            # May well not be complete, so Left Join
            left = [ptable.on(ptable.id == ctable.projection_id),
                    stable.on(stable.id == ctable.symbology_id),
                    mtable.on(mtable.id == stable.marker_id),
                    ]
            rows = db(query).select(*fields,
                                    left=left,
                                    orderby=ctable.pe_type,
                                    limitby=(0, 2))
            if len(rows) == 1:
                # The requested config must be invalid, so just use site default
                row = rows.first()
        # NOTE(review): identity test against an int literal -- works for the
        # CPython small-int cache but '== 0' would be the safe spelling
        elif config_id is 0:
            # Use site default
            query = (ctable.uuid == "SITE_DEFAULT") & \
                    (mtable.id == stable.marker_id) & \
                    (stable.id == ctable.symbology_id) & \
                    (ptable.id == ctable.projection_id)
            row = db(query).select(*fields,
                                   limitby=(0, 1)).first()
            if not row:
                # No configs found at all
                _gis.config = cache
                return cache
        # If no id supplied, extend the site config with any personal or OU configs
        if not rows and not row:
            # Read personalised config, if available.
            auth = current.auth
            if auth.is_logged_in():
                pe_id = auth.user.pe_id
                # OU configs
                # List of roles to check (in order)
                roles = ["Staff", "Volunteer"]
                role_paths = s3db.pr_get_role_paths(pe_id, roles=roles)
                # Unordered list of PEs
                pes = []
                for role in roles:
                    if role in role_paths:
                        # @ToDo: Allow selection of which OU a person's config should inherit from for disambiguation
                        # - store in s3db.gis_config?
                        # - needs a method in gis_config_form_setup() to populate the dropdown from the OUs (in this person's Path for this person's, would have to be a dynamic lookup for Admins)
                        pes = role_paths[role].nodes()
                        # Staff don't check Volunteer's OUs
                        break
                query = (ctable.uuid == "SITE_DEFAULT") | \
                        ((ctable.pe_id == pe_id) & \
                         (ctable.pe_default != False))
                len_pes = len(pes)
                if len_pes == 1:
                    query |= (ctable.pe_id == pes[0])
                elif len_pes:
                    query |= (ctable.pe_id.belongs(pes))
                # Personal may well not be complete, so Left Join
                left = [ptable.on(ptable.id == ctable.projection_id),
                        stable.on(stable.id == ctable.symbology_id),
                        mtable.on(mtable.id == stable.marker_id),
                        ]
                # Order by pe_type (defined in gis_config)
                # @ToDo: Do this purely from the hierarchy
                rows = db(query).select(*fields,
                                        left=left,
                                        orderby=ctable.pe_type)
                if len(rows) == 1:
                    row = rows.first()
        if rows and not row:
            # Merge Configs: first non-None value per key wins
            cache["ids"] = []
            for row in rows:
                config = row["gis_config"]
                if not config_id:
                    config_id = config.id
                cache["ids"].append(config.id)
                for key in config:
                    if key in ["delete_record", "gis_layer_config", "gis_menu", "update_record"]:
                        continue
                    if key not in cache or cache[key] is None:
                        cache[key] = config[key]
                if "epsg" not in cache or cache["epsg"] is None:
                    projection = row["gis_projection"]
                    for key in ["epsg", "units", "maxExtent", "proj4js"]:
                        cache[key] = projection[key] if key in projection \
                                                     else None
                if "marker_image" not in cache or \
                   cache["marker_image"] is None:
                    marker = row["gis_marker"]
                    for key in ["image", "height", "width"]:
                        cache["marker_%s" % key] = marker[key] if key in marker \
                                                                else None
            # Add NULL values for any that aren't defined, to avoid KeyErrors
            for key in ["epsg", "units", "proj4js", "maxExtent",
                        "marker_image", "marker_height", "marker_width",
                        ]:
                if key not in cache:
                    cache[key] = None
        if not row:
            # No personal config or not logged in. Use site default.
            query = (ctable.uuid == "SITE_DEFAULT") & \
                    (mtable.id == stable.marker_id) & \
                    (stable.id == ctable.symbology_id) & \
                    (ptable.id == ctable.projection_id)
            row = db(query).select(*fields,
                                   limitby=(0, 1)).first()
            if not row:
                # No configs found at all
                _gis.config = cache
                return cache
        if not cache:
            # We had a single row
            config = row["gis_config"]
            config_id = config.id
            cache["ids"] = [config_id]
            projection = row["gis_projection"]
            marker = row["gis_marker"]
            for key in config:
                cache[key] = config[key]
            for key in ["epsg", "maxExtent", "proj4js", "units"]:
                cache[key] = projection[key] if key in projection else None
            for key in ["image", "height", "width"]:
                cache["marker_%s" % key] = marker[key] if key in marker \
                                                       else None
        # Store the values
        _gis.config = cache
        return cache
# -------------------------------------------------------------------------
@staticmethod
def get_config():
"""
Returns the current GIS config structure.
@ToDo: Config() class
"""
gis = current.response.s3.gis
if not gis.config:
# Ask set_config to put the appropriate config in response.
if current.session.s3.gis_config_id:
GIS.set_config(current.session.s3.gis_config_id)
else:
GIS.set_config()
return gis.config
# -------------------------------------------------------------------------
def get_location_hierarchy(self, level=None, location=None):
"""
Returns the location hierarchy and it's labels
@param: level - a specific level for which to lookup the label
@param: location - the location_id to lookup the location for
currently only the actual location is supported
@ToDo: Do a search of parents to allow this
lookup for any location
"""
_levels = self.hierarchy_levels
_location = location
if not location and _levels:
# Use cached value
if level:
if level in _levels:
return _levels[level]
else:
return level
else:
return _levels
T = current.T
COUNTRY = current.messages["COUNTRY"]
if level == "L0":
return COUNTRY
db = current.db
s3db = current.s3db
table = s3db.gis_hierarchy
fields = [table.uuid,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
]
query = (table.uuid == "SITE_DEFAULT")
if not location:
config = GIS.get_config()
location = config.region_location_id
if location:
# Try the Region, but ensure we have the fallback available in a single query
query = query | (table.location_id == location)
rows = db(query).select(cache=s3db.cache,
*fields)
if len(rows) > 1:
# Remove the Site Default
filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(filter)
elif not rows:
# prepop hasn't run yet
if level:
return level
levels = OrderedDict()
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if key == "L0":
levels[key] = COUNTRY
else:
levels[key] = key
return levels
row = rows.first()
if level:
try:
return T(row[level])
except:
return level
else:
levels = OrderedDict()
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if key == "L0":
levels[key] = COUNTRY
elif key in row and row[key]:
# Only include rows with values
levels[key] = str(T(row[key]))
if not _location:
# Cache the value
self.hierarchy_levels = levels
if level:
return levels[level]
else:
return levels
# -------------------------------------------------------------------------
def get_strict_hierarchy(self, location=None):
"""
Returns the strict hierarchy value from the current config.
@param: location - the location_id of the record to check
"""
s3db = current.s3db
table = s3db.gis_hierarchy
# Read the system default
# @ToDo: Check for an active gis_config region?
query = (table.uuid == "SITE_DEFAULT")
if location:
# Try the Location's Country, but ensure we have the fallback available in a single query
query = query | (table.location_id == self.get_parent_country(location))
rows = current.db(query).select(table.uuid,
table.strict_hierarchy,
cache=s3db.cache)
if len(rows) > 1:
# Remove the Site Default
filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(filter)
row = rows.first()
if row:
strict = row.strict_hierarchy
else:
# Pre-pop hasn't run yet
return False
return strict
# -------------------------------------------------------------------------
def get_max_hierarchy_level(self):
"""
Returns the deepest level key (i.e. Ln) in the current hierarchy.
- used by gis_location_onvalidation()
"""
location_hierarchy = self.get_location_hierarchy()
return max(location_hierarchy)
# -------------------------------------------------------------------------
def get_all_current_levels(self, level=None):
"""
Get the current hierarchy levels plus non-hierarchy levels.
"""
all_levels = OrderedDict()
all_levels.update(self.get_location_hierarchy())
#T = current.T
#all_levels["GR"] = T("Location Group")
#all_levels["XX"] = T("Imported")
if level:
try:
return all_levels[level]
except Exception, exception:
return level
else:
return all_levels
# -------------------------------------------------------------------------
@staticmethod
def get_countries(key_type="id"):
"""
Returns country code or L0 location id versus name for all countries.
The lookup is cached in the session
If key_type is "code", these are returned as an OrderedDict with
country code as the key. If key_type is "id", then the location id
is the key. In all cases, the value is the name.
"""
session = current.session
if "gis" not in session:
session.gis = Storage()
gis = session.gis
if gis.countries_by_id:
cached = True
else:
cached = False
if not cached:
s3db = current.s3db
table = s3db.gis_location
ttable = s3db.gis_location_tag
query = (table.level == "L0") & \
(ttable.tag == "ISO2") & \
(ttable.location_id == table.id)
countries = current.db(query).select(table.id,
table.name,
ttable.value,
orderby=table.name)
if not countries:
return []
countries_by_id = OrderedDict()
countries_by_code = OrderedDict()
for row in countries:
location = row["gis_location"]
countries_by_id[location.id] = location.name
countries_by_code[row["gis_location_tag"].value] = location.name
# Cache in the session
gis.countries_by_id = countries_by_id
gis.countries_by_code = countries_by_code
if key_type == "id":
return countries_by_id
else:
return countries_by_code
elif key_type == "id":
return gis.countries_by_id
else:
return gis.countries_by_code
# -------------------------------------------------------------------------
@staticmethod
def get_country(key, key_type="id"):
"""
Returns country name for given code or id from L0 locations.
The key can be either location id or country code, as specified
by key_type.
"""
if key:
if current.gis.get_countries(key_type):
if key_type == "id":
return current.session.gis.countries_by_id[key]
else:
return current.session.gis.countries_by_code[key]
return None
# -------------------------------------------------------------------------
def get_parent_country(self, location, key_type="id"):
"""
Returns the parent country for a given record
@param: location: the location or id to search for
@param: key_type: whether to return an id or code
@ToDo: Optimise to not use try/except
"""
if not location:
return None
db = current.db
s3db = current.s3db
# @ToDo: Avoid try/except here!
# - separate parameters best as even isinstance is expensive
try:
# location is passed as integer (location_id)
table = s3db.gis_location
location = db(table.id == location).select(table.id,
table.path,
table.level,
limitby=(0, 1),
cache=s3db.cache).first()
except:
# location is passed as record
pass
if location.level == "L0":
if key_type == "id":
return location.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == location.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
else:
parents = self.get_parents(location.id,
feature=location)
if parents:
for row in parents:
if row.level == "L0":
if key_type == "id":
return row.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == row.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
return None
# -------------------------------------------------------------------------
def get_default_country(self, key_type="id"):
"""
Returns the default country for the active gis_config
@param: key_type: whether to return an id or code
"""
config = GIS.get_config()
if config.default_location_id:
return self.get_parent_country(config.default_location_id,
key_type=key_type)
return None
# -------------------------------------------------------------------------
    def get_features_in_polygon(self, location, tablename=None, category=None):
        """
            Returns a gluon.sql.Rows of Features within a Polygon.
            The Polygon can be either a WKT string or the ID of a record in the
            gis_location table

            @param location: gis_location id or a raw (MULTI)POLYGON WKT string
            @param tablename: resource table to search; must have a location_id
            @param category: unused here — TODO confirm whether callers pass it

            Currently unused.
            @ToDo: Optimise to not use try/except
        """

        from shapely.geos import ReadingError
        from shapely.wkt import loads as wkt_loads

        try:
            # Enable C-based speedups available from 1.2.10+
            from shapely import speedups
            speedups.enable()
        except:
            s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")

        db = current.db
        s3db = current.s3db
        locations = s3db.gis_location

        try:
            # Branch 1: location is an int-like id -> load the polygon record
            location_id = int(location)
            # Check that the location is a polygon
            query = (locations.id == location_id)
            location = db(query).select(locations.wkt,
                                        locations.lon_min,
                                        locations.lon_max,
                                        locations.lat_min,
                                        locations.lat_max,
                                        limitby=(0, 1)).first()
            if location:
                wkt = location.wkt
                if wkt and (wkt.startswith("POLYGON") or \
                            wkt.startswith("MULTIPOLYGON")):
                    # ok: keep the stored bounding box for the fast pre-filter
                    lon_min = location.lon_min
                    lon_max = location.lon_max
                    lat_min = location.lat_min
                    lat_max = location.lat_max
                else:
                    s3_debug("Location searched within isn't a Polygon!")
                    return None
            # NB if no record was found, wkt/lon_min stay unbound and the
            #    NameError below is swallowed by the "Invalid Polygon!" except
        except: # @ToDo: need specific exception
            # Branch 2: location is a raw WKT string; no bbox available
            wkt = location
            if (wkt.startswith("POLYGON") or wkt.startswith("MULTIPOLYGON")):
                # ok: lon_min=None signals "no bbox, full geometry check only"
                lon_min = None
            else:
                s3_debug("This isn't a Polygon!")
                return None

        try:
            polygon = wkt_loads(wkt)
        except: # @ToDo: need specific exception
            s3_debug("Invalid Polygon!")
            return None

        table = s3db[tablename]
        if "location_id" not in table.fields():
            # @ToDo: Add any special cases to be able to find the linked location
            s3_debug("This table doesn't have a location_id!")
            return None

        # Join each resource record to its location
        query = (table.location_id == locations.id)
        if "deleted" in table.fields:
            query &= (table.deleted == False)
        # @ToDo: Check AAA (do this as a resource filter?)

        features = db(query).select(locations.wkt,
                                    locations.lat,
                                    locations.lon,
                                    table.ALL)
        output = Rows()
        # @ToDo: provide option to use PostGIS/Spatialite
        # settings = current.deployment_settings
        # if settings.gis.spatialdb and settings.database.db_type == "postgres":
        if lon_min is None:
            # We have no BBOX so go straight to the full geometry check
            for row in features:
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    # Point feature without stored WKT: synthesise one
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    s3_debug(
                        "Error reading wkt of location with id",
                        value=row.id
                    )
        else:
            # 1st check for Features included within the bbox (faster)
            def in_bbox(row):
                _location = row.gis_location
                return (_location.lon > lon_min) & \
                       (_location.lon < lon_max) & \
                       (_location.lat > lat_min) & \
                       (_location.lat < lat_max)
            for row in features.find(lambda row: in_bbox(row)):
                # Search within this subset with a full geometry check
                # Uses Shapely.
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    s3_debug(
                        "Error reading wkt of location with id",
                        value = row.id,
                    )
        return output
# -------------------------------------------------------------------------
    def get_features_in_radius(self, lat, lon, radius, tablename=None, category=None):
        """
            Returns Features within a Radius (in km) of a LatLon Location

            @param lat/lon: centre of the search circle (degrees)
            @param radius: search radius in km
            @param tablename: optional resource table to join via location_id
            @param category: unused here — TODO confirm whether callers pass it

            Unused
        """

        import math

        db = current.db
        settings = current.deployment_settings

        if settings.gis.spatialdb and settings.database.db_type == "postgres":
            # Use PostGIS routine
            # The ST_DWithin function call will automatically include a bounding box comparison that will make use of any indexes that are available on the geometries.
            # @ToDo: Support optional Category (make this a generic filter?)
            import psycopg2
            import psycopg2.extras

            dbname = settings.database.database
            username = settings.database.username
            password = settings.database.password
            host = settings.database.host
            port = settings.database.port or "5432"

            # Convert km to degrees (since we're using the_geom not the_geog)
            radius = math.degrees(float(radius) / RADIUS_EARTH)

            connection = psycopg2.connect("dbname=%s user=%s password=%s host=%s port=%s" % (dbname, username, password, host, port))
            cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
            info_string = "SELECT column_name, udt_name FROM information_schema.columns WHERE table_name = 'gis_location' or table_name = '%s';" % tablename
            cursor.execute(info_string)
            # @ToDo: Look at more optimal queries for just those fields we need
            # NOTE(review): lat/lon/radius/tablename are %-interpolated into
            # the SQL strings rather than passed as bind parameters — safe
            # only while these are always numeric/internal; confirm callers
            if tablename:
                # Lookup the resource
                query_string = cursor.mogrify("SELECT * FROM gis_location, %s WHERE %s.location_id = gis_location.id and ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (tablename, tablename, lat, lon, radius))
            else:
                # Lookup the raw Locations
                query_string = cursor.mogrify("SELECT * FROM gis_location WHERE ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (lat, lon, radius))

            cursor.execute(query_string)
            # @ToDo: Export Rows?
            features = []
            for record in cursor:
                d = dict(record.items())
                row = Storage()
                # @ToDo: Optional support for Polygons
                if tablename:
                    row.gis_location = Storage()
                    row.gis_location.id = d["id"]
                    row.gis_location.lat = d["lat"]
                    row.gis_location.lon = d["lon"]
                    row.gis_location.lat_min = d["lat_min"]
                    row.gis_location.lon_min = d["lon_min"]
                    row.gis_location.lat_max = d["lat_max"]
                    row.gis_location.lon_max = d["lon_max"]
                    row[tablename] = Storage()
                    row[tablename].id = d["id"]
                    row[tablename].name = d["name"]
                else:
                    row.name = d["name"]
                    row.id = d["id"]
                    row.lat = d["lat"]
                    row.lon = d["lon"]
                    row.lat_min = d["lat_min"]
                    row.lon_min = d["lon_min"]
                    row.lat_max = d["lat_max"]
                    row.lon_max = d["lon_max"]
                features.append(row)

            return features

        #elif settings.database.db_type == "mysql":
            # Do the calculation in MySQL to pull back only the relevant rows
            # Raw MySQL Formula from: http://blog.peoplesdns.com/archives/24
            # PI = 3.141592653589793, mysql's pi() function returns 3.141593
            #pi = math.pi
            #query = """SELECT name, lat, lon, acos(SIN( PI()* 40.7383040 /180 )*SIN( PI()*lat/180 ))+(cos(PI()* 40.7383040 /180)*COS( PI()*lat/180) *COS(PI()*lon/180-PI()* -73.99319 /180))* 3963.191
            #AS distance
            #FROM gis_location
            #WHERE 1=1
            #AND 3963.191 * ACOS( (SIN(PI()* 40.7383040 /180)*SIN(PI() * lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180))) < = 1.5
            #ORDER BY 3963.191 * ACOS((SIN(PI()* 40.7383040 /180)*SIN(PI()*lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180)))"""
            # db.executesql(query)

        else:
            # Calculate in Python
            # Pull back all the rows within a square bounding box (faster than checking all features manually)
            # Then check each feature within this subset
            # http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
            # @ToDo: Support optional Category (make this a generic filter?)

            # shortcuts
            radians = math.radians
            degrees = math.degrees

            MIN_LAT = radians(-90)     # -PI/2
            MAX_LAT = radians(90)      # PI/2
            MIN_LON = radians(-180)    # -PI
            MAX_LON = radians(180)     #  PI

            # Convert to radians for the calculation
            r = float(radius) / RADIUS_EARTH
            radLat = radians(lat)
            radLon = radians(lon)

            # Calculate the bounding box
            minLat = radLat - r
            maxLat = radLat + r

            if (minLat > MIN_LAT) and (maxLat < MAX_LAT):
                # Normal case: widen longitude by the latitude-corrected delta
                deltaLon = math.asin(math.sin(r) / math.cos(radLat))
                minLon = radLon - deltaLon
                if (minLon < MIN_LON):
                    minLon += 2 * math.pi
                maxLon = radLon + deltaLon
                if (maxLon > MAX_LON):
                    maxLon -= 2 * math.pi
            else:
                # Special care for Poles & 180 Meridian:
                # http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates#PolesAnd180thMeridian
                minLat = max(minLat, MIN_LAT)
                maxLat = min(maxLat, MAX_LAT)
                minLon = MIN_LON
                maxLon = MAX_LON

            # Convert back to degrees
            minLat = degrees(minLat)
            minLon = degrees(minLon)
            maxLat = degrees(maxLat)
            maxLon = degrees(maxLon)

            # shortcut
            locations = db.gis_location

            query = (locations.lat > minLat) & (locations.lat < maxLat) & (locations.lon > minLon) & (locations.lon < maxLon)
            deleted = (locations.deleted == False)
            empty = (locations.lat != None) & (locations.lon != None)
            query = deleted & empty & query

            if tablename:
                # Lookup the resource
                table = current.s3db[tablename]
                query &= (table.location_id == locations.id)
                records = db(query).select(table.ALL,
                                           locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            else:
                # Lookup the raw Locations
                records = db(query).select(locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            features = Rows()
            for record in records:
                # Calculate the Great Circle distance
                if tablename:
                    distance = self.greatCircleDistance(lat,
                                                       lon,
                                                       record.gis_location.lat,
                                                       record.gis_location.lon)
                else:
                    distance = self.greatCircleDistance(lat,
                                                       lon,
                                                       record.lat,
                                                       record.lon)
                if distance < radius:
                    features.records.append(record)
                else:
                    # skip
                    continue

            return features
# -------------------------------------------------------------------------
def get_latlon(self, feature_id, filter=False):
"""
Returns the Lat/Lon for a Feature
used by display_feature() in gis controller
@param feature_id: the feature ID
@param filter: Filter out results based on deployment_settings
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.lat,
table.lon,
table.parent,
table.path,
limitby=(0, 1)).first()
# Zero is an allowed value, hence explicit test for None.
if "lon" in feature and "lat" in feature and \
(feature.lat is not None) and (feature.lon is not None):
return dict(lon=feature.lon, lat=feature.lat)
else:
# Step through ancestors to first with lon, lat.
parents = self.get_parents(feature.id, feature=feature)
if parents:
lon = lat = None
for row in parents:
if "lon" in row and "lat" in row and \
(row.lon is not None) and (row.lat is not None):
return dict(lon=row.lon, lat=row.lat)
# Invalid feature_id
return None
# -------------------------------------------------------------------------
@staticmethod
def get_marker(controller=None,
function=None,
):
"""
Returns a Marker dict
- called by S3REST: S3Resource.export_tree() for non-geojson resources
- called by S3Search
"""
marker = None
if controller and function:
# Lookup marker in the gis_feature table
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
ltable = s3db.gis_layer_symbology
mtable = s3db.gis_marker
try:
symbology_id = current.response.s3.gis.config.symbology_id
except:
# Config not initialised yet
config = GIS.get_config()
symbology_id = config.symbology_id
query = (ftable.controller == controller) & \
(ftable.function == function) & \
(ftable.layer_id == ltable.layer_id) & \
(ltable.symbology_id == symbology_id) & \
(ltable.marker_id == mtable.id)
marker = db(query).select(mtable.image,
mtable.height,
mtable.width,
ltable.gps_marker).first()
if marker:
_marker = marker["gis_marker"]
marker = dict(image=_marker.image,
height=_marker.height,
width=_marker.width,
gps_marker=marker["gis_layer_symbology"].gps_marker
)
if not marker:
# Default
marker = Marker().as_dict()
return marker
# -------------------------------------------------------------------------
    @staticmethod
    def get_location_data(resource):
        """
            Returns the locations, markers and popup tooltips for an XML export
            e.g. Feature Layers or Search results (Feature Resources)
            e.g. Exports in KML, GeoRSS or GPX format

            Called by S3REST: S3Resource.export_tree()
            @param: resource - S3Resource instance (required)

            @returns: dict with keys latlons/wkts/geojsons/markers/tooltips/
                      attributes (each keyed by tablename), or None if the
                      resource can't be mapped
        """

        NONE = current.messages["NONE"]
        #if DEBUG:
        #    start = datetime.datetime.now()

        db = current.db
        s3db = current.s3db
        request = current.request
        get_vars = request.get_vars

        format = current.auth.permission.format

        # 1) Resolve the Feature Layer config (popup/attr fields, trackable, polygons)
        ftable = s3db.gis_layer_feature

        layer = None

        layer_id = get_vars.get("layer", None)
        if layer_id:
            # Feature Layer
            query = (ftable.id == layer_id)
            layer = db(query).select(ftable.trackable,
                                     ftable.polygons,
                                     ftable.popup_label,
                                     ftable.popup_fields,
                                     ftable.attr_fields,
                                     limitby=(0, 1)).first()

        else:
            # e.g. Search results loaded as a Feature Resource layer
            # e.g. Volunteer Layer in Vulnerability module
            # e.g. KML, geoRSS or GPX export
            controller = request.controller
            function = request.function
            query = (ftable.controller == controller) & \
                    (ftable.function == function)

            layers = db(query).select(ftable.style_default,
                                      ftable.trackable,
                                      ftable.polygons,
                                      ftable.popup_label,
                                      ftable.popup_fields,
                                      ftable.attr_fields,
                                      )
            if len(layers) > 1:
                layers.exclude(lambda row: row.style_default == False)
                if len(layers) > 1:
                    # We can't provide details for the whole layer, but need to do a per-record check
                    return None
            if layers:
                layer = layers.first()

        # URL parameters can override the layer's field config
        attr_fields = get_vars.get("attr", [])
        if attr_fields:
            attr_fields = attr_fields.split(",")
        popup_fields = get_vars.get("popup", [])
        if popup_fields:
            popup_fields = popup_fields.split(",")
        if layer:
            popup_label = layer.popup_label
            if not popup_fields:
                popup_fields = layer.popup_fields or []
            if not attr_fields:
                attr_fields = layer.attr_fields or []
            trackable = layer.trackable
            polygons = layer.polygons
        else:
            popup_label = ""
            popup_fields = ["name"]
            trackable = False
            polygons = False

        table = resource.table
        tablename = resource.tablename
        pkey = table._id.name

        markers = {}
        tooltips = {}
        attributes = {}
        represents = {}
        if format == "geojson":
            # 2a) geojson: bulk-build per-record attributes & tooltip HTML
            if popup_fields or attr_fields:
                # Build the Attributes &/Popup Tooltips now so that representations can be
                # looked-up in bulk rather than as a separate lookup per record
                if popup_fields:
                    tips = {}
                    label_off = get_vars.get("label_off", None)
                    if popup_label and not label_off:
                        _tooltip = "(%s)" % current.T(popup_label)
                    else:
                        _tooltip = ""
                attr = {}

                fields = list(set(popup_fields + attr_fields))
                if pkey not in fields:
                    fields.insert(0, pkey)
                data = resource.select(fields,
                                       limit=None,
                                       represent=True)
                rfields = data["rfields"]
                popup_cols = []
                attr_cols = []
                for f in rfields:
                    fname = f.fname
                    selector = f.selector
                    if fname in popup_fields or selector in popup_fields:
                        popup_cols.append(f.colname)
                    if fname in attr_fields or selector in attr_fields:
                        attr_cols.append(f.colname)

                rows = data["rows"]
                for row in rows:
                    record_id = int(row[str(table[pkey])])
                    if attr_cols:
                        attribute = {}
                        for fieldname in attr_cols:
                            represent = row[fieldname]
                            if represent and represent != NONE:
                                # Skip empty fields
                                fname = fieldname.split(".")[1]
                                attribute[fname] = represent
                        attr[record_id] = attribute

                    if popup_cols:
                        tooltip = _tooltip
                        first = True
                        for fieldname in popup_cols:
                            represent = row[fieldname]
                            if represent and represent != NONE:
                                # Skip empty fields
                                if first:
                                    # First value goes before the "(label)"
                                    tooltip = "%s %s" % (represent, tooltip)
                                    first = False
                                else:
                                    tooltip = "%s<br />%s" % (tooltip, represent)
                        tips[record_id] = tooltip

                if attr_fields:
                    attributes[tablename] = attr
                if popup_fields:
                    tooltips[tablename] = tips

                #if DEBUG:
                #    end = datetime.datetime.now()
                #    duration = end - start
                #    duration = "{:.2f}".format(duration.total_seconds())
                #    if layer_id:
                #        query = (ftable.id == layer_id)
                #        layer_name = db(query).select(ftable.name,
                #                                      limitby=(0, 1)).first().name
                #    else:
                #        layer_name = "Unknown"
                #    _debug("Attributes/Tooltip lookup of layer %s completed in %s seconds" % \
                #            (layer_name, duration))

            _markers = get_vars.get("markers", None)
            if _markers:
                # Add a per-feature Marker
                marker_fn = s3db.get_config(tablename, "marker_fn")
                if marker_fn:
                    for record in resource:
                        markers[record[pkey]] = marker_fn(record)
                else:
                    # No configuration found so use default marker for all
                    c, f = tablename.split("_", 1)
                    markers = GIS.get_marker(c, f)
                # NOTE(review): this inserts the markers dict into itself
                # when marker_fn is unset (markers was just reassigned above)
                # - looks unintentional; confirm against S3XML.gis_encode()
                markers[tablename] = markers
        else:
            # 2b) KML, GeoRSS or GPX
            marker_fn = s3db.get_config(tablename, "marker_fn")
            if marker_fn:
                # Add a per-feature Marker
                for record in resource:
                    markers[record[pkey]] = marker_fn(record)
            else:
                # No configuration found so use default marker for all
                c, f = tablename.split("_", 1)
                markers = GIS.get_marker(c, f)
            # NOTE(review): same self-referential assignment as above when
            # marker_fn is unset - confirm
            markers[tablename] = markers

        # 3) Lookup the LatLons now so that it can be done as a single
        #    query rather than per record
        #if DEBUG:
        #    start = datetime.datetime.now()
        latlons = {}
        wkts = {}
        geojsons = {}
        gtable = s3db.gis_location
        if trackable:
            # Use S3Track
            ids = resource._ids
            # Ensure IDs in ascending order
            ids.sort()
            try:
                tracker = S3Trackable(table, record_ids=ids)
            except SyntaxError:
                # This table isn't trackable
                pass
            else:
                _latlons = tracker.get_location(_fields=[gtable.lat,
                                                         gtable.lon])
                index = 0
                for id in ids:
                    _location = _latlons[index]
                    latlons[id] = (_location.lat, _location.lon)
                    index += 1

        if not latlons:
            # Not trackable (or tracking failed): join to gis_location
            if "location_id" in table.fields:
                query = (table.id.belongs(resource._ids)) & \
                        (table.location_id == gtable.id)
            elif "site_id" in table.fields:
                stable = s3db.org_site
                query = (table.id.belongs(resource._ids)) & \
                        (table.site_id == stable.site_id) & \
                        (stable.location_id == gtable.id)
            else:
                # Can't display this resource on the Map
                return None

            if polygons:
                settings = current.deployment_settings
                tolerance = settings.get_gis_simplify_tolerance()
                if settings.get_gis_spatialdb():
                    if format == "geojson":
                        # Do the Simplify & GeoJSON direct from the DB
                        rows = db(query).select(table.id,
                                                gtable.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"))
                        for row in rows:
                            geojsons[row[tablename].id] = row.geojson
                    else:
                        # Do the Simplify direct from the DB
                        rows = db(query).select(table.id,
                                                gtable.the_geom.st_simplify(tolerance).st_astext().with_alias("wkt"))
                        for row in rows:
                            wkts[row[tablename].id] = row.wkt
                else:
                    rows = db(query).select(table.id,
                                            gtable.wkt)
                    simplify = GIS.simplify
                    if format == "geojson":
                        for row in rows:
                            # Simplify the polygon to reduce download size
                            geojson = simplify(row["gis_location"].wkt,
                                               tolerance=tolerance,
                                               output="geojson")
                            if geojson:
                                geojsons[row[tablename].id] = geojson
                    else:
                        for row in rows:
                            # Simplify the polygon to reduce download size
                            # & also to work around the recursion limit in libxslt
                            # http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309
                            wkt = simplify(row["gis_location"].wkt)
                            if wkt:
                                wkts[row[tablename].id] = wkt

            else:
                # Points
                rows = db(query).select(table.id,
                                        gtable.lat,
                                        gtable.lon)
                for row in rows:
                    _location = row["gis_location"]
                    latlons[row[tablename].id] = (_location.lat, _location.lon)

        _latlons = {}
        if latlons:
            _latlons[tablename] = latlons
        _wkts = {}
        _wkts[tablename] = wkts
        _geojsons = {}
        _geojsons[tablename] = geojsons

        #if DEBUG:
        #    end = datetime.datetime.now()
        #    duration = end - start
        #    duration = "{:.2f}".format(duration.total_seconds())
        #    _debug("latlons lookup of layer %s completed in %s seconds" % \
        #            (layer_name, duration))

        # Used by S3XML's gis_encode()
        return dict(latlons = _latlons,
                    wkts = _wkts,
                    geojsons = _geojsons,
                    markers = markers,
                    tooltips = tooltips,
                    attributes = attributes,
                    )
# -------------------------------------------------------------------------
@staticmethod
def get_shapefile_geojson(resource):
"""
Lookup Shapefile Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary simplification level & precision by Zoom level
- store this in the style?
"""
db = current.db
id = resource._ids[0]
tablename = "gis_layer_shapefile_%s" % id
table = db[tablename]
query = resource.get_query()
fields = []
fappend = fields.append
for f in table.fields:
if f not in ("layer_id", "lat", "lon"):
fappend(f)
attributes = {}
geojsons = {}
settings = current.deployment_settings
tolerance = settings.get_gis_simplify_tolerance()
if settings.get_gis_spatialdb():
# Do the Simplify & GeoJSON direct from the DB
fields.remove("the_geom")
fields.remove("wkt")
_fields = [table[f] for f in fields]
rows = db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson(precision=4).with_alias("geojson"),
*_fields)
for row in rows:
_row = row[tablename]
id = _row.id
geojsons[id] = row.geojson
_attributes = {}
for f in fields:
if f not in ("id"):
_attributes[f] = _row[f]
attributes[id] = _attributes
else:
_fields = [table[f] for f in fields]
rows = db(query).select(*_fields)
simplify = GIS.simplify
for row in rows:
# Simplify the polygon to reduce download size
geojson = simplify(row.wkt, tolerance=tolerance,
output="geojson")
id = row.id
if geojson:
geojsons[id] = geojson
_attributes = {}
for f in fields:
if f not in ("id", "wkt"):
_attributes[f] = row[f]
attributes[id] = _attributes
_attributes = {}
_attributes[tablename] = attributes
_geojsons = {}
_geojsons[tablename] = geojsons
# return 'locations'
return dict(attributes = _attributes,
geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def get_theme_geojson(resource):
"""
Lookup Theme Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
@ToDo: Vary precision by Lx
- store this (& tolerance map) in the style?
"""
s3db = current.s3db
tablename = "gis_theme_data"
table = s3db.gis_theme_data
gtable = s3db.gis_location
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
geojsons = {}
# @ToDo: How to get the tolerance to vary by level?
# - add Stored Procedure?
#if current.deployment_settings.get_gis_spatialdb():
# # Do the Simplify & GeoJSON direct from the DB
# rows = current.db(query).select(table.id,
# gtable.the_geom.st_simplify(0.01).st_asgeojson(precision=4).with_alias("geojson"))
# for row in rows:
# geojsons[row["gis_theme_data.id"]] = row.geojson
#else:
rows = current.db(query).select(table.id,
gtable.level,
gtable.wkt)
simplify = GIS.simplify
tolerance = {"L0": 0.01,
"L1": 0.005,
"L2": 0.00125,
"L3": 0.000625,
"L4": 0.0003125,
"L5": 0.00015625,
}
for row in rows:
grow = row.gis_location
# Simplify the polygon to reduce download size
geojson = simplify(grow.wkt,
tolerance=tolerance[grow.level],
output="geojson")
if geojson:
geojsons[row["gis_theme_data.id"]] = geojson
_geojsons = {}
_geojsons[tablename] = geojsons
# return 'locations'
return dict(geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def greatCircleDistance(lat1, lon1, lat2, lon2, quick=True):
"""
Calculate the shortest distance (in km) over the earth's sphere between 2 points
Formulae from: http://www.movable-type.co.uk/scripts/latlong.html
(NB We could also use PostGIS functions, where possible, instead of this query)
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
radians = math.radians
if quick:
# Spherical Law of Cosines (accurate down to around 1m & computationally quick)
lat1 = radians(lat1)
lat2 = radians(lat2)
lon1 = radians(lon1)
lon2 = radians(lon2)
distance = math.acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon2 - lon1)) * RADIUS_EARTH
return distance
else:
# Haversine
#asin = math.asin
sqrt = math.sqrt
pow = math.pow
dLat = radians(lat2 - lat1)
dLon = radians(lon2 - lon1)
a = pow(sin(dLat / 2), 2) + cos(radians(lat1)) * cos(radians(lat2)) * pow(sin(dLon / 2), 2)
c = 2 * math.atan2(sqrt(a), sqrt(1 - a))
#c = 2 * asin(sqrt(a)) # Alternate version
# Convert radians to kilometers
distance = RADIUS_EARTH * c
return distance
# -------------------------------------------------------------------------
@staticmethod
def create_poly(feature):
"""
Create a .poly file for OpenStreetMap exports
http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format
"""
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")
name = feature.name
if "wkt" in feature:
wkt = feature.wkt
else:
# WKT not included by default in feature, so retrieve this now
wkt = current.db(table.id == feature.id).select(table.wkt,
limitby=(0, 1)
).first().wkt
try:
shape = wkt_loads(wkt)
except:
error = "Invalid WKT: %s" % name
s3_debug(error)
return error
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
elif geom_type == "Polygon":
polygons = [shape]
else:
error = "Unsupported Geometry: %s, %s" % (name, geom_type)
s3_debug(error)
return error
if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
TEMP = os.path.join(os.getcwd(), "temp")
else:
import tempfile
TEMP = tempfile.gettempdir()
filename = "%s.poly" % name
filepath = os.path.join(TEMP, filename)
File = open(filepath, "w")
File.write("%s\n" % filename)
count = 1
for polygon in polygons:
File.write("%s\n" % count)
points = polygon.exterior.coords
for point in points:
File.write("\t%s\t%s\n" % (point[0], point[1]))
File.write("END\n")
++count
File.write("END\n")
File.close()
return None
# -------------------------------------------------------------------------
    @staticmethod
    def export_admin_areas(countries=[],
                           levels=["L0", "L1", "L2", "L3"],
                           format="geojson",
                           simplify=0.01,
                           decimals=4,
                           ):
        """
            Export admin areas to /static/cache for use by interactive web-mapping services
            - designed for use by the Vulnerability Mapping

            @param countries: list of ISO2 country codes
            @param levels: list of which Lx levels to export
            @param format: Only GeoJSON supported for now (may add KML &/or OSM later)
            @param simplify: tolerance for the simplification algorithm. False to disable simplification
            @param decimals: number of decimal points to include in the coordinates
        """

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location
        ifield = table.id
        if countries:
            # Restrict the L0 query to the requested ISO2 codes (joined via gis_location_tag)
            ttable = s3db.gis_location_tag
            cquery = (table.level == "L0") & \
                     (table.end_date == None) & \
                     (ttable.location_id == ifield) & \
                     (ttable.tag == "ISO2") & \
                     (ttable.value.belongs(countries))
        else:
            # All countries
            cquery = (table.level == "L0") & \
                     (table.end_date == None) & \
                     (table.deleted != True)

        # Three strategies for WKT -> GeoJSON:
        # spatial DB (server-side), GIS.simplify (when simplifying), or Shapely + geojson dumps
        if current.deployment_settings.get_gis_spatialdb():
            spatial = True
            _field = table.the_geom
            if simplify:
                # Do the Simplify & GeoJSON direct from the DB
                field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            else:
                # Do the GeoJSON direct from the DB
                field = _field.st_asgeojson(precision=decimals).with_alias("geojson")
        else:
            spatial = False
            field = table.wkt
            if simplify:
                _simplify = GIS.simplify
            else:
                from shapely.wkt import loads as wkt_loads
                from ..geojson import dumps
                try:
                    # Enable C-based speedups available from 1.2.10+
                    from shapely import speedups
                    speedups.enable()
                except:
                    s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")

        folder = os.path.join(current.request.folder, "static", "cache")

        features = []
        append = features.append

        # ---- L0: all selected countries into a single countries.geojson ----
        if "L0" in levels:
            # Reduce the decimals in output by 1
            _decimals = decimals -1
            if spatial:
                if simplify:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=_decimals).with_alias("geojson")
                else:
                    field = _field.st_asgeojson(precision=_decimals).with_alias("geojson")

            # NOTE: "countries" is rebound here from ISO2 codes to location Rows;
            # the later level-sections rely on this
            countries = db(cquery).select(ifield,
                                          field)
            for row in countries:
                if spatial:
                    id = row["gis_location"].id
                    geojson = row.geojson
                elif simplify:
                    id = row.id
                    wkt = row.wkt
                    if wkt:
                        geojson = _simplify(wkt, tolerance=simplify,
                                            decimals=_decimals,
                                            output="geojson")
                    else:
                        name = db(table.id == id).select(table.name,
                                                         limitby=(0, 1)).first().name
                        print >> sys.stderr, "No WKT: L0 %s %s" % (name, id)
                        continue
                else:
                    id = row.id
                    shape = wkt_loads(row.wkt)
                    # Compact Encoding
                    geojson = dumps(shape, separators=SEPARATORS)
                if geojson:
                    f = dict(type = "Feature",
                             properties = {"id": id},
                             geometry = json.loads(geojson)
                             )
                    append(f)

            if features:
                data = dict(type = "FeatureCollection",
                            features = features
                            )
                # Output to file
                filename = os.path.join(folder, "countries.geojson")
                File = open(filename, "w")
                File.write(json.dumps(data, separators=SEPARATORS))
                File.close()

        # Base queries per level (excluding deleted / ended records)
        q1 = (table.level == "L1") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q2 = (table.level == "L2") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q3 = (table.level == "L3") & \
             (table.deleted != True) & \
             (table.end_date == None)
        q4 = (table.level == "L4") & \
             (table.deleted != True) & \
             (table.end_date == None)

        # ---- L1: one file per country (1_<country_id>.geojson) ----
        # Note: "simplify" is progressively halved/quartered at each level so that
        # more-zoomed-in layers keep more detail
        if "L1" in levels:
            if "L0" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.005 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    _id = country.id
                else:
                    _id = country["gis_location"].id
                query = q1 & (table.parent == _id)
                features = []
                append = features.append
                rows = db(query).select(ifield,
                                        field)
                for row in rows:
                    if spatial:
                        id = row["gis_location"].id
                        geojson = row.geojson
                    elif simplify:
                        id = row.id
                        wkt = row.wkt
                        if wkt:
                            geojson = _simplify(wkt, tolerance=simplify,
                                                decimals=decimals,
                                                output="geojson")
                        else:
                            name = db(table.id == id).select(table.name,
                                                             limitby=(0, 1)).first().name
                            print >> sys.stderr, "No WKT: L1 %s %s" % (name, id)
                            continue
                    else:
                        id = row.id
                        shape = wkt_loads(row.wkt)
                        # Compact Encoding
                        geojson = dumps(shape, separators=SEPARATORS)
                    if geojson:
                        f = dict(type = "Feature",
                                 properties = {"id": id},
                                 geometry = json.loads(geojson)
                                 )
                        append(f)
                if features:
                    data = dict(type = "FeatureCollection",
                                features = features
                                )
                    # Output to file
                    filename = os.path.join(folder, "1_%s.geojson" % _id)
                    File = open(filename, "w")
                    File.write(json.dumps(data, separators=SEPARATORS))
                    File.close()
                else:
                    s3_debug("No L1 features in %s" % _id)

        # ---- L2: one file per L1 (2_<l1_id>.geojson) ----
        if "L2" in levels:
            if "L0" not in levels and "L1" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 4 # 0.00125 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    features = []
                    append = features.append
                    rows = db(query).select(ifield,
                                            field)
                    for row in rows:
                        if spatial:
                            id = row["gis_location"].id
                            geojson = row.geojson
                        elif simplify:
                            id = row.id
                            wkt = row.wkt
                            if wkt:
                                geojson = _simplify(wkt, tolerance=simplify,
                                                    decimals=decimals,
                                                    output="geojson")
                            else:
                                name = db(table.id == id).select(table.name,
                                                                 limitby=(0, 1)).first().name
                                print >> sys.stderr, "No WKT: L2 %s %s" % (name, id)
                                continue
                        else:
                            id = row.id
                            shape = wkt_loads(row.wkt)
                            # Compact Encoding
                            geojson = dumps(shape, separators=SEPARATORS)
                        if geojson:
                            f = dict(type = "Feature",
                                     properties = {"id": id},
                                     geometry = json.loads(geojson)
                                     )
                            append(f)
                    if features:
                        data = dict(type = "FeatureCollection",
                                    features = features
                                    )
                        # Output to file
                        filename = os.path.join(folder, "2_%s.geojson" % l1.id)
                        File = open(filename, "w")
                        File.write(json.dumps(data, separators=SEPARATORS))
                        File.close()
                    else:
                        s3_debug("No L2 features in %s" % l1.id)

        # ---- L3: one file per L2 (3_<l2_id>.geojson) ----
        if "L3" in levels:
            if "L0" not in levels and "L1" not in levels and "L2" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.000625 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    l2s = db(query).select(ifield)
                    for l2 in l2s:
                        query = q3 & (table.parent == l2.id)
                        features = []
                        append = features.append
                        rows = db(query).select(ifield,
                                                field)
                        for row in rows:
                            if spatial:
                                id = row["gis_location"].id
                                geojson = row.geojson
                            elif simplify:
                                id = row.id
                                wkt = row.wkt
                                if wkt:
                                    geojson = _simplify(wkt, tolerance=simplify,
                                                        decimals=decimals,
                                                        output="geojson")
                                else:
                                    name = db(table.id == id).select(table.name,
                                                                     limitby=(0, 1)).first().name
                                    print >> sys.stderr, "No WKT: L3 %s %s" % (name, id)
                                    continue
                            else:
                                id = row.id
                                shape = wkt_loads(row.wkt)
                                # Compact Encoding
                                geojson = dumps(shape, separators=SEPARATORS)
                            if geojson:
                                f = dict(type = "Feature",
                                         properties = {"id": id},
                                         geometry = json.loads(geojson)
                                         )
                                append(f)
                        if features:
                            data = dict(type = "FeatureCollection",
                                        features = features
                                        )
                            # Output to file
                            filename = os.path.join(folder, "3_%s.geojson" % l2.id)
                            File = open(filename, "w")
                            File.write(json.dumps(data, separators=SEPARATORS))
                            File.close()
                        else:
                            s3_debug("No L3 features in %s" % l2.id)

        # ---- L4: one file per L3 (4_<l3_id>.geojson) ----
        if "L4" in levels:
            if "L0" not in levels and "L1" not in levels and "L2" not in levels and "L3" not in levels:
                countries = db(cquery).select(ifield)
            if simplify:
                # We want greater precision when zoomed-in more
                simplify = simplify / 2 # 0.0003125 with default setting
                if spatial:
                    field = _field.st_simplify(simplify).st_asgeojson(precision=decimals).with_alias("geojson")
            for country in countries:
                if not spatial or "L0" not in levels:
                    id = country.id
                else:
                    id = country["gis_location"].id
                query = q1 & (table.parent == id)
                l1s = db(query).select(ifield)
                for l1 in l1s:
                    query = q2 & (table.parent == l1.id)
                    l2s = db(query).select(ifield)
                    for l2 in l2s:
                        query = q3 & (table.parent == l2.id)
                        l3s = db(query).select(ifield)
                        for l3 in l3s:
                            query = q4 & (table.parent == l3.id)
                            features = []
                            append = features.append
                            rows = db(query).select(ifield,
                                                    field)
                            for row in rows:
                                if spatial:
                                    id = row["gis_location"].id
                                    geojson = row.geojson
                                elif simplify:
                                    id = row.id
                                    wkt = row.wkt
                                    if wkt:
                                        geojson = _simplify(wkt, tolerance=simplify,
                                                            decimals=decimals,
                                                            output="geojson")
                                    else:
                                        name = db(table.id == id).select(table.name,
                                                                         limitby=(0, 1)).first().name
                                        print >> sys.stderr, "No WKT: L4 %s %s" % (name, id)
                                        continue
                                else:
                                    id = row.id
                                    shape = wkt_loads(row.wkt)
                                    # Compact Encoding
                                    geojson = dumps(shape, separators=SEPARATORS)
                                if geojson:
                                    f = dict(type = "Feature",
                                             properties = {"id": id},
                                             geometry = json.loads(geojson)
                                             )
                                    append(f)
                            if features:
                                data = dict(type = "FeatureCollection",
                                            features = features
                                            )
                                # Output to file
                                filename = os.path.join(folder, "4_%s.geojson" % l3.id)
                                File = open(filename, "w")
                                File.write(json.dumps(data, separators=SEPARATORS))
                                File.close()
                            else:
                                s3_debug("No L4 features in %s" % l3.id)
# -------------------------------------------------------------------------
def import_admin_areas(self,
source="gadmv1",
countries=[],
levels=["L0", "L1", "L2"]
):
"""
Import Admin Boundaries into the Locations table
@param source - Source to get the data from.
Currently only GADM is supported: http://gadm.org
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
@param levels - Which levels of the hierarchy to import.
defaults to all 3 supported levels
"""
if source == "gadmv1":
try:
from osgeo import ogr
except:
s3_debug("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm1_L0(ogr, countries=countries)
if "L1" in levels:
self.import_gadm1(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm1(ogr, "L2", countries=countries)
s3_debug("All done!")
elif source == "gadmv1":
try:
from osgeo import ogr
except:
s3_debug("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm2(ogr, "L0", countries=countries)
if "L1" in levels:
self.import_gadm2(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm2(ogr, "L2", countries=countries)
s3_debug("All done!")
else:
s3_debug("Only GADM is currently supported")
return
return
# -------------------------------------------------------------------------
    @staticmethod
    def import_gadm1_L0(ogr, countries=[]):
        """
            Import L0 Admin Boundaries into the Locations table from GADMv1
            - designed to be called from import_admin_areas()
            - assumes that basic prepop has been done, so that no new records need to be created

            @param ogr - The OGR Python module
            @param countries - List of ISO2 countrycodes to download data for
                               defaults to all countries
        """

        db = current.db
        s3db = current.s3db
        ttable = s3db.gis_location_tag
        table = db.gis_location

        layer = {
            "url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
            "zipfile" : "gadm_v1_lev0_shp.zip",
            "shapefile" : "gadm1_lev0",
            "codefield" : "ISO2", # This field is used to uniquely identify the L0 for updates
            "code2field" : "ISO" # This field is used to uniquely identify the L0 for parenting the L1s
        }

        # Copy the current working directory to revert back to later
        cwd = os.getcwd()
        # Create the working directory
        TEMP = os.path.join(cwd, "temp")
        if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv1")
        if not os.path.exists(tempPath):
            try:
                os.mkdir(tempPath)
            except OSError:
                s3_debug("Unable to create temp folder %s!" % tempPath)
                return
        # Set the current working directory
        # (NOTE: process-wide side-effect; restored at the end)
        os.chdir(tempPath)

        layerName = layer["shapefile"]

        # Check if file has already been downloaded
        fileName = layer["zipfile"]
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            url = layer["url"]
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        # Extract each shapefile component to the working directory
        s3_debug("Unzipping %s" % layerName)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Use OGR to read Shapefile
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open("%s.shp" % layerName)
        if ds is None:
            s3_debug("Open failed.\n")
            return

        lyr = ds.GetLayerByName(layerName)

        lyr.ResetReading()

        codeField = layer["codefield"]
        code2Field = layer["code2field"]
        for feat in lyr:
            code = feat.GetField(codeField)
            if not code:
                # Skip the entries which aren't countries
                continue
            if countries and code not in countries:
                # Skip the countries which we're not interested in
                continue

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    # Point geometries are ignored for L0 boundaries
                    pass
                else:
                    # Locate the existing L0 record via its ISO2 tag
                    query = (table.id == ttable.location_id) & \
                            (ttable.tag == "ISO2") & \
                            (ttable.value == code)
                    wkt = geom.ExportToWkt()
                    # Map WKT prefix -> gis_feature_type code
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    code2 = feat.GetField(code2Field)
                    #area = feat.GetField("Shape_Area")
                    try:
                        # Update the existing record in-place & tag it with ISO3
                        id = db(query).select(table.id,
                                              limitby=(0, 1)).first().id
                        query = (table.id == id)
                        db(query).update(gis_feature_type=gis_feature_type,
                                         wkt=wkt)
                        ttable.insert(location_id = id,
                                      tag = "ISO3",
                                      value = code2)
                        #ttable.insert(location_id = location_id,
                        #              tag = "area",
                        #              value = area)
                    except db._adapter.driver.OperationalError, exception:
                        s3_debug(exception)

            else:
                s3_debug("No geometry\n")

        # Close the shapefile
        ds.Destroy()

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(cwd)

        return
# -------------------------------------------------------------------------
    def import_gadm1(self, ogr, level="L1", countries=[]):
        """
            Import L1 Admin Boundaries into the Locations table from GADMv1
            - designed to be called from import_admin_areas()
            - assumes a fresh database with just Countries imported

            @param ogr - The OGR Python module
            @param level - "L1" or "L2"
            @param countries - List of ISO2 countrycodes to download data for
                               defaults to all countries
        """
        if level == "L1":
            layer = {
                "url" : "http://gadm.org/data/gadm_v1_lev1_shp.zip",
                "zipfile" : "gadm_v1_lev1_shp.zip",
                "shapefile" : "gadm1_lev1",
                "namefield" : "NAME_1",
                # Uniquely identify the L1 for updates
                "sourceCodeField" : "ID_1",
                "edenCodeField" : "GADM1",
                # Uniquely identify the L0 for parenting the L1s
                "parent" : "L0",
                "parentSourceCodeField" : "ISO",
                "parentEdenCodeField" : "ISO3",
            }
        elif level == "L2":
            layer = {
                "url" : "http://biogeo.ucdavis.edu/data/gadm/gadm_v1_lev2_shp.zip",
                "zipfile" : "gadm_v1_lev2_shp.zip",
                "shapefile" : "gadm_v1_lev2",
                "namefield" : "NAME_2",
                # Uniquely identify the L2 for updates
                "sourceCodeField" : "ID_2",
                "edenCodeField" : "GADM2",
                # Uniquely identify the L0 for parenting the L1s
                "parent" : "L1",
                "parentSourceCodeField" : "ID_1",
                "parentEdenCodeField" : "GADM1",
            }
        else:
            s3_debug("Level %s not supported!" % level)
            return

        import csv
        import shutil
        import zipfile

        db = current.db
        s3db = current.s3db
        cache = s3db.cache
        table = s3db.gis_location
        ttable = s3db.gis_location_tag

        csv.field_size_limit(2**20 * 100)  # 100 megs

        # Not all the data is encoded like this
        # (unable to determine encoding - appears to be damaged in source):
        # Azerbaijan L1
        # Vietnam L1 & L2
        ENCODING = "cp1251"

        # from http://docs.python.org/library/csv.html#csv-examples
        # Decode each CSV cell from ENCODING to unicode lazily
        def latin_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
            for row in csv.reader(unicode_csv_data):
                yield [unicode(cell, ENCODING) for cell in row]

        # DictReader-alike built on latin_csv_reader (first row = headers)
        def latin_dict_reader(data, dialect=csv.excel, **kwargs):
            reader = latin_csv_reader(data, dialect=dialect, **kwargs)
            headers = reader.next()
            for r in reader:
                yield dict(zip(headers, r))

        # Copy the current working directory to revert back to later
        cwd = os.getcwd()
        # Create the working directory
        TEMP = os.path.join(cwd, "temp")
        if not os.path.exists(TEMP): # use web2py/temp/GADMv1 as a cache
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv1")
        if not os.path.exists(tempPath):
            try:
                os.mkdir(tempPath)
            except OSError:
                s3_debug("Unable to create temp folder %s!" % tempPath)
                return
        # Set the current working directory
        # (NOTE: process-wide side-effect; restored before every return below)
        os.chdir(tempPath)

        # Remove any existing CSV folder to allow the new one to be created
        try:
            shutil.rmtree("CSV")
        except OSError:
            # Folder doesn't exist, so should be creatable
            pass

        layerName = layer["shapefile"]

        # Check if file has already been downloaded
        fileName = layer["zipfile"]
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            url = layer["url"]
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                # Revert back to the working directory as before.
                os.chdir(cwd)
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        s3_debug("Unzipping %s" % layerName)
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Convert to CSV
        # (attributes are read from the CSV rather than the DBF so that the
        # damaged encodings can be handled - see ENCODING above)
        s3_debug("Converting %s.shp to CSV" % layerName)
        # Simplified version of generic Shapefile Importer:
        # http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/ogr2ogr.py
        bSkipFailures = False
        nGroupTransactions = 200
        nFIDToFetch = ogr.NullFID
        inputFileName = "%s.shp" % layerName
        inputDS = ogr.Open(inputFileName, False)
        outputFileName = "CSV"
        outputDriver = ogr.GetDriverByName("CSV")
        outputDS = outputDriver.CreateDataSource(outputFileName, options=[])
        # GADM only has 1 layer/source
        inputLayer = inputDS.GetLayer(0)
        inputFDefn = inputLayer.GetLayerDefn()
        # Create the output Layer
        outputLayer = outputDS.CreateLayer(layerName)
        # Copy all Fields
        papszFieldTypesToString = []
        inputFieldCount = inputFDefn.GetFieldCount()
        panMap = [-1 for i in range(inputFieldCount)]
        outputFDefn = outputLayer.GetLayerDefn()
        nDstFieldCount = 0
        if outputFDefn is not None:
            nDstFieldCount = outputFDefn.GetFieldCount()
        for iField in range(inputFieldCount):
            inputFieldDefn = inputFDefn.GetFieldDefn(iField)
            oFieldDefn = ogr.FieldDefn(inputFieldDefn.GetNameRef(),
                                       inputFieldDefn.GetType())
            oFieldDefn.SetWidth(inputFieldDefn.GetWidth())
            oFieldDefn.SetPrecision(inputFieldDefn.GetPrecision())
            # The field may have been already created at layer creation
            iDstField = -1;
            if outputFDefn is not None:
                iDstField = outputFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
            if iDstField >= 0:
                panMap[iField] = iDstField
            elif outputLayer.CreateField(oFieldDefn) == 0:
                # now that we've created a field, GetLayerDefn() won't return NULL
                if outputFDefn is None:
                    outputFDefn = outputLayer.GetLayerDefn()
                panMap[iField] = nDstFieldCount
                nDstFieldCount = nDstFieldCount + 1
        # Transfer features
        # (batched into transactions of nGroupTransactions features)
        nFeaturesInTransaction = 0
        iSrcZField = -1
        inputLayer.ResetReading()
        if nGroupTransactions > 0:
            outputLayer.StartTransaction()
        while True:
            poDstFeature = None
            if nFIDToFetch != ogr.NullFID:
                # Only fetch feature on first pass.
                if nFeaturesInTransaction == 0:
                    poFeature = inputLayer.GetFeature(nFIDToFetch)
                else:
                    poFeature = None
            else:
                poFeature = inputLayer.GetNextFeature()
            if poFeature is None:
                break
            nParts = 0
            nIters = 1
            for iPart in range(nIters):
                nFeaturesInTransaction = nFeaturesInTransaction + 1
                if nFeaturesInTransaction == nGroupTransactions:
                    outputLayer.CommitTransaction()
                    outputLayer.StartTransaction()
                    nFeaturesInTransaction = 0
                poDstFeature = ogr.Feature(outputLayer.GetLayerDefn())
                if poDstFeature.SetFromWithMap(poFeature, 1, panMap) != 0:
                    if nGroupTransactions > 0:
                        outputLayer.CommitTransaction()
                    s3_debug("Unable to translate feature %d from layer %s" % \
                             (poFeature.GetFID(), inputFDefn.GetName()))
                    # Revert back to the working directory as before.
                    os.chdir(cwd)
                    return
                poDstGeometry = poDstFeature.GetGeometryRef()
                if poDstGeometry is not None:
                    if nParts > 0:
                        # For -explodecollections, extract the iPart(th) of the geometry
                        poPart = poDstGeometry.GetGeometryRef(iPart).Clone()
                        poDstFeature.SetGeometryDirectly(poPart)
                        poDstGeometry = poPart
                if outputLayer.CreateFeature(poDstFeature) != 0 and \
                   not bSkipFailures:
                    if nGroupTransactions > 0:
                        outputLayer.RollbackTransaction()
                    # Revert back to the working directory as before.
                    os.chdir(cwd)
                    return
        if nGroupTransactions > 0:
            outputLayer.CommitTransaction()
        # Cleanup
        outputDS.Destroy()
        inputDS.Destroy()

        fileName = "%s.csv" % layerName
        filePath = os.path.join("CSV", fileName)
        os.rename(filePath, fileName)
        os.removedirs("CSV")

        # Use OGR to read SHP for geometry
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open("%s.shp" % layerName)
        if ds is None:
            s3_debug("Open failed.\n")
            # Revert back to the working directory as before.
            os.chdir(cwd)
            return

        lyr = ds.GetLayerByName(layerName)

        lyr.ResetReading()

        # Use CSV for Name
        s3_debug("Opening %s.csv" % layerName)
        rows = latin_dict_reader(open("%s.csv" % layerName))

        nameField = layer["namefield"]
        sourceCodeField = layer["sourceCodeField"]
        edenCodeField = layer["edenCodeField"]
        parentSourceCodeField = layer["parentSourceCodeField"]
        parentLevel = layer["parent"]
        parentEdenCodeField = layer["parentEdenCodeField"]
        parentCodeQuery = (ttable.tag == parentEdenCodeField)
        count = 0
        # Iterate CSV rows & SHP features in lockstep (count is the shared index)
        for row in rows:
            # Read Attributes
            feat = lyr[count]

            parentCode = feat.GetField(parentSourceCodeField)
            query = (table.level == parentLevel) & \
                    parentCodeQuery & \
                    (ttable.value == parentCode)
            parent = db(query).select(table.id,
                                      ttable.value,
                                      limitby=(0, 1),
                                      cache=cache).first()
            if not parent:
                # Skip locations for which we don't have a valid parent
                s3_debug("Skipping - cannot find parent with key: %s, value: %s" % \
                         (parentEdenCodeField, parentCode))
                count += 1
                continue

            if countries:
                # Skip the countries which we're not interested in
                if level == "L1":
                    if parent["gis_location_tag"].value not in countries:
                        #s3_debug("Skipping %s as not in countries list" % parent["gis_location_tag"].value)
                        count += 1
                        continue
                else:
                    # Check grandparent
                    country = self.get_parent_country(parent.id,
                                                      key_type="code")
                    if country not in countries:
                        count += 1
                        continue

            # This is got from CSV in order to be able to handle the encoding
            name = row.pop(nameField)
            # NOTE(review): result of encode() is discarded here - looks like a
            # validation-only call (raises on un-encodable names); confirm intent
            name.encode("utf8")

            code = feat.GetField(sourceCodeField)
            area = feat.GetField("Shape_Area")

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    lat = geom.GetX()
                    lon = geom.GetY()
                    id = table.insert(name=name,
                                      level=level,
                                      gis_feature_type=1,
                                      lat=lat,
                                      lon=lon,
                                      parent=parent.id)
                    ttable.insert(location_id = id,
                                  tag = edenCodeField,
                                  value = code)
                    # ttable.insert(location_id = id,
                    #               tag = "area",
                    #               value = area)
                else:
                    wkt = geom.ExportToWkt()
                    # Map WKT prefix -> gis_feature_type code
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    id = table.insert(name=name,
                                      level=level,
                                      gis_feature_type=gis_feature_type,
                                      wkt=wkt,
                                      parent=parent.id)
                    ttable.insert(location_id = id,
                                  tag = edenCodeField,
                                  value = code)
                    # ttable.insert(location_id = id,
                    #               tag = "area",
                    #               value = area)
            else:
                s3_debug("No geometry\n")

            count += 1

        # Close the shapefile
        ds.Destroy()

        db.commit()

        s3_debug("Updating Location Tree...")
        try:
            self.update_location_tree()
        except MemoryError:
            # If doing all L2s, it can break memory limits
            # @ToDo: Check now that we're doing by level
            s3_debug("Memory error when trying to update_location_tree()!")

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(cwd)

        return
# -------------------------------------------------------------------------
    @staticmethod
    def import_gadm2(ogr, level="L0", countries=[]):
        """
            Import Admin Boundaries into the Locations table from GADMv2
            - designed to be called from import_admin_areas()
            - assumes that basic prepop has been done, so that no new L0 records need to be created

            @param ogr - The OGR Python module
            @param level - "L0", "L1" or "L2"
            @param countries - List of ISO2 countrycodes to download data for
                               defaults to all countries

            @ToDo: Complete this
                - not currently possible to get all data from the 1 file easily
                - no ISO2
                - needs updating for gis_location_tag model
                - only the lowest available levels accessible
                - use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?
        """
        if level == "L0":
            codeField = "ISO2"   # This field is used to uniquely identify the L0 for updates
            code2Field = "ISO"   # This field is used to uniquely identify the L0 for parenting the L1s
        elif level == "L1":
            nameField = "NAME_1"
            codeField = "ID_1"   # This field is used to uniquely identify the L1 for updates
            code2Field = "ISO"   # This field is used to uniquely identify the L0 for parenting the L1s
            parent = "L0"
            parentCode = "code2"
        elif level == "L2":
            nameField = "NAME_2"
            codeField = "ID_2"   # This field is used to uniquely identify the L2 for updates
            code2Field = "ID_1"  # This field is used to uniquely identify the L1 for parenting the L2s
            parent = "L1"
            parentCode = "code"
        else:
            s3_debug("Level %s not supported!" % level)
            return

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location

        url = "http://gadm.org/data2/gadm_v2_shp.zip"
        # NOTE(review): this local "zipfile" name is shadowed by "import zipfile"
        # below; it works only because fileName captures the value first
        zipfile = "gadm_v2_shp.zip"
        shapefile = "gadm2"

        # Copy the current working directory to revert back to later
        old_working_directory = os.getcwd()
        # Create the working directory
        if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv2 as a cache
            TEMP = os.path.join(os.getcwd(), "temp")
        else:
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv2")
        try:
            os.mkdir(tempPath)
        except OSError:
            # Folder already exists - reuse
            pass
        # Set the current working directory
        os.chdir(tempPath)

        layerName = shapefile

        # Check if file has already been downloaded
        fileName = zipfile
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        s3_debug("Unzipping %s" % layerName)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Use OGR to read Shapefile
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open("%s.shp" % layerName)
        if ds is None:
            s3_debug("Open failed.\n")
            return

        lyr = ds.GetLayerByName(layerName)

        lyr.ResetReading()

        for feat in lyr:
            code = feat.GetField(codeField)
            if not code:
                # Skip the entries which aren't countries
                continue
            if countries and code not in countries:
                # Skip the countries which we're not interested in
                continue

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    pass
                else:
                    ## FIXME
                    ##query = (table.code == code)
                    wkt = geom.ExportToWkt()
                    # Map WKT prefix -> gis_feature_type code
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    code2 = feat.GetField(code2Field)
                    area = feat.GetField("Shape_Area")
                    try:
                        ## FIXME
                        # NOTE(review): "query" is undefined here (the build is
                        # commented out above) - this raises NameError if reached;
                        # part of the incomplete @ToDo above
                        db(query).update(gis_feature_type=gis_feature_type,
                                         wkt=wkt)
                        #code2=code2,
                        #area=area
                    except db._adapter.driver.OperationalError, exception:
                        s3_debug(exception)

            else:
                s3_debug("No geometry\n")

        # Close the shapefile
        ds.Destroy()

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(old_working_directory)

        return
# -------------------------------------------------------------------------
def import_geonames(self, country, level=None):
"""
Import Locations from the Geonames database
@param country: the 2-letter country code
@param level: the ADM level to import
Designed to be run from the CLI
Levels should be imported sequentially.
It is assumed that L0 exists in the DB already
L1-L3 may have been imported from Shapefiles with Polygon info
Geonames can then be used to populate the lower levels of hierarchy
"""
import codecs
from shapely.geometry import point
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")
db = current.db
s3db = current.s3db
cache = s3db.cache
request = current.request
settings = current.deployment_settings
table = s3db.gis_location
ttable = s3db.gis_location_tag
url = "http://download.geonames.org/export/dump/" + country + ".zip"
cachepath = os.path.join(request.folder, "cache")
filename = country + ".txt"
filepath = os.path.join(cachepath, filename)
if os.access(filepath, os.R_OK):
cached = True
else:
cached = False
if not os.access(cachepath, os.W_OK):
s3_debug("Folder not writable", cachepath)
return
if not cached:
# Download File
from gluon.tools import fetch
try:
f = fetch(url)
except (urllib2.URLError,):
e = sys.exc_info()[1]
s3_debug("URL Error", e)
return
except (urllib2.HTTPError,):
e = sys.exc_info()[1]
s3_debug("HTTP Error", e)
return
# Unzip File
if f[:2] == "PK":
# Unzip
fp = StringIO(f)
import zipfile
myfile = zipfile.ZipFile(fp)
try:
# Python 2.6+ only :/
# For now, 2.5 users need to download/unzip manually to cache folder
myfile.extract(filename, cachepath)
myfile.close()
except IOError:
s3_debug("Zipfile contents don't seem correct!")
myfile.close()
return
f = codecs.open(filepath, encoding="utf-8")
# Downloaded file is worth keeping
#os.remove(filepath)
if level == "L1":
fc = "ADM1"
parent_level = "L0"
elif level == "L2":
fc = "ADM2"
parent_level = "L1"
elif level == "L3":
fc = "ADM3"
parent_level = "L2"
elif level == "L4":
fc = "ADM4"
parent_level = "L3"
else:
# 5 levels of hierarchy or 4?
# @ToDo make more extensible still
gis_location_hierarchy = self.get_location_hierarchy()
try:
label = gis_location_hierarchy["L5"]
level = "L5"
parent_level = "L4"
except:
# ADM4 data in Geonames isn't always good (e.g. PK bad)
level = "L4"
parent_level = "L3"
finally:
fc = "PPL"
deleted = (table.deleted == False)
query = deleted & (table.level == parent_level)
# Do the DB query once (outside loop)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
if not all_parents:
# No locations in the parent level found
# - use the one higher instead
parent_level = "L" + str(int(parent_level[1:]) + 1)
query = deleted & (table.level == parent_level)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
# Parse File
current_row = 0
for line in f:
current_row += 1
# Format of file: http://download.geonames.org/export/dump/readme.txt
geonameid,
name,
asciiname,
alternatenames,
lat,
lon,
feature_class,
feature_code,
country_code,
cc2,
admin1_code,
admin2_code,
admin3_code,
admin4_code,
population,
elevation,
gtopo30,
timezone,
modification_date = line.split("\t")
if feature_code == fc:
# Add WKT
lat = float(lat)
lon = float(lon)
wkt = self.latlon_to_wkt(lat, lon)
shape = point.Point(lon, lat)
# Add Bounds
lon_min = lon_max = lon
lat_min = lat_max = lat
# Locate Parent
parent = ""
# 1st check for Parents whose bounds include this location (faster)
def in_bbox(row):
return (row.lon_min < lon_min) & \
(row.lon_max > lon_max) & \
(row.lat_min < lat_min) & \
(row.lat_max > lat_max)
for row in all_parents.find(lambda row: in_bbox(row)):
# Search within this subset with a full geometry check
# Uses Shapely.
# @ToDo provide option to use PostGIS/Spatialite
try:
parent_shape = wkt_loads(row.wkt)
if parent_shape.intersects(shape):
parent = row.id
# Should be just a single parent
break
except ReadingError:
s3_debug("Error reading wkt of location with id", row.id)
# Add entry to database
new_id = table.insert(name=name,
level=level,
parent=parent,
lat=lat,
lon=lon,
wkt=wkt,
lon_min=lon_min,
lon_max=lon_max,
lat_min=lat_min,
lat_max=lat_max)
ttable.insert(location_id=new_id,
tag="geonames",
value=geonames_id)
else:
continue
s3_debug("All done!")
return
# -------------------------------------------------------------------------
@staticmethod
def latlon_to_wkt(lat, lon):
"""
Convert a LatLon to a WKT string
>>> s3gis.latlon_to_wkt(6, 80)
'POINT(80 6)'
"""
WKT = "POINT(%f %f)" % (lon, lat)
return WKT
# -------------------------------------------------------------------------
@staticmethod
def parse_location(wkt, lon=None, lat=None):
"""
Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
For points, wkt may be None if lat and lon are provided; wkt will be generated.
For lines and polygons, the lat, lon returned represent the shape's centroid.
Centroid and bounding box will be None if Shapely is not available.
"""
if not wkt:
if not lon is not None and lat is not None:
raise RuntimeError, "Need wkt or lon+lat to parse a location"
wkt = "POINT(%f %f)" % (lon, lat)
geom_type = GEOM_TYPES["point"]
bbox = (lon, lat, lon, lat)
else:
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
if SHAPELY:
shape = wkt_loads(wkt)
centroid = shape.centroid
lat = centroid.y
lon = centroid.x
geom_type = GEOM_TYPES[shape.type.lower()]
bbox = shape.bounds
else:
lat = None
lon = None
geom_type = GEOM_TYPES[wkt.split("(")[0].lower()]
bbox = None
res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
if bbox:
res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox
return res
# -------------------------------------------------------------------------
@staticmethod
def update_location_tree(feature=None):
"""
Update GIS Locations' Materialized path, Lx locations, Lat/Lon & the_geom
@param feature: a feature dict to update the tree for
- if not provided then update the whole tree
returns the path of the feature
Called onaccept for locations (async, where-possible)
"""
db = current.db
try:
table = db.gis_location
except:
table = current.s3db.gis_location
spatial = current.deployment_settings.get_gis_spatialdb()
wkt_centroid = GIS.wkt_centroid
def bounds_centroid_wkt(feature):
form = Storage()
form.vars = feature
form.errors = Storage()
wkt_centroid(form)
vars = form.vars
if "lat_max" in vars:
wkt = vars.wkt
_vars = dict(gis_feature_type = vars.gis_feature_type,
lat = vars.lat,
lon = vars.lon,
wkt = wkt,
lat_max = vars.lat_max,
lat_min = vars.lat_min,
lon_min = vars.lon_min,
lon_max = vars.lon_max)
if wkt:
if not wkt.startswith("POI"):
# Polygons aren't inherited
_vars.update(inherited = False)
if spatial:
_vars.update(the_geom = wkt)
db(table.id == feature.id).update(**_vars)
if not feature:
# Do the whole database
# Do in chunks to save memory and also do in correct order
fields = [table.id, table.name, table.gis_feature_type,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.wkt, table.inherited,
# Handle Countries which start with Bounds set, yet are Points
table.lat_min, table.lon_min, table.lat_max, table.lon_max,
table.path, table.parent]
update_location_tree = GIS.update_location_tree
for level in ["L0", "L1", "L2", "L3", "L4", "L5", None]:
features = db(table.level == level).select(*fields)
for feature in features:
feature["level"] = level
wkt = feature["wkt"]
if wkt and not wkt.startswith("POI"):
# Polygons aren't inherited
feature["inherited"] = False
update_location_tree(feature)
# Also do the Bounds/Centroid/WKT
bounds_centroid_wkt(feature)
return
# Single Feature
id = str(feature["id"]) if "id" in feature else None
if not id:
# Nothing we can do
raise ValueError
# L0
name = feature.get("name", False)
level = feature.get("level", False)
path = feature.get("path", False)
L0 = feature.get("L0", False)
if level == "L0":
if name:
if path == id and L0 == name:
# No action required
return path
else:
db(table.id == id).update(L0=name,
path=id)
else:
# Look this up
feature = db(table.id == id).select(table.name,
table.path,
table.L0,
limitby=(0, 1)).first()
if feature:
name = feature["name"]
path = feature["path"]
L0 = feature["L0"]
if path == id and L0 == name:
# No action required
return path
else:
db(table.id == id).update(L0=name,
path=id)
return id
# L1
parent = feature.get("parent", False)
L1 = feature.get("L1", False)
lat = feature.get("lat", False)
lon = feature.get("lon", False)
inherited = feature.get("inherited", None)
if level == "L1":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
if parent:
_path = "%s/%s" % (parent, id)
_L0 = db(table.id == parent).select(table.name,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = _L0.name
L0_lat = _L0.lat
L0_lon = _L0.lon
else:
_path = id
L0_name = None
L0_lat = None
L0_lon = None
if path == _path and L1 == name and L0 == L0_name:
if inherited and lat == L0_lat and lon == L0_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=L0_lat,
lon=L0_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
elif inherited and lat == L0_lat and lon == L0_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=name)
return _path
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=name,
inherited=True,
lat=L0_lat,
lon=L0_lon)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) & \
(table.inherited == True)
fields = [table.id, table.name, table.level, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
update_location_tree = GIS.update_location_tree
for row in rows:
update_location_tree(row)
return _path
# L2
L2 = feature.get("L2", False)
if level == "L2":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
if parent:
Lx = db(table.id == parent).select(table.name,
table.level,
table.parent,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L1":
L1_name = Lx.name
_parent = Lx.parent
if _parent:
_path = "%s/%s/%s" % (_parent, parent, id)
L0_name = db(table.id == _parent).select(table.name,
limitby=(0, 1),
cache=current.s3db.cache).first().name
else:
_path = "%s/%s" % (parent, id)
L0_name = None
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
else:
s3_debug("Parent of L2 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L2 == name and L0 == L0_name and \
L1 == L1_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=Lx_lat,
lon=Lx_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=name,
)
return _path
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=L1_name,
L2=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=L1_name,
L2=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) & \
(table.inherited == True)
fields = [table.id, table.name, table.level, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
update_location_tree = GIS.update_location_tree
for row in rows:
update_location_tree(row)
return _path
# L3
L3 = feature.get("L3", False)
if level == "L3":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.path,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
else:
s3_debug("Parent of L3 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L3 == name and L0 == L0_name and \
L1 == L1_name and L2 == L2_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=Lx_lat,
lon=Lx_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
bounds_centroid_wkt(feature)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name,
)
return _path
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) & \
(table.inherited == True)
fields = [table.id, table.name, table.level, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
update_location_tree = GIS.update_location_tree
for row in rows:
update_location_tree(row)
return _path
# L4
L4 = feature.get("L4", False)
if level == "L4":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False or \
L4 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.L2,
table.path,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
else:
s3_debug("Parent of L4 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L4 == name and L0 == L0_name and \
L1 == L1_name and L2 == L2_name and \
L3 == L3_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=Lx_lat,
lon=Lx_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name,
)
return _path
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) & \
(table.inherited == True)
fields = [table.id, table.name, table.level, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
update_location_tree = GIS.update_location_tree
for row in rows:
update_location_tree(row)
return _path
# L5
L5 = feature.get("L5", False)
if level == "L5":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False or \
L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.L2,
table.L3,
table.path,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)
).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
L4_name = None
else:
s3_debug("Parent of L5 Location ID %s has invalid level: %s is %s" % \
(id, parent, Lx.level))
#raise ValueError
return "%s/%s" % (parent, id)
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
L4_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L5 == name and L0 == L0_name and \
L1 == L1_name and L2 == L2_name and \
L3 == L3_name and L4 == L4_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=Lx_lat,
lon=Lx_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=name,
)
return _path
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) & \
(table.inherited == True)
fields = [table.id, table.name, table.level, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4, table.L5,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
update_location_tree = GIS.update_location_tree
for row in rows:
update_location_tree(row)
return _path
# Specific Location
# - or unspecified (which we should avoid happening)
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False or \
L4 is False or L5 is False:
# Get the whole feature
feature = db(table.id == id).select(table.id,
table.name,
table.level,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.L5,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
L5 = feature.L5
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.path,
table.lat,
table.lon,
limitby=(0, 1)).first()
if Lx.level == "L5":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
L5_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name and L4_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.L4
elif Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
L5_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
L4_name = None
L5_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
L4_name = None
L5_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
L4_name = None
L5_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = GIS.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1)).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
L4_name = None
L5_name = None
else:
#raise ValueError
return id
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
if feature.level == "L0":
L0_name = name
else:
L0_name = None
L1_name = None
L2_name = None
L3_name = None
L4_name = None
L5_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L0 == L0_name and \
L1 == L1_name and L2 == L2_name and \
L3 == L3_name and L4 == L4_name and \
L5 == L5_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
vars = dict(inherited=True,
lat=Lx_lat,
lon=Lx_lon,
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=L5_name,
)
elif inherited or lat is None or lon is None:
vars = dict(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=L5_name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon
)
db(table.id == id).update(**vars)
# Also do the Bounds/Centroid/WKT
vars.update(id=id,
gis_feature_type="1",
)
feature.update(**vars)
bounds_centroid_wkt(feature)
else:
db(table.id == id).update(path=_path,
inherited=False,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
L5=L5_name)
return _path
# -------------------------------------------------------------------------
    @staticmethod
    def wkt_centroid(form):
        """
            OnValidation callback:
            If a WKT is defined: validate the format,
                calculate the LonLat of the Centroid, and set bounds
            Else if a LonLat is defined: calculate the WKT for the Point.

            Uses Shapely.
            @ToDo: provide an option to use PostGIS/Spatialite

            @param form: form whose vars (lat, lon, wkt, gis_feature_type)
                         are read & updated in-place; problems are recorded
                         in form.errors
        """
        messages = current.messages
        vars = form.vars
        if vars.get("gis_feature_type", None) == "1":
            # Point
            if (vars.lon is None and vars.lat is None) or \
               (vars.lon == "" and vars.lat == ""):
                # No Geometry available
                # Don't clobber existing records (e.g. in Prepop)
                #vars.gis_feature_type = "0"
                # Cannot create WKT, so Skip
                return
            elif vars.lat is None or vars.lat == "":
                # Can't just have lon without lat
                form.errors["lat"] = messages.lat_empty
            elif vars.lon is None or vars.lon == "":
                form.errors["lon"] = messages.lon_empty
            else:
                vars.wkt = "POINT(%(lon)s %(lat)s)" % vars
                # For a Point, bounds default to the point itself
                # (only filled where not already provided)
                if "lon_min" not in vars or vars.lon_min is None:
                    vars.lon_min = vars.lon
                if "lon_max" not in vars or vars.lon_max is None:
                    vars.lon_max = vars.lon
                if "lat_min" not in vars or vars.lat_min is None:
                    vars.lat_min = vars.lat
                if "lat_max" not in vars or vars.lat_max is None:
                    vars.lat_max = vars.lat
        elif vars.get("wkt", None):
            # Parse WKT for LineString, Polygon, etc
            from shapely.wkt import loads as wkt_loads
            try:
                shape = wkt_loads(vars.wkt)
            except:
                try:
                    # Perhaps this is really a LINESTRING (e.g. OSM import of an unclosed Way)
                    # [8:-1] strips the 8-char "POLYGON(" prefix & the trailing ")"
                    linestring = "LINESTRING%s" % vars.wkt[8:-1]
                    shape = wkt_loads(linestring)
                    vars.wkt = linestring
                except:
                    form.errors["wkt"] = messages.invalid_wkt
                return
            # Map the Shapely geometry type onto the numeric gis_feature_type code
            gis_feature_type = shape.type
            if gis_feature_type == "Point":
                vars.gis_feature_type = 1
            elif gis_feature_type == "LineString":
                vars.gis_feature_type = 2
            elif gis_feature_type == "Polygon":
                vars.gis_feature_type = 3
            elif gis_feature_type == "MultiPoint":
                vars.gis_feature_type = 4
            elif gis_feature_type == "MultiLineString":
                vars.gis_feature_type = 5
            elif gis_feature_type == "MultiPolygon":
                vars.gis_feature_type = 6
            elif gis_feature_type == "GeometryCollection":
                vars.gis_feature_type = 7
            try:
                centroid_point = shape.centroid
                vars.lon = centroid_point.x
                vars.lat = centroid_point.y
                bounds = shape.bounds
                if gis_feature_type != "Point" or \
                   "lon_min" not in vars or vars.lon_min is None or \
                   vars.lon_min == vars.lon_max:
                    # Update bounds unless we have a 'Point' which has already got wider Bounds specified (such as a country)
                    vars.lon_min = bounds[0]
                    vars.lat_min = bounds[1]
                    vars.lon_max = bounds[2]
                    vars.lat_max = bounds[3]
            except:
                form.errors.gis_feature_type = messages.centroid_error
        elif (vars.lon is None and vars.lat is None) or \
             (vars.lon == "" and vars.lat == ""):
            # No Geometry available
            # Don't clobber existing records (e.g. in Prepop)
            #vars.gis_feature_type = "0"
            # Cannot create WKT, so Skip
            return
        else:
            # Point
            vars.gis_feature_type = "1"
            if vars.lat is None or vars.lat == "":
                form.errors["lat"] = messages.lat_empty
            elif vars.lon is None or vars.lon == "":
                form.errors["lon"] = messages.lon_empty
            else:
                vars.wkt = "POINT(%(lon)s %(lat)s)" % vars
                if "lon_min" not in vars or vars.lon_min is None:
                    vars.lon_min = vars.lon
                if "lon_max" not in vars or vars.lon_max is None:
                    vars.lon_max = vars.lon
                if "lat_min" not in vars or vars.lat_min is None:
                    vars.lat_min = vars.lat
                if "lat_max" not in vars or vars.lat_max is None:
                    vars.lat_max = vars.lat
        if current.deployment_settings.get_gis_spatialdb():
            # Also populate the spatial field
            vars.the_geom = vars.wkt
        return
# -------------------------------------------------------------------------
@staticmethod
def query_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns a query of all Locations inside the given bounding box
"""
table = current.s3db.gis_location
query = (table.lat_min <= lat_max) & \
(table.lat_max >= lat_min) & \
(table.lon_min <= lon_max) & \
(table.lon_max >= lon_min)
return query
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
"""
Returns Rows of Locations whose shape intersects the given bbox.
"""
query = current.gis.query_features_by_bbox(lon_min,
lat_min,
lon_max,
lat_max)
return current.db(query).select()
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_shape(shape):
"""
Returns Rows of locations which intersect the given shape.
Relies on Shapely for wkt parsing and intersection.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")
table = current.s3db.gis_location
in_bbox = current.gis.query_features_by_bbox(*shape.bounds)
has_wkt = (table.wkt != None) & (table.wkt != "")
for loc in current.db(in_bbox & has_wkt).select():
try:
location_shape = wkt_loads(loc.wkt)
if location_shape.intersects(shape):
yield loc
except ReadingError:
s3_debug("Error reading wkt of location with id", loc.id)
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_latlon(lat, lon):
"""
Returns a generator of locations whose shape intersects the given LatLon.
Relies on Shapely.
@todo: provide an option to use PostGIS/Spatialite
"""
from shapely.geometry import point
return current.gis.get_features_by_shape(point.Point(lon, lat))
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_feature(feature):
"""
Returns all Locations whose geometry intersects the given feature.
Relies on Shapely.
@ToDo: provide an option to use PostGIS/Spatialite
"""
from shapely.wkt import loads as wkt_loads
shape = wkt_loads(feature.wkt)
return current.gis.get_features_by_shape(shape)
# -------------------------------------------------------------------------
@staticmethod
def set_all_bounds():
"""
Sets bounds for all locations without them.
If shapely is present, and a location has wkt, bounds of the geometry
are used. Otherwise, the (lat, lon) are used as bounds.
"""
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
db = current.db
table = current.s3db.gis_location
# Query to find all locations without bounds set
no_bounds = (table.lon_min == None) & \
(table.lat_min == None) & \
(table.lon_max == None) & \
(table.lat_max == None) & \
(table.lat != None) & \
(table.lon != None)
if SHAPELY:
# Refine to those locations with a WKT field
wkt_no_bounds = no_bounds & (table.wkt != None) & (table.wkt != "")
for location in db(wkt_no_bounds).select(table.wkt):
try :
shape = wkt_loads(location.wkt)
except:
s3_debug("Error reading WKT", location.wkt)
continue
bounds = shape.bounds
table[location.id] = dict(lon_min = bounds[0],
lat_min = bounds[1],
lon_max = bounds[2],
lat_max = bounds[3],
)
# Anything left, we assume is a Point, so set the bounds to be the same
db(no_bounds).update(lon_min=table.lon,
lat_min=table.lat,
lon_max=table.lon,
lat_max=table.lat)
# -------------------------------------------------------------------------
@staticmethod
def simplify(wkt,
tolerance=None,
preserve_topology=True,
output="wkt",
decimals=4
):
"""
Simplify a complex Polygon using the Douglas-Peucker algorithm
- NB This uses Python, better performance will be gained by doing
this direct from the database if you are using PostGIS:
ST_Simplify() is available as
db(query).select(table.the_geom.st_simplify(tolerance).st_astext().with_alias('wkt')).first().wkt
db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson().with_alias('geojson')).first().geojson
@param wkt: the WKT string to be simplified (usually coming from a gis_location record)
@param tolerance: how aggressive a simplification to perform
@param preserve_topology: whether the simplified geometry should be maintained
@param output: whether to output as WKT or GeoJSON format
@param decimals: the number of decimal places to include in the output
"""
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
from shapely.wkt import loads as wkt_loads
try:
# Enable C-based speedups available from 1.2.10+
from shapely import speedups
speedups.enable()
except:
s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")
try:
shape = wkt_loads(wkt)
except:
wkt = wkt[10] if wkt else wkt
s3_debug("Invalid Shape: %s" % wkt)
return None
if not tolerance:
tolerance = current.deployment_settings.get_gis_simplify_tolerance()
if tolerance:
shape = shape.simplify(tolerance, preserve_topology)
# Limit the number of decimal places
formatter = ".%sf" % decimals
def shrink_polygon(shape):
""" Helper Function """
points = shape.exterior.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
return Polygon(LineString(coords))
geom_type = shape.geom_type
if geom_type == "MultiPolygon":
polygons = shape.geoms
p = []
pappend = p.append
for polygon in polygons:
pappend(shrink_polygon(polygon))
shape = MultiPolygon([s for s in p])
elif geom_type == "Polygon":
shape = shrink_polygon(shape)
elif geom_type == "LineString":
points = shape.coords
coords = []
cappend = coords.append
for point in points:
x = float(format(point[0], formatter))
y = float(format(point[1], formatter))
cappend((x, y))
shape = LineString(coords)
elif geom_type == "Point":
x = float(format(shape.x, formatter))
y = float(format(shape.y, formatter))
shape = Point(x, y)
else:
s3_debug("Cannot yet shrink Geometry: %s" % geom_type)
# Output
if output == "wkt":
output = shape.to_wkt()
elif output == "geojson":
from ..geojson import dumps
# Compact Encoding
output = dumps(shape, separators=SEPARATORS)
return output
# -------------------------------------------------------------------------
    def show_map(self,
                 id = "default_map",
                 height = None,
                 width = None,
                 bbox = {},
                 lat = None,
                 lon = None,
                 zoom = None,
                 projection = None,
                 add_feature = False,
                 add_feature_active = False,
                 add_line = False,
                 add_line_active = False,
                 add_polygon = False,
                 add_polygon_active = False,
                 features = None,
                 feature_queries = None,
                 feature_resources = None,
                 wms_browser = {},
                 catalogue_layers = False,
                 legend = False,
                 toolbar = False,
                 nav = None,
                 area = False,
                 save = False,
                 search = False,
                 mouse_position = None,
                 overview = None,
                 permalink = None,
                 scaleline = None,
                 zoomcontrol = None,
                 zoomWheelEnabled = True,
                 print_tool = {},
                 mgrs = {},
                 window = False,
                 window_hide = False,
                 closable = True,
                 maximizable = True,
                 collapsed = False,
                 callback = "DEFAULT",
                 plugins = None,
                 ):
        """
            Returns the HTML to display a map

            Normally called in the controller as: map = gis.show_map()
            In the view, put: {{=XML(map)}}

            NB All opts are simply forwarded to the MAP() HTML helper below,
            which performs the actual (lazy) server-side processing.

            @param id: ID to uniquely identify this map if there are several on a page
            @param height: Height of viewport (if not provided then the default deployment setting is used)
            @param width: Width of viewport (if not provided then the default deployment setting is used)
            @param bbox: default Bounding Box of viewport (if not provided then the Lat/Lon/Zoom are used) (Dict):
                {"lon_min" : float,
                 "lat_min" : float,
                 "lon_max" : float,
                 "lat_max" : float,
                 }
            @param lat: default Latitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
            @param lon: default Longitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
            @param zoom: default Zoom level of viewport (if not provided then the default setting from the Map Service Catalogue is used)
            @param projection: EPSG code for the Projection to use (if not provided then the default setting from the Map Service Catalogue is used)
            @param add_feature: Whether to include a DrawFeature control to allow adding a marker to the map
            @param add_feature_active: Whether the DrawFeature control should be active by default
            @param add_line: Whether to include a DrawFeature control to allow drawing a line over the map
            @param add_line_active: Whether the DrawFeature control should be active by default
            @param add_polygon: Whether to include a DrawFeature control to allow drawing a polygon over the map
            @param add_polygon_active: Whether the DrawFeature control should be active by default
            @param features: Simple Features to overlay on Map (no control over appearance & not interactive)
                [wkt]
            @param feature_queries: Feature Queries to overlay onto the map & their options (List of Dicts):
                [{"name"   : T("MyLabel"), # A string: the label for the layer
                  "query"  : query,        # A gluon.sql.Rows of gis_locations, which can be from a simple query or a Join.
                                           # Extra fields can be added for 'popup_url', 'popup_label' & either
                                           # 'marker' (url/height/width) or 'shape' (with optional 'colour' & 'size')
                  "active" : True,         # Is the feed displayed upon load or needs ticking to load afterwards?
                  "marker" : None,         # Optional: A per-Layer marker query or marker_id for the icon used to display the feature
                  "opacity" : 1,           # Optional
                  "cluster_attribute",     # Optional
                  "cluster_distance",      # Optional
                  "cluster_threshold"      # Optional
                  }]
            @param feature_resources: REST URLs for (filtered) resources to overlay onto the map & their options (List of Dicts):
                [{"name"      : T("MyLabel"), # A string: the label for the layer
                  "id"        : "search",     # A string: the id for the layer (for manipulation by JavaScript)
                  "active"    : True,         # Is the feed displayed upon load or needs ticking to load afterwards?
                  EITHER:
                  "layer_id"  : 1,            # An integer: the layer_id to load (optional alternative to specifying URL/tablename/marker)
                  "filter"    : "filter",     # A string: an optional URL filter which *replaces* any in the layer
                  OR:
                  "tablename" : "module_resource", # A string: the tablename (used to determine whether to locate via location_id or site_id)
                  "url"       : "/eden/module/resource.geojson?filter", # A URL to load the resource

                  "marker"    : None,         # Optional: A per-Layer marker dict for the icon used to display the feature (overrides layer_id if-set)
                  "opacity"   : 1,            # Optional (overrides layer_id if-set)
                  "cluster_attribute",        # Optional (overrides layer_id if-set)
                  "cluster_distance",         # Optional (overrides layer_id if-set)
                  "cluster_threshold",        # Optional (overrides layer_id if-set)
                  "dir",                      # Optional (overrides layer_id if-set)
                  "style",                    # Optional (overrides layer_id if-set)
                  }]
            @param wms_browser: WMS Server's GetCapabilities & options (dict)
                {"name": T("MyLabel"),     # Name for the Folder in LayerTree
                 "url": string             # URL of GetCapabilities
                 }
            @param catalogue_layers: Show all the enabled Layers from the GIS Catalogue
                                     Defaults to False: Just show the default Base layer
            @param legend: True: Show the GeoExt Legend panel, False: No Panel, "floating": New floating Legend Panel
            @param toolbar: Show the Icon Toolbar of Controls
            @param nav: Show the Navigation controls on the Toolbar
            @param area: Show the Area tool on the Toolbar
            @param save: Show the Save tool on the Toolbar
            @param search: Show the Geonames search box
            @param mouse_position: Show the current coordinates in the bottom-right of the map. 3 Options: 'normal', 'mgrs', False (defaults to checking deployment_settings, which defaults to 'normal')
            @param overview: Show the Overview Map (defaults to checking deployment_settings, which defaults to True)
            @param permalink: Show the Permalink control (defaults to checking deployment_settings, which defaults to True)
            @param scaleline: Show the ScaleLine control (defaults to checking deployment_settings, which defaults to True)
            @param zoomcontrol: Show the Zoom control (defaults to checking deployment_settings, which defaults to True)
            @param zoomWheelEnabled: Allow zooming with the mouse scroll wheel
            @param print_tool: Show a print utility (NB This requires server-side support: http://eden.sahanafoundation.org/wiki/BluePrintGISPrinting)
                {"url": string,            # URL of print service (e.g. http://localhost:8080/geoserver/pdf/)
                 "mapTitle": string,       # Title for the Printed Map (optional)
                 "subTitle": string        # subTitle for the Printed Map (optional)
                 }
            @param mgrs: Use the MGRS Control to select PDFs
                {"name": string,           # Name for the Control
                 "url": string             # URL of PDF server
                 }
                @ToDo: Also add MGRS Search support: http://gxp.opengeo.org/master/examples/mgrs.html
            @param window: Have viewport pop out of page into a resizable window
            @param window_hide: Have the window hidden by default, ready to appear (e.g. on clicking a button)
            @param closable: In Window mode, whether the window is closable or not
            @param maximizable: In Window mode, whether the window is maximizable or not
            @param collapsed: Start the Tools panel (West region) collapsed
            @param callback: Code to run once the Map JavaScript has loaded
            @param plugins: an iterable of objects which support the following methods:
                                .extend_gis_map(map)
                            Client-side portion supports the following methods:
                                .addToMapWindow(items)
                                .setup(map)
        """
        return MAP(id = id,
                   height = height,
                   width = width,
                   bbox = bbox,
                   lat = lat,
                   lon = lon,
                   zoom = zoom,
                   projection = projection,
                   add_feature = add_feature,
                   add_feature_active = add_feature_active,
                   add_line = add_line,
                   add_line_active = add_line_active,
                   add_polygon = add_polygon,
                   add_polygon_active = add_polygon_active,
                   features = features,
                   feature_queries = feature_queries,
                   feature_resources = feature_resources,
                   wms_browser = wms_browser,
                   catalogue_layers = catalogue_layers,
                   legend = legend,
                   toolbar = toolbar,
                   nav = nav,
                   area = area,
                   save = save,
                   search = search,
                   mouse_position = mouse_position,
                   overview = overview,
                   permalink = permalink,
                   scaleline = scaleline,
                   zoomcontrol = zoomcontrol,
                   zoomWheelEnabled = zoomWheelEnabled,
                   print_tool = print_tool,
                   mgrs = mgrs,
                   window = window,
                   window_hide = window_hide,
                   closable = closable,
                   maximizable = maximizable,
                   collapsed = collapsed,
                   callback = callback,
                   plugins = plugins,
                   )
# =============================================================================
class MAP(DIV):
"""
HTML Helper to render a Map
- allows the Map to be generated only when being rendered
- used by gis.show_map()
"""
    def __init__(self, **opts):
        """
            Initialise the lazy Map helper.

            :param **opts: options to pass to the Map for server-side
                           processing (see GIS.show_map() for the full list);
                           stored untouched until _setup() runs
        """
        # We haven't yet run _setup()
        self.setup = False
        self.callback = None

        # Options for server-side processing
        self.opts = opts
        self.id = opts.get("id", "default_map")

        # Options for client-side processing
        # - populated by _setup()
        self.options = {}

        # Components
        components = []

        # Map (Embedded not Window)
        # Needs to be an ID which means we can't have multiple per page :/
        # - Alternatives are also fragile. See s3.gis.js
        components.append(DIV(DIV(_class="map_loader"),
                              _id="map_panel"))

        # Register the components with the DIV parent class machinery
        self.components = components
        for c in components:
            self._setnode(c)

        # Other DIV settings
        self.attributes = {"_class": "map_wrapper",
                           "_id": self.id,
                           }
        self.parent = None
# -------------------------------------------------------------------------
    def _setup(self):
        """
            Setup the Map
            - not done during init() to have as Lazy as possible
            - separated from xml() in order to be able to read options to put
              into scripts (callback or otherwise)

            @return: the client-side options dict (also stored as
                     self.options), or None if the map cannot be shown
                     (e.g. unsupported projection)
        """
        opts = self.opts

        request = current.request
        response = current.response
        if not response.warning:
            response.warning = ""
        s3 = response.s3
        T = current.T
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        MAP_ADMIN = auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN)

        # Default configuration
        config = GIS.get_config()

        # Support bookmarks (such as from the control)
        # - these over-ride the arguments
        vars = request.get_vars

        # JS Globals
        globals = {}

        # Map Options for client-side processing
        options = {}

        # Strings used by all Maps
        i18n = {"gis_base_layers": T("Base Layers"),
                "gis_overlays": T(settings.get_gis_label_overlays()),
                "gis_layers": T(settings.get_gis_layers_label()),
                "gis_draft_layer": T("Draft Features"),
                "gis_cluster_multiple": T("There are multiple records at this location"),
                "gis_loading": T("Loading"),
                "gis_requires_login": T("Requires Login"),
                "gis_too_many_features": T("There are too many features, please Zoom In"),
                "gis_zoomin": T("Zoom In"),
                }

        ############
        # Viewport
        ############

        height = opts.get("height", None)
        if height:
            map_height = height
        else:
            map_height = settings.get_gis_map_height()
        options["map_height"] = map_height
        width = opts.get("width", None)
        if width:
            map_width = width
        else:
            map_width = settings.get_gis_map_width()
        options["map_width"] = map_width

        # Bounding Box or Center/Zoom
        bbox = opts.get("bbox", None)
        if (bbox
            and (-90 <= bbox["lat_max"] <= 90)
            and (-90 <= bbox["lat_min"] <= 90)
            and (-180 <= bbox["lon_max"] <= 180)
            and (-180 <= bbox["lon_min"] <= 180)
            ):
            # We have sane Bounds provided, so we should use them
            pass
        else:
            # No bounds or we've been passed bounds which aren't sane
            bbox = None
            # Use Lat/Lon/Zoom to center instead
            # - URL vars (bookmarks) take precedence over opts
            if "lat" in vars and vars.lat:
                lat = float(vars.lat)
            else:
                lat = opts.get("lat", None)
            if lat is None or lat == "":
                lat = config.lat
            if "lon" in vars and vars.lon:
                lon = float(vars.lon)
            else:
                lon = opts.get("lon", None)
            if lon is None or lon == "":
                lon = config.lon

        if bbox:
            # Calculate from Bounds
            options["bbox"] = [bbox["lon_min"], # left
                               bbox["lat_min"], # bottom
                               bbox["lon_max"], # right
                               bbox["lat_max"], # top
                               ]
        else:
            options["lat"] = lat
            options["lon"] = lon

        if "zoom" in vars:
            zoom = int(vars.zoom)
        else:
            zoom = opts.get("zoom", None)
        if not zoom:
            zoom = config.zoom
        options["zoom"] = zoom or 1

        options["numZoomLevels"] = config.zoom_levels

        ############
        # Projection
        ############

        projection = opts.get("projection", None)
        if not projection:
            projection = config.epsg
        options["projection"] = projection
        if projection not in (900913, 4326):
            # Test for Valid Projection file in Proj4JS library
            projpath = os.path.join(
                request.folder, "static", "scripts", "gis", "proj4js", \
                "lib", "defs", "EPSG%s.js" % projection
            )
            try:
                f = open(projpath, "r")
                f.close()
            except:
                if projection:
                    proj4js = config.proj4js
                    if proj4js:
                        # Create it
                        try:
                            f = open(projpath, "w")
                        except IOError, e:
                            response.error = \
                                T("Map not available: Cannot write projection file - %s") % e
                        else:
                            f.write('''Proj4js.defs["EPSG:4326"]="%s"''' % proj4js)
                            f.close()
                    else:
                        response.warning = \
                            T("Map not available: Projection %(projection)s not supported - please add definition to %(path)s") % \
                            dict(projection = "'%s'" % projection,
                                 path= "/static/scripts/gis/proj4js/lib/defs")
                else:
                    response.error = \
                        T("Map not available: No Projection configured")
                # Abort: the projection definition wasn't available this request
                return None

        options["maxExtent"] = config.maxExtent
        options["units"] = config.units

        ########
        # Marker
        ########
        if config.marker_image:
            options["marker_default"] = dict(i = config.marker_image,
                                             h = config.marker_height,
                                             w = config.marker_width,
                                             )
        # @ToDo: show_map() opts with fallback to settings
        # Keep these in sync with scaleImage() in s3.gis.js
        marker_max_height = settings.get_gis_marker_max_height()
        if marker_max_height != 35:
            options["max_h"] = marker_max_height
        marker_max_width = settings.get_gis_marker_max_width()
        if marker_max_width != 30:
            options["max_w"] = marker_max_width

        ########
        # Layout
        ########
        if not opts.get("closable", False):
            options["windowNotClosable"] = True
        if opts.get("window", False):
            options["window"] = True
        if opts.get("window_hide", False):
            options["windowHide"] = True
        if opts.get("maximizable", False):
            options["maximizable"] = True
        else:
            options["maximizable"] = False

        # Collapsed
        if opts.get("collapsed", False):
            options["west_collapsed"] = True

        # LayerTree
        if not settings.get_gis_layer_tree_base():
            options["hide_base"] = True
        if not settings.get_gis_layer_tree_overlays():
            options["hide_overlays"] = True
        if not settings.get_gis_layer_tree_expanded():
            options["folders_closed"] = True
        if settings.get_gis_layer_tree_radio():
            options["folders_radio"] = True

        #######
        # Tools
        #######

        # Toolbar
        if opts.get("toolbar", False):
            options["toolbar"] = True

            i18n["gis_length_message"] = T("The length is")
            i18n["gis_length_tooltip"] = T("Measure Length: Click the points along the path & end with a double-click")
            i18n["gis_zoomfull"] = T("Zoom to maximum map extent")
            i18n["gis_zoominbutton"] = T("Zoom In: click in the map or use the left mouse button and drag to create a rectangle")
            i18n["gis_zoomout"] = T("Zoom Out: click in the map or use the left mouse button and drag to create a rectangle")
            i18n["gis_geoLocate"] = T("Zoom to Current Location")

            # Search
            if opts.get("search", False):
                # Presence of label adds support JS in Loader and turns feature on in s3.gis.js
                # @ToDo: Provide explicit option to support multiple maps in a page with different options
                i18n["gis_search"] = T("Search location in Geonames")
                #i18n["gis_search_no_internet"] = T("Geonames.org search requires Internet connectivity!")

            # Show NAV controls?
            # e.g. removed within S3LocationSelectorWidget[2]
            nav = opts.get("nav", None)
            if nav is None:
                nav = settings.get_gis_nav_controls()
            if nav:
                i18n["gis_pan"] = T("Pan Map: keep the left mouse button pressed and drag the map")
                i18n["gis_navPrevious"] = T("Previous View")
                i18n["gis_navNext"] = T("Next View")
            else:
                options["nav"] = False

            # Show Area control?
            if opts.get("area", False):
                options["area"] = True
                i18n["gis_area_message"] = T("The area is")
                i18n["gis_area_tooltip"] = T("Measure Area: Click the points around the polygon & end with a double-click")

            # Show Save control?
            # e.g. removed within S3LocationSelectorWidget[2]
            if opts.get("save", True) and auth.is_logged_in():
                options["save"] = True
                i18n["gis_save"] = T("Save: Default Lat, Lon & Zoom for the Viewport")
                if MAP_ADMIN or (config.pe_id == auth.user.pe_id):
                    # Personal config or MapAdmin, so Save Button does Updates
                    options["config_id"] = config.id

            # OSM Authoring
            pe_id = auth.user.pe_id if auth.s3_logged_in() else None
            if pe_id and s3db.auth_user_options_get_osm(pe_id):
                # Presence of label turns feature on in s3.gis.js
                # @ToDo: Provide explicit option to support multiple maps in a page with different options
                i18n["gis_potlatch"] = T("Edit the OpenStreetMap data for this area")
                i18n["gis_osm_zoom_closer"] = T("Zoom in closer to Edit OpenStreetMap layer")

            # MGRS PDF Browser
            mgrs = opts.get("mgrs", None)
            if mgrs:
                options["mgrs_name"] = mgrs["name"]
                options["mgrs_url"] = mgrs["url"]
        else:
            # No Toolbar
            # Show Save control?
            # e.g. removed within S3LocationSelectorWidget[2]
            if opts.get("save", True) and auth.is_logged_in():
                db = current.db
                permit = auth.s3_has_permission
                ctable = db.gis_config
                if permit("create", ctable):
                    options["save"] = True
                    i18n["gis_save_map"] = T("Save Map")
                    i18n["gis_new_map"] = T("Save as New Map?")
                    i18n["gis_name_map"] = T("Name of Map")
                    i18n["save"] = T("Save")
                    i18n["saved"] = T("Saved")
                    config_id = config.id
                    _config = db(ctable.id == config_id).select(ctable.uuid,
                                                                ctable.name,
                                                                limitby=(0, 1),
                                                                ).first()
                    if MAP_ADMIN:
                        i18n["gis_my_maps"] = T("Saved Maps")
                    else:
                        options["pe_id"] = auth.user.pe_id
                        i18n["gis_my_maps"] = T("My Maps")
                    if permit("update", ctable, record_id=config_id):
                        options["config_id"] = config_id
                        options["config_name"] = _config.name
                    elif _config.uuid != "SITE_DEFAULT":
                        options["config_name"] = _config.name

        # Legend panel
        legend = opts.get("legend", False)
        if legend:
            i18n["gis_legend"] = T("Legend")
            if legend == "float":
                options["legend"] = "float"
                if settings.get_gis_layer_metadata():
                    options["metadata"] = True
                    # MAP_ADMIN better for simpler deployments
                    #if auth.s3_has_permission("create", "cms_post_layer"):
                    if MAP_ADMIN:
                        i18n["gis_metadata_create"] = T("Create 'More Info'")
                        i18n["gis_metadata_edit"] = T("Edit 'More Info'")
                    else:
                        i18n["gis_metadata"] = T("More Info")
            else:
                options["legend"] = True

        # Draw Feature Controls
        if opts.get("add_feature", False):
            i18n["gis_draw_feature"] = T("Add Point")
            if opts.get("add_feature_active", False):
                options["draw_feature"] = "active"
            else:
                options["draw_feature"] = "inactive"
        if opts.get("add_line", False):
            i18n["gis_draw_line"] = T("Add Line")
            if opts.get("add_line_active", False):
                options["draw_line"] = "active"
            else:
                options["draw_line"] = "inactive"
        if opts.get("add_polygon", False):
            i18n["gis_draw_polygon"] = T("Add Polygon")
            if opts.get("add_polygon_active", False):
                options["draw_polygon"] = "active"
            else:
                options["draw_polygon"] = "inactive"

        # Layer Properties
        if settings.get_gis_layer_properties():
            # Presence of label turns feature on in s3.gis.js
            i18n["gis_properties"] = T("Layer Properties")

        # Upload Layer
        if settings.get_gis_geoserver_password():
            # Presence of label adds support JS in Loader and turns feature on in s3.gis.js
            # @ToDo: Provide explicit option to support multiple maps in a page with different options
            i18n["gis_uploadlayer"] = T("Upload Shapefile")

        # WMS Browser
        wms_browser = opts.get("wms_browser", None)
        if wms_browser:
            options["wms_browser_name"] = wms_browser["name"]
            # urlencode the URL
            options["wms_browser_url"] = urllib.quote(wms_browser["url"])

        # Mouse Position
        # 'normal', 'mgrs' or 'off'
        mouse_position = opts.get("mouse_position", None)
        if mouse_position is None:
            mouse_position = settings.get_gis_mouse_position()
        if mouse_position == "mgrs":
            options["mouse_position"] = "mgrs"
            # Tell loader to load support scripts
            globals["mgrs"] = True
        elif mouse_position:
            options["mouse_position"] = True

        # Overview Map
        overview = opts.get("overview", None)
        if overview is None:
            overview = settings.get_gis_overview()
        if not overview:
            options["overview"] = False

        # Permalink
        permalink = opts.get("permalink", None)
        if permalink is None:
            permalink = settings.get_gis_permalink()
        if not permalink:
            options["permalink"] = False

        # ScaleLine
        scaleline = opts.get("scaleline", None)
        if scaleline is None:
            scaleline = settings.get_gis_scaleline()
        if not scaleline:
            options["scaleline"] = False

        # Zoom control
        zoomcontrol = opts.get("zoomcontrol", None)
        if zoomcontrol is None:
            zoomcontrol = settings.get_gis_zoomcontrol()
        if not zoomcontrol:
            options["zoomcontrol"] = False

        zoomWheelEnabled = opts.get("zoomWheelEnabled", True)
        if not zoomWheelEnabled:
            options["no_zoom_wheel"] = True

        ########
        # Layers
        ########

        # Duplicate Features to go across the dateline?
        # @ToDo: Action this again (e.g. for DRRPP)
        if settings.get_gis_duplicate_features():
            options["duplicate_features"] = True

        # Features
        features = opts.get("features", None)
        if features:
            options["features"] = addFeatures(features)

        # Feature Queries
        feature_queries = opts.get("feature_queries", None)
        if feature_queries:
            options["feature_queries"] = addFeatureQueries(feature_queries)

        # Feature Resources
        feature_resources = opts.get("feature_resources", None)
        if feature_resources:
            options["feature_resources"] = addFeatureResources(feature_resources)

        if opts.get("catalogue_layers", False):
            # Add all Layers from the Catalogue
            layer_types = [LayerArcREST,
                           LayerBing,
                           LayerEmpty,
                           LayerGoogle,
                           LayerOSM,
                           LayerTMS,
                           LayerWMS,
                           LayerXYZ,
                           LayerJS,
                           LayerTheme,
                           LayerGeoJSON,
                           LayerGPX,
                           LayerCoordinate,
                           LayerGeoRSS,
                           LayerKML,
                           LayerOpenWeatherMap,
                           LayerShapefile,
                           LayerWFS,
                           LayerFeature,
                           ]
        else:
            # Add just the default Base Layer
            s3.gis.base = True
            layer_types = []
            db = current.db
            ltable = s3db.gis_layer_config
            etable = s3db.gis_layer_entity
            query = (etable.id == ltable.layer_id) & \
                    (ltable.config_id == config["id"]) & \
                    (ltable.base == True) & \
                    (ltable.enabled == True)
            layer = db(query).select(etable.instance_type,
                                     limitby=(0, 1)).first()
            if not layer:
                # Use Site Default
                ctable = db.gis_config
                query = (etable.id == ltable.layer_id) & \
                        (ltable.config_id == ctable.id) & \
                        (ctable.uuid == "SITE_DEFAULT") & \
                        (ltable.base == True) & \
                        (ltable.enabled == True)
                layer = db(query).select(etable.instance_type,
                                         limitby=(0, 1)).first()
            if layer:
                # Map the instance type to the matching Layer class
                layer_type = layer.instance_type
                if layer_type == "gis_layer_openstreetmap":
                    layer_types = [LayerOSM]
                elif layer_type == "gis_layer_google":
                    # NB v3 doesn't work when initially hidden
                    layer_types = [LayerGoogle]
                elif layer_type == "gis_layer_arcrest":
                    layer_types = [LayerArcREST]
                elif layer_type == "gis_layer_bing":
                    layer_types = [LayerBing]
                elif layer_type == "gis_layer_tms":
                    layer_types = [LayerTMS]
                elif layer_type == "gis_layer_wms":
                    layer_types = [LayerWMS]
                elif layer_type == "gis_layer_xyz":
                    layer_types = [LayerXYZ]
                elif layer_type == "gis_layer_empty":
                    layer_types = [LayerEmpty]
            if not layer_types:
                layer_types = [LayerEmpty]

        scripts = []
        scripts_append = scripts.append
        for LayerType in layer_types:
            try:
                # Instantiate the Class
                layer = LayerType()
                layer.as_dict(options)
                for script in layer.scripts:
                    scripts_append(script)
            except Exception, exception:
                error = "%s not shown: %s" % (LayerType.__name__, exception)
                if s3.debug:
                    raise HTTP(500, error)
                else:
                    response.warning += error

        # WMS getFeatureInfo
        # (loads conditionally based on whether queryable WMS Layers have been added)
        if s3.gis.get_feature_info:
            # Presence of label turns feature on
            # @ToDo: Provide explicit option to support multiple maps in a page with different options
            i18n["gis_get_feature_info"] = T("Get Feature Info")
            i18n["gis_feature_info"] = T("Feature Info")

        # Callback can be set before _setup()
        if not self.callback:
            self.callback = opts.get("callback", "DEFAULT")
        # These can be read/modified after _setup() & before xml()
        self.options = options

        self.globals = globals
        self.i18n = i18n
        self.scripts = scripts

        # Set up map plugins
        # - currently just used by Climate
        # @ToDo: Get these working with new loader
        # This, and any code it generates, is done last
        # However, map plugin should not assume this.
        self.plugin_callbacks = []
        plugins = opts.get("plugins", None)
        if plugins:
            for plugin in plugins:
                plugin.extend_gis_map(self)

        # Flag to xml() that we've already been run
        self.setup = True

        return options
# -------------------------------------------------------------------------
    def xml(self):
        """
            Render the Map
            - this is primarily done by inserting a lot of JavaScript
            - CSS loaded as-standard to avoid delays in page loading
            - HTML added in init() as a component

            @return: the XML serialisation of the underlying DIV
        """
        if not self.setup:
            self._setup()

        # Add ExtJS
        # @ToDo: Do this conditionally on whether Ext UI is used
        s3_include_ext()

        s3 = current.response.s3
        js_global_append = s3.js_global.append

        # Inject the i18n labels collected by _setup() as JS globals
        i18n_dict = self.i18n
        i18n = []
        i18n_append = i18n.append
        for key, val in i18n_dict.items():
            # @ToDo: Check if already inserted (optimise multiple maps)
            i18n_append('''i18n.%s="%s"''' % (key, val))
        i18n = '''\n'''.join(i18n)
        js_global_append(i18n)

        # Inject the S3.gis globals collected by _setup()
        globals_dict = self.globals
        globals = []
        globals_append = globals.append
        dumps = json.dumps
        for key, val in globals_dict.items():
            # @ToDo: Check if already inserted (optimise multiple maps)
            globals_append('''S3.gis.%s=%s''' % (key, dumps(val, separators=SEPARATORS)))
        globals = '''\n'''.join(globals)
        js_global_append(globals)

        # Ensure the loader script is added exactly once
        scripts = s3.scripts
        script = URL(c="static", f="scripts/S3/s3.gis.loader.js")
        if script not in scripts:
            scripts.append(script)

        callback = self.callback
        map_id = self.id
        options = self.options
        projection = options["projection"]
        options = dumps(options, separators=SEPARATORS)
        plugin_callbacks = '''\n'''.join(self.plugin_callbacks)
        if callback:
            if callback == "DEFAULT":
                if map_id == "default_map":
                    callback = '''S3.gis.show_map(null,%s)''' % options
                else:
                    callback = '''S3.gis.show_map(%s,%s)''' % (map_id, options)
            else:
                # Store options where they can be read by a later show_map()
                js_global_append('''S3.gis.options["%s"]=%s''' % (map_id, options))
            script = URL(c="static", f="scripts/yepnope.1.5.4-min.js")
            if script not in scripts:
                scripts.append(script)
            if plugin_callbacks:
                callback = '''%s\n%s''' % (callback, plugin_callbacks)
            callback = '''function(){%s}''' % callback
        else:
            # Store options where they can be read by a later show_map()
            js_global_append('''S3.gis.options["%s"]=%s''' % (map_id, options))
            if plugin_callbacks:
                callback = '''function(){%s}''' % plugin_callbacks
            else:
                callback = '''null'''
        # Defer loading of the heavy GIS scripts until jQuery is ready
        loader = '''s3_gis_loadjs(%(debug)s,%(projection)s,%(callback)s,%(scripts)s)''' \
                 % dict(debug = "true" if s3.debug else "false",
                        projection = projection,
                        callback = callback,
                        scripts = self.scripts
                        )
        s3.jquery_ready.append(loader)

        # Return the HTML
        return super(MAP, self).xml()
# =============================================================================
def addFeatures(features):
    """
        Convert Simple Features (WKT strings) into GeoJSON Feature dicts
        for the Draft layer
        - used by S3LocationSelectorWidget
        - features which fail to parse are silently skipped
    """
    simplify = GIS.simplify
    geojsons = (simplify(feature, output="geojson") for feature in features)
    return [dict(type = "Feature",
                 geometry = json.loads(geojson))
            for geojson in geojsons if geojson]
# =============================================================================
def addFeatureQueries(feature_queries):
    """
        Add Feature Queries to the map
        - These can be Rows or Storage()
        NB These considerations need to be taken care of before arriving here:
            Security of data
            Localisation of name/popup_label

        Each layer's rows are copied into the gis_feature_query table so
        that the client can fetch them back as GeoJSON via a REST URL.

        @param feature_queries: iterable of layer definitions; each must have
                                "name" and "query" (Rows with lat/lon either
                                directly or via a joined gis_location), and
                                may have marker/cluster/opacity settings
        @return: list of layer dicts for the client-side map config
    """
    db = current.db
    s3db = current.s3db
    cache = s3db.cache
    request = current.request
    controller = request.controller
    function = request.function
    fqtable = s3db.gis_feature_query
    mtable = s3db.gis_marker
    auth = current.auth
    auth_user = auth.user
    if auth_user:
        # Records are owned by the user, so old ones can be cleared per-user
        created_by = auth_user.id
        s3_make_session_owner = auth.s3_make_session_owner
    else:
        # Anonymous
        # @ToDo: A deployment with many Anonymous Feature Queries being
        # accessed will need to change this design - e.g. use session ID instead
        created_by = None
    layers_feature_query = []
    append = layers_feature_query.append
    for layer in feature_queries:
        name = str(layer["name"])
        _layer = dict(name=name)
        # Make the name safe for use in a table-qualified cache key
        name_safe = re.sub("\W", "_", name)
        # Lat/Lon via Join or direct?
        # (probe the first row: presence of a joined gis_location decides)
        try:
            layer["query"][0].gis_location.lat
            join = True
        except:
            join = False
        # Push the Features into a temporary table in order to have them accessible via GeoJSON
        # @ToDo: Maintenance Script to clean out old entries (> 24 hours?)
        cname = "%s_%s_%s" % (name_safe,
                              controller,
                              function)
        # Clear old records
        query = (fqtable.name == cname) & \
                (fqtable.created_by == created_by)
        db(query).delete()
        for row in layer["query"]:
            rowdict = {"name" : cname}
            if join:
                rowdict["lat"] = row.gis_location.lat
                rowdict["lon"] = row.gis_location.lon
            else:
                rowdict["lat"] = row["lat"]
                rowdict["lon"] = row["lon"]
            # Optional per-feature attributes, copied through if present
            if "popup_url" in row:
                rowdict["popup_url"] = row["popup_url"]
            if "popup_label" in row:
                rowdict["popup_label"] = row["popup_label"]
            if "marker" in row:
                # Marker given as a Row: derive URL & dimensions from it
                rowdict["marker_url"] = URL(c="static", f="img",
                                            args=["markers",
                                                  row["marker"].image])
                rowdict["marker_height"] = row["marker"].height
                rowdict["marker_width"] = row["marker"].width
            else:
                # Marker given as individual attributes (if at all)
                if "marker_url" in row:
                    rowdict["marker_url"] = row["marker_url"]
                if "marker_height" in row:
                    rowdict["marker_height"] = row["marker_height"]
                if "marker_width" in row:
                    rowdict["marker_width"] = row["marker_width"]
            if "shape" in row:
                rowdict["shape"] = row["shape"]
            if "size" in row:
                rowdict["size"] = row["size"]
            if "colour" in row:
                rowdict["colour"] = row["colour"]
            if "opacity" in row:
                rowdict["opacity"] = row["opacity"]
            record_id = fqtable.insert(**rowdict)
            if not created_by:
                # Anonymous: tie record ownership to the session instead
                s3_make_session_owner(fqtable, record_id)
        # URL to retrieve the data
        url = "%s.geojson?feature_query.name=%s&feature_query.created_by=%s" % \
                (URL(c="gis", f="feature_query"),
                 cname,
                 created_by)
        _layer["url"] = url
        if "active" in layer and not layer["active"]:
            _layer["visibility"] = False
        # NOTE(review): markerLayer is assigned but never used - candidate for removal
        markerLayer = ""
        if "marker" in layer:
            # per-Layer Marker
            marker = layer["marker"]
            if isinstance(marker, int):
                # integer (marker_id) not row
                marker = db(mtable.id == marker).select(mtable.image,
                                                        mtable.height,
                                                        mtable.width,
                                                        limitby=(0, 1),
                                                        cache=cache
                                                        ).first()
            if marker:
                # @ToDo: Single option as dict
                _layer["marker_url"] = marker["image"]
                _layer["marker_height"] = marker["height"]
                _layer["marker_width"] = marker["width"]
        # Only pass non-default settings to the client (defaults live client-side)
        if "opacity" in layer and layer["opacity"] != 1:
            _layer["opacity"] = "%.1f" % layer["opacity"]
        if "cluster_attribute" in layer and \
           layer["cluster_attribute"] != CLUSTER_ATTRIBUTE:
            _layer["cluster_attribute"] = layer["cluster_attribute"]
        if "cluster_distance" in layer and \
           layer["cluster_distance"] != CLUSTER_DISTANCE:
            _layer["cluster_distance"] = layer["cluster_distance"]
        if "cluster_threshold" in layer and \
           layer["cluster_threshold"] != CLUSTER_THRESHOLD:
            _layer["cluster_threshold"] = layer["cluster_threshold"]
        append(_layer)
    return layers_feature_query
# =============================================================================
def addFeatureResources(feature_resources):
    """
        Add Feature Resources to the map
        - REST URLs to back-end resources

        Each entry is either a Catalogue Layer (has "layer_id": settings are
        read from gis_layer_feature and may be overridden per-entry) or a
        simple URL + tablename (all settings come from the entry itself).

        @param feature_resources: iterable of dicts with "name", "id" and
                                  either "layer_id" or "url"+"tablename"
        @return: list of layer dicts for the client-side map config
    """
    db = current.db
    s3db = current.s3db
    config = GIS.get_config()
    ftable = s3db.gis_layer_feature
    ltable = s3db.gis_layer_config
    layers_feature_resource = []
    append = layers_feature_resource.append
    for layer in feature_resources:
        name = str(layer["name"])
        _layer = dict(name=name)
        # NOTE: 'id' shadows the builtin; kept for backwards-compatibility
        id = str(layer["id"])
        id = re.sub("\W", "_", id)
        _layer["id"] = id
        # Are we loading a Catalogue Layer or a simple URL?
        layer_id = layer.get("layer_id", None)
        if layer_id:
            # Catalogue Layer: read settings from the DB,
            # left join picks up any per-config style
            query = (ftable.layer_id == layer_id)
            lquery = (ltable.layer_id == layer_id) & \
                     (ltable.config_id == config.id)
            left = ltable.on(lquery)
            row = db(query).select(ftable.id,
                                   ftable.controller,
                                   ftable.function,
                                   ftable.filter,
                                   ftable.trackable,
                                   ftable.use_site,
                                   ftable.opacity,
                                   ftable.cluster_attribute,
                                   ftable.cluster_distance,
                                   ftable.cluster_threshold,
                                   ftable.dir,
                                   ltable.style,
                                   left=left,
                                   limitby=(0, 1)).first()
            # Entry-level style overrides the config-level one
            style = layer.get("style", row["gis_layer_config.style"])
            row = row["gis_layer_feature"]
            if row.use_site:
                # Features are linked via sites: need one level of components
                maxdepth = 1
                show_ids = "&show_ids=true"
            else:
                maxdepth = 0
                show_ids = ""
            url = "%s.geojson?layer=%i&components=None&maxdepth=%s%s" % \
                (URL(row.controller, row.function), row.id, maxdepth, show_ids)
            # Use specified filter or fallback to the one in the layer
            filter = layer.get("filter", row.filter)
            if filter:
                url = "%s&%s" % (url, filter)
            if row.trackable:
                url = "%s&track=1" % url
            # Per-entry overrides fall back to the DB values
            opacity = layer.get("opacity", row.opacity)
            cluster_attribute = layer.get("cluster_attribute",
                                          row.cluster_attribute)
            cluster_distance = layer.get("cluster_distance",
                                         row.cluster_distance)
            cluster_threshold = layer.get("cluster_threshold",
                                          row.cluster_threshold)
            dir = layer.get("dir", row.dir)
            if style:
                try:
                    # JSON Object?
                    style = json.loads(style)
                except:
                    # Not JSON: ignore (server-side styling)
                    style = None
            if not style:
                # No style: fall back to a Marker (layer default if not given)
                marker = layer.get("marker",
                                   Marker(layer_id=layer_id).as_dict())
        else:
            # URL to retrieve the data
            url = layer["url"]
            tablename = layer["tablename"]
            table = s3db[tablename]
            # Optimise the query & tell back-end not to add the type to the tooltips
            if "location_id" in table.fields:
                maxdepth = 0
                show_ids = ""
            elif "site_id" in table.fields:
                maxdepth = 1
                show_ids = "&show_ids=true"
            else:
                # Not much we can do!
                continue
            options = "components=None&maxdepth=%s%s&label_off=1" % \
                (maxdepth, show_ids)
            if "?" in url:
                url = "%s&%s" % (url, options)
            else:
                url = "%s?%s" % (url, options)
            # All settings come from the entry itself, with module defaults
            opacity = layer.get("opacity", 1)
            cluster_attribute = layer.get("cluster_attribute",
                                          CLUSTER_ATTRIBUTE)
            cluster_distance = layer.get("cluster_distance",
                                         CLUSTER_DISTANCE)
            cluster_threshold = layer.get("cluster_threshold",
                                          CLUSTER_THRESHOLD)
            dir = layer.get("dir", None)
            style = layer.get("style", None)
            if style:
                try:
                    # JSON Object?
                    style = json.loads(style)
                except:
                    style = None
            if not style:
                marker = layer.get("marker", None)
        if "active" in layer and not layer["active"]:
            _layer["visibility"] = False
        # Only pass non-default settings to the client
        if opacity != 1:
            _layer["opacity"] = "%.1f" % opacity
        if cluster_attribute != CLUSTER_ATTRIBUTE:
            _layer["cluster_attribute"] = cluster_attribute
        if cluster_distance != CLUSTER_DISTANCE:
            _layer["cluster_distance"] = cluster_distance
        if cluster_threshold != CLUSTER_THRESHOLD:
            _layer["cluster_threshold"] = cluster_threshold
        if dir:
            _layer["dir"] = dir
        if style:
            _layer["style"] = style
        elif marker:
            # Per-layer Marker
            _layer["marker"] = dict(i = marker["image"],
                                    h = marker["height"],
                                    w = marker["width"],
                                    )
        else:
            # Request the server to provide per-feature Markers
            url = "%s&markers=1" % url
        _layer["url"] = url
        append(_layer)
    return layers_feature_resource
# =============================================================================
class Marker(object):
    """
        Represents a Map Marker

        Resolution order: explicit marker ID > per-layer symbology marker >
        no marker (polygon/linestring layers) > the active config's default.

        @ToDo: Support Markers in Themes
    """
    def __init__(self, id=None, tablename=None, layer_id=None):
        """
            @param id: gis_marker record ID to look up directly
            @param tablename: layer table name (used to detect polygon layers
                              which should not fall back to a default marker)
            @param layer_id: look up the marker via the layer's symbology
        """
        db = current.db
        s3db = current.s3db
        mtable = s3db.gis_marker
        marker = None
        config = None
        polygons = False
        if id:
            # Lookup the Marker details from it's ID
            marker = db(mtable.id == id).select(mtable.image,
                                                mtable.height,
                                                mtable.width,
                                                limitby=(0, 1),
                                                cache=s3db.cache).first()
        elif layer_id:
            # Check if we have a Marker for this Layer
            config = GIS.get_config()
            ltable = s3db.gis_layer_symbology
            query = (ltable.layer_id == layer_id) & \
                    (ltable.symbology_id == config.symbology_id) & \
                    (ltable.marker_id == mtable.id)
            marker = db(query).select(mtable.image,
                                      mtable.height,
                                      mtable.width,
                                      limitby=(0, 1)).first()
            if not marker:
                # Check to see if we're a Polygon/LineString
                # (& hence shouldn't use a default marker)
                if tablename == "gis_layer_feature":
                    table = db.gis_layer_feature
                    query = (table.layer_id == layer_id)
                    layer = db(query).select(table.polygons,
                                             limitby=(0, 1)).first()
                    if layer and layer.polygons:
                        polygons = True
                elif tablename == "gis_layer_shapefile":
                    table = db.gis_layer_shapefile
                    query = (table.layer_id == layer_id)
                    layer = db(query).select(table.gis_feature_type,
                                             limitby=(0, 1)).first()
                    # gis_feature_type 1 = Point; anything else is a shape
                    if layer and layer.gis_feature_type != 1:
                        polygons = True
        if marker:
            self.image = marker.image
            self.height = marker.height
            self.width = marker.width
        elif polygons:
            # NOTE(review): height/width are left unset in this branch, so
            # as_dict() would raise AttributeError for polygon layers - only
            # add_attributes_to_output() (which checks image first) is safe
            self.image = None
        else:
            # Default Marker
            if not config:
                config = GIS.get_config()
            self.image = config.marker_image
            self.height = config.marker_height
            self.width = config.marker_width
    # -------------------------------------------------------------------------
    def add_attributes_to_output(self, output):
        """
            Called by Layer.as_dict()

            Adds the marker (if any) to the layer's output dict using the
            compact client-side keys (i/h/w).
        """
        if self.image:
            output["marker"] = dict(i = self.image,
                                    h = self.height,
                                    w = self.width,
                                    )
    # -------------------------------------------------------------------------
    def as_dict(self):
        """
            Called by gis.get_marker(), feature_resources & s3profile

            @return: Storage with image/height/width
        """
        output = Storage(image = self.image,
                         height = self.height,
                         width = self.width,
                         )
        return output
# =============================================================================
class Projection(object):
    """
        Represents a Map Projection
    """

    def __init__(self, id=None):
        """
            @param id: gis_projection record ID; if omitted, the EPSG code
                       of the active gis_config is used
        """
        if not id:
            # Default projection: take the EPSG from the active config
            self.epsg = GIS.get_config().epsg
            return
        s3db = current.s3db
        table = s3db.gis_projection
        row = current.db(table.id == id).select(table.epsg,
                                                limitby=(0, 1),
                                                cache=s3db.cache).first()
        self.epsg = row.epsg
# =============================================================================
class Layer(object):
    """
        Abstract base class for Layers from Catalogue

        Subclasses define:
        - tablename: the gis_layer_* table holding the layer definitions
        - dictname: the key under which the layers appear in S3.gis options
        - SubLayer: per-record wrapper producing the client-side dict
    """
    def __init__(self):
        """
            Read all enabled & accessible layers of this type from the
            active config(s) and wrap them as SubLayers
        """
        sublayers = []
        append = sublayers.append
        # List of Scripts to load async with the Map JavaScript
        self.scripts = []
        gis = current.response.s3.gis
        s3db = current.s3db
        s3_has_role = current.auth.s3_has_role
        # Read the Layers enabled in the Active Configs
        if gis.config is None:
            GIS.set_config()
        tablename = self.tablename
        table = s3db[tablename]
        ctable = s3db.gis_config
        ltable = s3db.gis_layer_config
        # Select all non-meta fields plus the per-config settings
        fields = table.fields
        metafields = s3_all_meta_field_names()
        fields = [table[f] for f in fields if f not in metafields]
        fields += [ltable.enabled,
                   ltable.visible,
                   ltable.base,
                   ltable.style,
                   ctable.pe_type,
                   ]
        query = (table.layer_id == ltable.layer_id) & \
                (ltable.config_id == ctable.id) & \
                (ltable.config_id.belongs(gis.config.ids))
        if gis.base == True:
            # Only show the default base layer
            if self.tablename == "gis_layer_empty":
                # Show even if disabled (as fallback)
                query = (table.id > 0)
            else:
                query &= (ltable.base == True)
        if current.deployment_settings.get_gis_layer_metadata():
            # Pick up any CMS metadata post attached to the layer
            mtable = s3db.cms_post_layer
            left = mtable.on(mtable.layer_id == table.layer_id)
            fields.append(mtable.post_id)
        else:
            left = None
        # orderby pe_type: most specific config wins the dedup below
        rows = current.db(query).select(orderby=ctable.pe_type,
                                        left=left,
                                        *fields)
        layer_ids = []
        lappend = layer_ids.append
        SubLayer = self.SubLayer
        # Flag to show whether we've set the default baselayer
        # (otherwise a config higher in the hierarchy can overrule one lower down)
        base = True
        # Layers requested to be visible via URL (e.g. embedded map)
        visible = current.request.get_vars.get("layers", None)
        if visible:
            visible = visible.split(".")
        else:
            visible = []
        for _record in rows:
            record = _record[tablename]
            # Check if we've already seen this layer
            layer_id = record.layer_id
            if layer_id in layer_ids:
                continue
            # Add layer to list of checked
            lappend(layer_id)
            # Check if layer is enabled
            _config = _record["gis_layer_config"]
            if not _config.enabled:
                continue
            # Check user is allowed to access the layer
            role_required = record.role_required
            if role_required and not s3_has_role(role_required):
                continue
            # All OK - add SubLayer
            record["visible"] = _config.visible or str(layer_id) in visible
            if base and _config.base:
                # name can't conflict with OSM/WMS/ArcREST layers
                record["_base"] = True
                base = False
            else:
                record["_base"] = False
            if "style" not in record:
                # Take from the layer_config
                record["style"] = _config.style
            if left is not None:
                record["post_id"] = _record["cms_post_layer.post_id"]
            if tablename in ["gis_layer_bing", "gis_layer_google"]:
                # SubLayers handled differently
                append(record)
            else:
                append(SubLayer(tablename, record))
        # Alphasort layers
        # - client will only sort within their type: s3.gis.layers.js
        self.sublayers = sorted(sublayers, key=lambda row: row.name)
    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Layers as a Python dict

            @param options: dict to update in-place (used by Map._setup());
                            if None, the list is returned instead
        """
        sublayer_dicts = []
        append = sublayer_dicts.append
        sublayers = self.sublayers
        for sublayer in sublayers:
            # Read the output dict for this sublayer
            sublayer_dict = sublayer.as_dict()
            if sublayer_dict:
                # Add this layer to the list of layers for this layer type
                append(sublayer_dict)
        if sublayer_dicts:
            if options:
                # Used by Map._setup()
                options[self.dictname] = sublayer_dicts
            else:
                # Used by as_json() and hence as_javascript()
                return sublayer_dicts
    # -------------------------------------------------------------------------
    def as_json(self):
        """
            Output the Layers as JSON
        """
        result = self.as_dict()
        if result:
            #return json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True)
            return json.dumps(result, separators=SEPARATORS)
    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layers as global Javascript
            - suitable for inclusion in the HTML page
        """
        result = self.as_json()
        if result:
            return '''S3.gis.%s=%s\n''' % (self.dictname, result)
    # -------------------------------------------------------------------------
    class SubLayer(object):
        """
            Wraps a single layer record & produces its client-side dict
        """
        def __init__(self, tablename, record):
            # Ensure all attributes available (even if Null)
            self.__dict__.update(record)
            del record
            # Name with quote characters stripped (safe for JS strings)
            self.safe_name = re.sub('[\\"]', "", self.name)
            if tablename not in ("gis_layer_arcrest",
                                 "gis_layer_coordinate",
                                 "gis_layer_empty",
                                 "gis_layer_js",
                                 "gis_layer_mgrs",
                                 "gis_layer_openstreetmap",
                                 "gis_layer_openweathermap",
                                 "gis_layer_theme",
                                 "gis_layer_tms",
                                 "gis_layer_wms",
                                 "gis_layer_xyz",
                                 ):
                # Layer uses Markers
                self.marker = Marker(tablename=tablename, layer_id=self.layer_id)
            if hasattr(self, "projection_id"):
                self.projection = Projection(self.projection_id)
        def setup_clustering(self, output):
            """ Add non-default cluster settings to the output dict """
            if hasattr(self, "cluster_attribute"):
                cluster_attribute = self.cluster_attribute
            else:
                cluster_attribute = None
            cluster_distance = self.cluster_distance
            cluster_threshold = self.cluster_threshold
            if cluster_attribute and \
               cluster_attribute != CLUSTER_ATTRIBUTE:
                output["cluster_attribute"] = cluster_attribute
            if cluster_distance != CLUSTER_DISTANCE:
                output["cluster_distance"] = cluster_distance
            if cluster_threshold != CLUSTER_THRESHOLD:
                output["cluster_threshold"] = cluster_threshold
        def setup_folder(self, output):
            """ Add the layer's folder (if any) to the output dict """
            if self.dir:
                output["dir"] = self.dir
        def setup_folder_and_visibility(self, output):
            """ Add folder & (non-default) visibility to the output dict """
            if not self.visible:
                output["visibility"] = False
            if self.dir:
                output["dir"] = self.dir
        def setup_folder_visibility_and_opacity(self, output):
            """ Add folder, visibility & opacity (non-defaults only) """
            if not self.visible:
                output["visibility"] = False
            if self.opacity != 1:
                output["opacity"] = "%.1f" % self.opacity
            if self.dir:
                output["dir"] = self.dir
        # ---------------------------------------------------------------------
        @staticmethod
        def add_attributes_if_not_default(output, **values_and_defaults):
            """
                Add each value to the output dict unless it matches one of
                its defaults; kwargs map key -> (value, tuple-of-defaults)
            """
            # could also write values in debug mode, to check if defaults ignored.
            # could also check values are not being overwritten.
            for key, (value, defaults) in values_and_defaults.iteritems():
                if value not in defaults:
                    output[key] = value
# -----------------------------------------------------------------------------
class LayerArcREST(Layer):
    """
        ArcGIS REST Layers from Catalogue
    """

    tablename = "gis_layer_arcrest"
    dictname = "layers_arcrest"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config
            """
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "arcrest",
                     "name": self.safe_name,
                     "url": self.url,
                     }
            # Optional attributes: client defaults apply when not set
            self.setup_folder_and_visibility(ldict)
            self.add_attributes_if_not_default(
                ldict,
                layers = (self.layers, ([0],)),
                transparent = (self.transparent, (True,)),
                base = (self.base, (False,)),
                _base = (self._base, (False,)),
            )
            return ldict
# -----------------------------------------------------------------------------
class LayerBing(Layer):
    """
        Bing Layers from Catalogue
    """

    tablename = "gis_layer_bing"
    dictname = "Bing"

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Bing config as a dict (or into options, if given)
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        if Projection().epsg != 900913:
            raise Exception("Cannot display Bing layers unless we're using the Spherical Mercator Projection\n")
        apikey = current.deployment_settings.get_gis_api_bing()
        if not apikey:
            raise Exception("Cannot display Bing layers unless we have an API key\n")
        # Mandatory attributes
        ldict = {"ApiKey": apikey
                 }
        # Map sublayer type -> (output key, default display name)
        types = {"aerial": ("Aerial", "Bing Satellite"),
                 "road": ("Road", "Bing Roads"),
                 "hybrid": ("Hybrid", "Bing Hybrid"),
                 }
        for sublayer in sublayers:
            if sublayer._base:
                # Set default Base layer
                ldict["Base"] = sublayer.type
            entry = types.get(sublayer.type)
            if entry:
                key, default_name = entry
                ldict[key] = {"name": sublayer.name or default_name,
                              "id": sublayer.layer_id}
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
        else:
            # Used by as_json() and hence as_javascript()
            return ldict
# -----------------------------------------------------------------------------
class LayerCoordinate(Layer):
    """
        Coordinate Layer from Catalogue
        - there should only be one of these
    """

    tablename = "gis_layer_coordinate"
    dictname = "CoordinateGrid"

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Coordinate grid config as a dict
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        # The grid is drawn client-side by cdauth.js
        if current.response.s3.debug:
            script = "gis/cdauth.js"
        else:
            script = "gis/cdauth.min.js"
        self.scripts.append(script)
        sublayer = sublayers[0]
        ldict = {"name": re.sub("'", "", sublayer.name),
                 "visibility": sublayer.visible,
                 "id": sublayer.layer_id,
                 }
        if options:
            # Used by Map._setup()
            options[self.dictname] = ldict
        else:
            # Used by as_json() and hence as_javascript()
            return ldict
# -----------------------------------------------------------------------------
class LayerEmpty(Layer):
    """
        Empty Layer from Catalogue
        - there should only be one of these
    """
    tablename = "gis_layer_empty"
    dictname = "EmptyLayer"
    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Empty layer config as a dict

            @param options: dict to update in-place (used by Map._setup());
                            if None, the dict is returned (as_json() path)
        """
        sublayers = self.sublayers
        if sublayers:
            sublayer = sublayers[0]
            # Localise the name for the current session
            name = str(current.T(sublayer.name))
            # Fix: strip quotes from the *translated* name
            # (previously the translation was computed but never used)
            name_safe = re.sub("'", "", name)
            ldict = dict(name = name_safe,
                         id = sublayer.layer_id)
            if sublayer._base:
                ldict["base"] = True
            if options:
                # Used by Map._setup()
                options[self.dictname] = ldict
            else:
                # Used by as_json() and hence as_javascript()
                return ldict
# -----------------------------------------------------------------------------
class LayerFeature(Layer):
    """
        Feature Layers from Catalogue
    """
    tablename = "gis_layer_feature"
    dictname = "layers_feature"
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def __init__(self, tablename, record):
            """
                Flag the layer to be skipped when its module is disabled or
                the user lacks read permission on the target resource
            """
            controller = record.controller
            self.skip = False
            if controller is not None:
                if controller not in current.deployment_settings.modules:
                    # Module is disabled
                    self.skip = True
                if not current.auth.permission.has_permission("read",
                                                              c=controller,
                                                              f=record.function):
                    # User has no permission to this resource (in ACL)
                    self.skip = True
            else:
                raise Exception("Feature Layer Record '%s' has no controller" % record.name)
            super(LayerFeature.SubLayer, self).__init__(tablename, record)
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config
                - returns None when the layer is to be skipped
            """
            if self.skip:
                # Skip layer
                return
            if self.use_site:
                maxdepth = 1
                show_ids = "&show_ids=true"
            else:
                maxdepth = 0
                show_ids = ""
            url = "%s.geojson?layer=%i&components=None&maxdepth=%s%s" % \
                  (URL(self.controller, self.function), self.id, maxdepth, show_ids)
            if self.filter:
                url = "%s&%s" % (url, self.filter)
            if self.trackable:
                url = "%s&track=1" % url
            # Parse the style ONCE:
            # either a JSON object, or a fieldname for server-side styling
            style = self.style
            if style:
                try:
                    # JSON Object?
                    style = json.loads(style)
                except:
                    # Fieldname to pass to URL for server-side lookup
                    url = "%s&style=%s" % (url, style)
                    style = None
            # Mandatory attributes
            output = {"id": self.layer_id,
                      # Defaults client-side if not-provided
                      #"type": "feature",
                      "name": self.safe_name,
                      "url": url,
                      }
            # Attributes which are defaulted client-side if not set
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            if not self.popup_fields:
                output["no_popups"] = 1
            # Fix: reuse the style parsed above instead of re-reading
            # self.style & calling json.loads() unguarded - the original
            # raised ValueError whenever style was a fieldname (non-JSON)
            if style:
                output["style"] = style
            else:
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerGeoJSON(Layer):
    """
        GeoJSON Layers from Catalogue
    """

    tablename = "gis_layer_geojson"
    dictname = "layers_geojson"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config
            """
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "type": "geojson",
                     "name": self.safe_name,
                     "url": self.url,
                     }
            # Only pass a non-default (non-WGS84) projection to the client
            epsg = self.projection.epsg
            if epsg != 4326:
                ldict["projection"] = epsg
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            style = self.style
            if style:
                # Style is stored as a JSON string
                ldict["style"] = json.loads(style)
            else:
                # No style: fall back to the layer's Marker
                self.marker.add_attributes_to_output(ldict)
            return ldict
# -----------------------------------------------------------------------------
class LayerGeoRSS(Layer):
    """
        GeoRSS Layers from Catalogue

        Feeds are downloaded into the gis_cache table & served back to the
        client as GeoJSON from there.
    """
    tablename = "gis_layer_georss"
    dictname = "layers_georss"

    def __init__(self):
        super(LayerGeoRSS, self).__init__()
        # Shared by all sublayers of this type
        LayerGeoRSS.SubLayer.cachetable = current.s3db.gis_cache
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config,
                refreshing the cached copy of the feed if it has expired
            """
            db = current.db
            request = current.request
            response = current.response
            cachetable = self.cachetable
            url = self.url
            # Check to see if we should Download layer to the cache
            download = True
            query = (cachetable.source == url)
            existing_cached_copy = db(query).select(cachetable.modified_on,
                                                    limitby=(0, 1)).first()
            refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
            if existing_cached_copy:
                modified_on = existing_cached_copy.modified_on
                cutoff = modified_on + timedelta(seconds=refresh)
                if request.utcnow < cutoff:
                    # Cached copy is still fresh
                    download = False
            if download:
                # Download layer to the Cache
                from gluon.tools import fetch
                # @ToDo: Call directly without going via HTTP
                # @ToDo: Make this async by using S3Task (also use this for the refresh time)
                fields = ""
                if self.data:
                    fields = "&data_field=%s" % self.data
                if self.image:
                    fields = "%s&image_field=%s" % (fields, self.image)
                _url = "%s%s/update.georss?fetchurl=%s%s" % (current.deployment_settings.get_base_public_url(),
                                                             URL(c="gis", f="cache_feed"),
                                                             url,
                                                             fields)
                # Keep Session for local URLs
                import Cookie
                cookie = Cookie.SimpleCookie()
                cookie[response.session_id_name] = response.session_id
                # Unlock the session so the fetch-back request isn't blocked
                current.session._unlock(response)
                try:
                    # @ToDo: Need to commit to not have DB locked with SQLite?
                    fetch(_url, cookie=cookie)
                    if existing_cached_copy:
                        # Clear old selfs which are no longer active
                        query = (cachetable.source == url) & \
                                (cachetable.modified_on < cutoff)
                        db(query).delete()
                except Exception, exception:
                    s3_debug("GeoRSS %s download error" % url, exception)
                    # Feed down
                    if existing_cached_copy:
                        # Use cached copy
                        # Should we Update timestamp to prevent every
                        # subsequent request attempting the download?
                        #query = (cachetable.source == url)
                        #db(query).update(modified_on=request.utcnow)
                        pass
                    else:
                        response.warning += "%s down & no cached copy available" % url
            name_safe = self.safe_name
            # Pass the GeoJSON URL to the client
            # Filter to the source of this feed
            url = "%s.geojson?cache.source=%s" % (URL(c="gis", f="cache_feed"),
                                                  url)
            # Mandatory attributes
            output = {"id": self.layer_id,
                      "type": "georss",
                      "name": name_safe,
                      "url": url,
                      }
            self.marker.add_attributes_to_output(output)
            # Attributes which are defaulted client-side if not set
            if self.refresh != 900:
                output["refresh"] = self.refresh
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class LayerGoogle(Layer):
    """
        Google Layers/Tools from Catalogue
    """
    tablename = "gis_layer_google"
    dictname = "Google"
    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the Google config as a dict (or into options, if given)

            Also queues the required Google API scripts; which API version
            is loaded depends on the sublayer types enabled.
        """
        sublayers = self.sublayers
        if sublayers:
            T = current.T
            # All layers except Earth need Spherical Mercator
            epsg = (Projection().epsg == 900913)
            settings = current.deployment_settings
            apikey = settings.get_gis_api_google()
            s3 = current.response.s3
            debug = s3.debug
            # Google scripts use document.write so cannot be loaded async via yepnope.js
            add_script = s3.scripts.append
            ldict = {}
            for sublayer in sublayers:
                # Attributes which are defaulted client-side if not set
                if sublayer.type == "earth":
                    ldict["Earth"] = str(T("Switch to 3D"))
                    #{"modules":[{"name":"earth","version":"1"}]}
                    # autoload param is the URL-encoded form of the JSON above
                    add_script("http://www.google.com/jsapi?key=" + apikey + "&autoload=%7B%22modules%22%3A%5B%7B%22name%22%3A%22earth%22%2C%22version%22%3A%221%22%7D%5D%7D")
                    # Dynamic Loading not supported: https://developers.google.com/loader/#Dynamic
                    #s3.jquery_ready.append('''try{google.load('earth','1')catch(e){}''')
                    if debug:
                        self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.js")
                    else:
                        self.scripts.append("gis/gxp/widgets/GoogleEarthPanel.min.js")
                    s3.js_global.append('''S3.public_url="%s"''' % settings.get_base_public_url())
                elif epsg:
                    # Earth is the only layer which can run in non-Spherical Mercator
                    # @ToDo: Warning?
                    if sublayer._base:
                        # Set default Base layer
                        ldict["Base"] = sublayer.type
                    if sublayer.type == "satellite":
                        ldict["Satellite"] = {"name": sublayer.name or "Google Satellite",
                                              "id": sublayer.layer_id}
                    elif sublayer.type == "maps":
                        ldict["Maps"] = {"name": sublayer.name or "Google Maps",
                                         "id": sublayer.layer_id}
                    elif sublayer.type == "hybrid":
                        ldict["Hybrid"] = {"name": sublayer.name or "Google Hybrid",
                                           "id": sublayer.layer_id}
                    elif sublayer.type == "streetview":
                        # Placeholder: replaced with the localised string below
                        ldict["StreetviewButton"] = "Click where you want to open Streetview"
                    elif sublayer.type == "terrain":
                        ldict["Terrain"] = {"name": sublayer.name or "Google Terrain",
                                            "id": sublayer.layer_id}
                    elif sublayer.type == "mapmaker":
                        ldict["MapMaker"] = {"name": sublayer.name or "Google MapMaker",
                                             "id": sublayer.layer_id}
                    elif sublayer.type == "mapmakerhybrid":
                        ldict["MapMakerHybrid"] = {"name": sublayer.name or "Google MapMaker Hybrid",
                                                   "id": sublayer.layer_id}
            if "MapMaker" in ldict or "MapMakerHybrid" in ldict:
                # Need to use v2 API
                # This should be able to be fixed in OpenLayers now since Google have fixed in v3 API:
                # http://code.google.com/p/gmaps-api-issues/issues/detail?id=2349#c47
                add_script("http://maps.google.com/maps?file=api&v=2&key=%s" % apikey)
            else:
                # v3 API (3.10 is frozen, 3.11 release & 3.12 is nightly)
                add_script("http://maps.google.com/maps/api/js?v=3.11&sensor=false")
                if "StreetviewButton" in ldict:
                    # Streetview doesn't work with v2 API
                    ldict["StreetviewButton"] = str(T("Click where you want to open Streetview"))
                    ldict["StreetviewTitle"] = str(T("Street View"))
                    if debug:
                        self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.js")
                    else:
                        self.scripts.append("gis/gxp/widgets/GoogleStreetViewPanel.min.js")
            if options:
                # Used by Map._setup()
                options[self.dictname] = ldict
            else:
                # Used by as_json() and hence as_javascript()
                return ldict
# -----------------------------------------------------------------------------
class LayerGPX(Layer):
    """
        GPX Layers from Catalogue
    """

    tablename = "gis_layer_gpx"
    dictname = "layers_gpx"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config
            """
            # The GPX file is served via the default download handler
            track_url = URL(c="default", f="download",
                            args=self.track)
            # Mandatory attributes
            ldict = {"id": self.layer_id,
                     "name": self.safe_name,
                     "url": track_url,
                     }
            # Optional attributes: client defaults apply when not set
            self.marker.add_attributes_to_output(ldict)
            self.add_attributes_if_not_default(
                ldict,
                waypoints = (self.waypoints, (True,)),
                tracks = (self.tracks, (True,)),
                routes = (self.routes, (True,)),
            )
            self.setup_folder_visibility_and_opacity(ldict)
            self.setup_clustering(ldict)
            return ldict
# -----------------------------------------------------------------------------
class LayerJS(Layer):
    """
        JS Layers from Catalogue
        - these are raw Javascript layers for use by expert OpenLayers people
          to quickly add/configure new data sources without needing support
          from back-end Sahana programmers
    """

    tablename = "gis_layer_js"
    dictname = "layers_js"

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Output the JS snippets as a list (or into options, if given)
        """
        sublayers = self.sublayers
        if not sublayers:
            return
        # Raw JS code strings are passed straight through to the client
        sublayer_dicts = [sublayer.code for sublayer in sublayers]
        if options:
            # Used by Map._setup()
            options[self.dictname] = sublayer_dicts
        else:
            # Used by as_json() and hence as_javascript()
            return sublayer_dicts
# -----------------------------------------------------------------------------
class LayerKML(Layer):
    """
        KML Layers from Catalogue

        Remote KML feeds are downloaded into a filesystem cache (async via
        S3Task where possible) & then served from there.
    """
    tablename = "gis_layer_kml"
    dictname = "layers_kml"
    # -------------------------------------------------------------------------
    def __init__(self, init=True):
        "Set up the KML cache, should be done once per request"
        super(LayerKML, self).__init__()
        # Can we cache downloaded KML feeds?
        # Needed for unzipping & filtering as well
        # @ToDo: Should we move this folder to static to speed up access to cached content?
        #           Do we need to secure it?
        request = current.request
        cachepath = os.path.join(request.folder,
                                 "uploads",
                                 "gis_cache")
        if os.path.exists(cachepath):
            # Folder exists: check it is writable
            cacheable = os.access(cachepath, os.W_OK)
        else:
            try:
                os.mkdir(cachepath)
            except OSError, os_error:
                s3_debug("GIS: KML layers cannot be cached: %s %s" % \
                            (cachepath, os_error))
                cacheable = False
            else:
                cacheable = True
        # @ToDo: Migrate to gis_cache
        # Class-level: shared by all sublayers in this request
        LayerKML.cachetable = current.s3db.gis_cache2
        LayerKML.cacheable = cacheable
        LayerKML.cachepath = cachepath
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer for the client-side map config,
                scheduling a (re-)download of the feed if the cache is stale
            """
            db = current.db
            request = current.request
            cachetable = LayerKML.cachetable
            cacheable = LayerKML.cacheable
            cachepath = LayerKML.cachepath
            name = self.name
            if cacheable:
                # Build a filesystem-safe cache filename from the layer name
                _name = urllib2.quote(name)
                _name = _name.replace("%", "_")
                filename = "%s.file.%s.kml" % (cachetable._tablename,
                                               _name)
                # Should we download a fresh copy of the source file?
                download = True
                query = (cachetable.name == name)
                cached = db(query).select(cachetable.modified_on,
                                          limitby=(0, 1)).first()
                refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
                if cached:
                    modified_on = cached.modified_on
                    cutoff = modified_on + timedelta(seconds=refresh)
                    if request.utcnow < cutoff:
                        # Cached copy is still fresh
                        download = False
                if download:
                    # Download file (async, if workers alive)
                    response = current.response
                    session_id_name = response.session_id_name
                    session_id = response.session_id
                    current.s3task.async("gis_download_kml",
                                         args=[self.id, filename, session_id_name, session_id])
                    if cached:
                        db(query).update(modified_on=request.utcnow)
                    else:
                        cachetable.insert(name=name, file=filename)
                url = URL(c="default", f="download",
                          args=[filename])
            else:
                # No caching possible (e.g. GAE), display file direct from remote (using Proxy)
                # (Requires OpenLayers.Layer.KML to be available)
                url = self.url
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          name = self.safe_name,
                          url = url,
                          )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                output,
                title = (self.title, ("name", None, "")),
                body = (self.body, ("description", None)),
                refresh = (self.refresh, (900,)),
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            style = self.style
            if style:
                # Style is stored as a JSON string
                style = json.loads(style)
                output["style"] = style
            else:
                # No style: fall back to the layer's Marker
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerOSM(Layer):
    """
        OpenStreetMap Layers from Catalogue

        @ToDo: Provide a catalogue of standard layers which are fully-defined
               in static & can just have name over-ridden, as well as
               fully-custom layers.
    """
    tablename = "gis_layer_openstreetmap"
    dictname = "layers_osm"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this OSM sub-layer as a dict of layer options"""
            # OSM tiles are only usable in Spherical Mercator
            if Projection().epsg != 900913:
                # Cannot display OpenStreetMap layers unless we're using the Spherical Mercator Projection
                return {}
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          name = self.safe_name,
                          url1 = self.url1,
                          )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                output,
                base = (self.base, (True,)),
                _base = (self._base, (False,)),
                url2 = (self.url2, ("",)),
                url3 = (self.url3, ("",)),
                zoomLevels = (self.zoom_levels, (9,)),
                attribution = (self.attribution, (None,)),
            )
            self.setup_folder_and_visibility(output)
            return output
# -----------------------------------------------------------------------------
class LayerOpenWeatherMap(Layer):
    """
        OpenWeatherMap Layers from Catalogue
    """
    tablename = "gis_layer_openweathermap"
    dictname = "OWM"

    # -------------------------------------------------------------------------
    def as_dict(self, options=None):
        """
            Serialise the OWM sub-layers, keyed by their type.

            @param options: dict to store the result in (used by Map._setup());
                            if None, the dict is returned instead (as_json())
        """
        sublayers = self.sublayers
        if sublayers:
            # Client-side support script (minified unless debugging)
            append = self.scripts.append
            if current.response.s3.debug:
                append("gis/OWM.OpenLayers.js")
            else:
                append("gis/OWM.OpenLayers.min.js")
            # Default layer names per supported sub-layer type
            default_names = {"station": "Weather Stations",
                             "city": "Current Weather",
                             }
            ldict = {}
            for sublayer in sublayers:
                layer_type = sublayer.type
                if layer_type in default_names:
                    ldict[layer_type] = {"name": sublayer.name or default_names[layer_type],
                                         "id": sublayer.layer_id,
                                         "dir": sublayer.dir,
                                         "visibility": sublayer.visible
                                         }
            if options:
                # Used by Map._setup()
                options[self.dictname] = ldict
            else:
                # Used by as_json() and hence as_javascript()
                return ldict
# -----------------------------------------------------------------------------
class LayerShapefile(Layer):
    """
        Shapefile Layers from Catalogue
    """
    tablename = "gis_layer_shapefile"
    dictname = "layers_shapefile"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this Shapefile sub-layer as a dict of layer options"""
            # Data is served as GeoJSON by the REST controller
            base_url = URL(c="gis", f="layer_shapefile")
            url = "%s/%s/data.geojson" % (base_url, self.id)
            if self.filter:
                # Append the configured resource filter
                url = "%s?layer_shapefile_%s.%s" % (url, self.id, self.filter)
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          type = "shapefile",
                          name = self.safe_name,
                          url = url,
                          # Shapefile layers don't alter their contents, so don't refresh
                          refresh = 0,
                          )
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                output,
                desc = (self.description, (None, "")),
                src = (self.source_name, (None, "")),
                src_url = (self.source_url, (None, "")),
            )
            # We convert on-upload to have BBOX handling work properly,
            # so no client-side projection setting is needed here
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            # A custom JSON style overrides the layer marker
            if self.style:
                output["style"] = json.loads(self.style)
            else:
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerTheme(Layer):
    """
        Theme Layers from Catalogue
    """
    tablename = "gis_layer_theme"
    dictname = "layers_theme"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this Theme sub-layer as a dict of layer options"""
            # GeoJSON of the theme data: polygons included, no drill-down
            url = "%s.geojson?theme_data.layer_theme_id=%i&polygons=1&maxdepth=0" % \
                  (URL(c="gis", f="theme_data"), self.id)
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          type = "theme",
                          name = self.safe_name,
                          url = url,
                          )
            # Attributes which are defaulted client-side if not set
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            # A custom JSON style, if present
            if self.style:
                output["style"] = json.loads(self.style)
            return output
# -----------------------------------------------------------------------------
class LayerTMS(Layer):
    """
        TMS Layers from Catalogue
    """
    tablename = "gis_layer_tms"
    dictname = "layers_tms"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this TMS sub-layer as a dict of layer options"""
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          type = "tms",
                          name = self.safe_name,
                          url = self.url,
                          layername = self.layername,
                          )
            # Attributes which are defaulted client-side if not set
            defaulted = dict(_base = (self._base, (False,)),
                             url2 = (self.url2, (None,)),
                             url3 = (self.url3, (None,)),
                             format = (self.img_format, ("png", None)),
                             zoomLevels = (self.zoom_levels, (19,)),
                             attribution = (self.attribution, (None,)),
                             )
            self.add_attributes_if_not_default(output, **defaulted)
            self.setup_folder(output)
            return output
# -----------------------------------------------------------------------------
class LayerWFS(Layer):
    """
        WFS Layers from Catalogue
    """
    tablename = "gis_layer_wfs"
    dictname = "layers_wfs"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this WFS sub-layer as a dict of layer options"""
            # Mandatory attributes
            output = {"id": self.layer_id,
                      "name": self.safe_name,
                      "url": self.url,
                      "title": self.title,
                      "featureType": self.featureType,
                      }
            # Attributes which are defaulted client-side if not set
            self.add_attributes_if_not_default(
                output,
                version = (self.version, ("1.1.0",)),
                featureNS = (self.featureNS, (None, "")),
                geometryName = (self.geometryName, ("the_geom",)),
                schema = (self.wfs_schema, (None, "")),
                username = (self.username, (None, "")),
                password = (self.password, (None, "")),
                projection = (self.projection.epsg, (4326,)),
                desc = (self.description, (None, "")),
                src = (self.source_name, (None, "")),
                src_url = (self.source_url, (None, "")),
                refresh = (self.refresh, (0,)),
                #editable
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            # A custom JSON style overrides the layer marker
            if self.style:
                output["style"] = json.loads(self.style)
            else:
                self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class LayerWMS(Layer):
    """
        WMS Layers from Catalogue
    """
    tablename = "gis_layer_wms"
    dictname = "layers_wms"

    # -------------------------------------------------------------------------
    def __init__(self):
        super(LayerWMS, self).__init__()
        if self.sublayers:
            # GetFeatureInfo plugin is needed for queryable WMS layers
            if current.response.s3.debug:
                script = "gis/gxp/plugins/WMSGetFeatureInfo.js"
            else:
                script = "gis/gxp/plugins/WMSGetFeatureInfo.min.js"
            self.scripts.append(script)

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this WMS sub-layer as a dict of layer options"""
            if self.queryable:
                # Tell the map client to enable the GetFeatureInfo control
                current.response.s3.gis.get_feature_info = True
            # Mandatory attributes
            output = {"id": self.layer_id,
                      "name": self.safe_name,
                      "url": self.url,
                      "layers": self.layers,
                      }
            # Attributes which are defaulted client-side if not set
            legend_url = self.legend_url
            if legend_url and not legend_url.startswith("http"):
                # Expand a local path into a full URL
                legend_url = "%s/%s%s" % \
                    (current.deployment_settings.get_base_public_url(),
                     current.request.application,
                     legend_url)
            attr = dict(transparent = (self.transparent, (True,)),
                        version = (self.version, ("1.1.1",)),
                        format = (self.img_format, ("image/png",)),
                        map = (self.map, (None, "")),
                        username = (self.username, (None, "")),
                        password = (self.password, (None, "")),
                        buffer = (self.buffer, (0,)),
                        base = (self.base, (False,)),
                        _base = (self._base, (False,)),
                        style = (self.style, (None, "")),
                        bgcolor = (self.bgcolor, (None, "")),
                        tiled = (self.tiled, (False,)),
                        legendURL = (legend_url, (None, "")),
                        queryable = (self.queryable, (False,)),
                        desc = (self.description, (None, "")),
                        )
            if current.deployment_settings.get_gis_layer_metadata():
                # Use CMS to add info about sources
                attr["post_id"] = (self.post_id, (None, ""))
            else:
                # Link direct to sources
                attr["src"] = (self.source_name, (None, ""))
                attr["src_url"] = (self.source_url, (None, ""))
            self.add_attributes_if_not_default(output, **attr)
            self.setup_folder_visibility_and_opacity(output)
            return output
# -----------------------------------------------------------------------------
class LayerXYZ(Layer):
    """
        XYZ Layers from Catalogue
    """
    tablename = "gis_layer_xyz"
    dictname = "layers_xyz"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise this XYZ sub-layer as a dict of layer options"""
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          name = self.safe_name,
                          url = self.url,
                          )
            # Attributes which are defaulted client-side if not set
            defaulted = dict(_base = (self._base, (False,)),
                             url2 = (self.url2, (None,)),
                             url3 = (self.url3, (None,)),
                             format = (self.img_format, ("png", None)),
                             zoomLevels = (self.zoom_levels, (19,)),
                             attribution = (self.attribution, (None,)),
                             )
            self.add_attributes_if_not_default(output, **defaulted)
            self.setup_folder(output)
            return output
# =============================================================================
class S3Map(S3Method):
    """
        Class to generate a Map linked to Search filters
    """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Entry point to apply map method to S3Requests
            - produces a full page with S3FilterWidgets above a Map

            @param r: the S3Request instance
            @param attr: controller attributes for the request

            @return: output object to send to the view
            (returns None for a GET with a non-HTML representation)
        """
        if r.http == "GET":
            representation = r.representation
            if representation == "html":
                return self.page(r, **attr)
        else:
            r.error(405, current.manager.ERROR.BAD_METHOD)

    # -------------------------------------------------------------------------
    def page(self, r, **attr):
        """
            Map page: filter form (if configured) plus the map widget

            @param r: the S3Request instance
            @param attr: controller attributes for the request
        """
        if r.representation in ("html", "iframe"):
            s3db = current.s3db
            response = current.response
            resource = self.resource
            get_config = resource.get_config
            tablename = resource.tablename
            widget_id = "default_map"
            output = {}
            # Page title from CRUD strings, with generic fallback
            title = response.s3.crud_strings[tablename].get("title_map",
                                                            current.T("Map"))
            output["title"] = title
            # Filter widgets
            filter_widgets = get_config("filter_widgets", None)
            if filter_widgets and not self.hide_filter:
                # Show the "advanced" toggle only if any widget is hidden
                advanced = False
                for widget in filter_widgets:
                    if "hidden" in widget.opts and widget.opts.hidden:
                        advanced = resource.get_config("map_advanced", True)
                        break
                request = self.request
                from s3filter import S3FilterForm
                filter_formstyle = get_config("filter_formstyle", None)
                submit = resource.get_config("map_submit", True)
                filter_form = S3FilterForm(filter_widgets,
                                           formstyle=filter_formstyle,
                                           advanced=advanced,
                                           submit=submit,
                                           ajax=True,
                                           # URL to update the Filter Widget Status
                                           ajaxurl=r.url(method="filter",
                                                         vars={},
                                                         representation="options"),
                                           _class="filter-form",
                                           _id="%s-filter-form" % widget_id,
                                           )
                get_vars = request.get_vars
                filter_form = filter_form.html(resource, get_vars=get_vars, target=widget_id)
            else:
                # Render as empty string to avoid the exception in the view
                filter_form = ""
            output["form"] = filter_form
            # Map
            output["map"] = self.widget(r, widget_id=widget_id,
                                        callback='''S3.search.s3map()''', **attr)
            # View
            response.view = self._view(r, "map.html")
            return output
        else:
            r.error(501, r.ERROR.BAD_FORMAT)

    # -------------------------------------------------------------------------
    def widget(self,
               r,
               method="map",
               widget_id=None,
               visible=True,
               callback=None,
               **attr):
        """
            Render a Map widget suitable for use in an S3Filter-based page
            such as S3Summary

            @param r: the S3Request
            @param method: the widget method
            @param widget_id: the widget ID
            @param callback: None by default in case DIV is hidden
            @param visible: whether the widget is initially visible
            @param attr: controller attributes
        """
        if not widget_id:
            widget_id = "default_map"
        gis = current.gis
        s3db = current.s3db
        tablename = self.tablename
        prefix, name = tablename.split("_", 1)
        # Find the feature layer(s) registered for this controller/function
        ftable = s3db.gis_layer_feature
        query = (ftable.controller == prefix) & \
                (ftable.function == name)
        layers = current.db(query).select(ftable.layer_id,
                                          ftable.style_default,
                                          )
        if len(layers) > 1:
            # Prefer the default-styled layer when there are several
            layers.exclude(lambda row: row.style_default == False)
        if len(layers) == 1:
            layer_id = layers.first().layer_id
        else:
            # Can't determine a single layer => fall back to default styling
            layer_id = None
        marker_fn = s3db.get_config(tablename, "marker_fn")
        if marker_fn:
            # Per-feature markers added in get_location_data()
            marker = None
        else:
            # Single Marker for the layer
            marker = gis.get_marker(prefix, name)
        url = URL(extension="geojson", args=None)
        # @ToDo: Support maps with multiple layers (Dashboards)
        #id = "search_results_%s" % widget_id
        id = "search_results"
        feature_resources = [{"name" : current.T("Search Results"),
                              "id" : id,
                              "layer_id" : layer_id,
                              "tablename" : tablename,
                              "url" : url,
                              # We activate in callback after ensuring URL is updated for current filter status
                              "active" : False,
                              "marker" : marker
                              }]
        map = gis.show_map(id = widget_id,
                           feature_resources = feature_resources,
                           #catalogue_layers = True,
                           collapsed = True,
                           legend = True,
                           #toolbar = True,
                           #search = True,
                           save = False,
                           callback = callback,
                           )
        return map
# =============================================================================
class S3ExportPOI(S3Method):
    """ Export point-of-interest resources for a location """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
            Apply method.

            @param r: the S3Request
            @param attr: controller options for this request
        """
        manager = current.manager
        output = dict()
        if r.http == "GET":
            output = self.export(r, **attr)
        else:
            r.error(405, manager.ERROR.BAD_METHOD)
        return output

    # -------------------------------------------------------------------------
    def export(self, r, **attr):
        """
            Export POI resources.

            URL options:
                - "resources"   list of tablenames to export records from
                - "msince"      datetime in ISO format, "auto" to use the
                                feed's last update
                - "update_feed" 0 to skip the update of the feed's last
                                update datetime, useful for trial exports

            Supported formats:
                .xml            S3XML
                .osm            OSM XML Format
                .kml            Google KML
            (other formats can be requested, but may give unexpected results)

            @param r: the S3Request
            @param attr: controller options for this request
        """
        import datetime, time
        tfmt = current.xml.ISOFORMAT
        # Determine request Lx
        current_lx = r.record
        if not current_lx: # or not current_lx.level:
            # Must have a location
            r.error(400, current.manager.error.BAD_REQUEST)
        else:
            self.lx = current_lx.id
        tables = []
        # Parse the ?resources= parameter
        if "resources" in r.get_vars:
            resources = r.get_vars["resources"]
        else:
            # Fallback to deployment_setting
            resources = current.deployment_settings.get_gis_poi_resources()
        if not isinstance(resources, list):
            resources = [resources]
        # Each entry may itself be a comma-separated list of tablenames
        [tables.extend(t.split(",")) for t in resources]
        # Parse the ?update_feed= parameter
        update_feed = True
        if "update_feed" in r.get_vars:
            _update_feed = r.get_vars["update_feed"]
            if _update_feed == "0":
                update_feed = False
        # Parse the ?msince= parameter
        msince = None
        if "msince" in r.get_vars:
            msince = r.get_vars["msince"]
            if msince.lower() == "auto":
                msince = "auto"
            else:
                try:
                    (y, m, d, hh, mm, ss, t0, t1, t2) = \
                        time.strptime(msince, tfmt)
                    msince = datetime.datetime(y, m, d, hh, mm, ss)
                except ValueError:
                    # Unparseable timestamp => ignore msince
                    msince = None
        # Export a combined tree
        tree = self.export_combined_tree(tables,
                                         msince=msince,
                                         update_feed=update_feed)
        xml = current.xml
        manager = current.manager
        # Set response headers
        headers = current.response.headers
        representation = r.representation
        if r.representation in manager.json_formats:
            as_json = True
            default = "application/json"
        else:
            as_json = False
            default = "text/xml"
        headers["Content-Type"] = manager.content_type.get(representation,
                                                           default)
        # Find XSLT stylesheet and transform
        stylesheet = r.stylesheet()
        if tree and stylesheet is not None:
            args = Storage(domain=manager.domain,
                           base_url=manager.s3.base_url,
                           utcnow=datetime.datetime.utcnow().strftime(tfmt))
            tree = xml.transform(tree, stylesheet, **args)
        # NOTE(review): if tree is falsy here, "output" is unbound and this
        # raises NameError - verify export_combined_tree always yields a tree
        if tree:
            if as_json:
                output = xml.tree2json(tree, pretty_print=True)
            else:
                output = xml.tostring(tree, pretty_print=True)
        return output

    # -------------------------------------------------------------------------
    def export_combined_tree(self, tables, msince=None, update_feed=True):
        """
            Export a combined tree of all records in tables, which
            are in Lx, and have been updated since msince.

            @param tables: list of table names
            @param msince: minimum modified_on datetime, "auto" for
                           automatic from feed data, None to turn it off
            @param update_feed: update the last_update datetime in the feed
        """
        db = current.db
        s3db = current.s3db
        ftable = s3db.gis_poi_feed
        lx = self.lx
        elements = []
        # NOTE(review): "results" is assigned but never used below
        results = 0
        for tablename in tables:
            # Define the resource
            try:
                resource = s3db.resource(tablename, components=[])
            except AttributeError:
                # Table not defined (module deactivated?)
                continue
            # Check
            if "location_id" not in resource.fields:
                # Hardly a POI resource without location_id
                continue
            # Add Lx filter
            self._add_lx_filter(resource, lx)
            # Get the feed data
            query = (ftable.tablename == tablename) & \
                    (ftable.location_id == lx)
            feed = db(query).select(limitby=(0, 1)).first()
            if msince == "auto":
                # Use the per-table feed's last update as cutoff
                if feed is None:
                    _msince = None
                else:
                    _msince = feed.last_update
            else:
                _msince = msince
            # Export the tree and append its element to the element list
            tree = resource.export_tree(msince=_msince,
                                        references=["location_id"])
            # Update the feed data
            if update_feed:
                muntil = resource.muntil
                if feed is None:
                    ftable.insert(location_id = lx,
                                  tablename = tablename,
                                  last_update = muntil)
                else:
                    feed.update_record(last_update = muntil)
            elements.extend([c for c in tree.getroot()])
        # Combine all elements in one tree and return it
        tree = current.xml.tree(elements, results=len(elements))
        return tree

    # -------------------------------------------------------------------------
    @staticmethod
    def _add_lx_filter(resource, lx):
        """
            Add a Lx filter for the current location to this
            resource.

            @param resource: the resource
            @param lx: the location record ID to filter by (matches any
                       location whose path contains/starts with lx)
        """
        from s3resource import S3FieldSelector as FS
        query = (FS("location_id$path").contains("/%s/" % lx)) | \
                (FS("location_id$path").like("%s/%%" % lx))
        resource.add_filter(query)
# -----------------------------------------------------------------------------
class S3ImportPOI(S3Method):
    """
        Import point-of-interest resources for a location
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def apply_method(r, **attr):
        """
            Apply method: show a form to import PoIs from an uploaded .osm
            file or from an OSM mirror database (via Osmosis), then run the
            import for each selected resource.

            @param r: the S3Request
            @param attr: controller options for this request
        """
        if r.representation == "html":
            T = current.T
            s3db = current.s3db
            request = current.request
            response = current.response
            title = T("Import from OpenStreetMap")
            # Checkboxes for the PoI resources to import
            res_select = [TR(TD(B("%s: " % T("Select resources to import")),
                                _colspan=3))]
            for resource in current.deployment_settings.get_gis_poi_resources():
                id = "res_" + resource
                res_select.append(TR(TD(LABEL(resource, _for=id)),
                                     TD(INPUT(_type="checkbox",
                                              _name=id,
                                              _id=id,
                                              _checked=True)),
                                     TD()))
            # NOTE(review): the password field uses _type="text" (plaintext);
            # also these are DB credentials for the OSM mirror, not user auth
            form = FORM(
                    TABLE(
                        TR(TD(T("Can read PoIs either from an OpenStreetMap file (.osm) or mirror."),
                              _colspan=3),
                           ),
                        TR(TD(B("%s: " % T("File"))),
                           TD(INPUT(_type="file", _name="file", _size="50")),
                           TD(SPAN("*", _class="req",
                                   _style="padding-right: 5px;"))
                           ),
                        TR(TD(),
                           TD(T("or")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Host"))),
                           TD(INPUT(_type="text", _name="host",
                                    _id="host", _value="localhost")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Database"))),
                           TD(INPUT(_type="text", _name="database",
                                    _id="database", _value="osm")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("User"))),
                           TD(INPUT(_type="text", _name="user",
                                    _id="user", _value="osm")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Password"))),
                           TD(INPUT(_type="text", _name="password",
                                    _id="password", _value="planet")),
                           TD(),
                           ),
                        TR(TD(B("%s: " % T("Ignore Errors?"))),
                           TD(INPUT(_type="checkbox", _name="ignore_errors",
                                    _id="ignore_errors")),
                           TD(),
                           ),
                        res_select,
                        TR(TD(),
                           TD(INPUT(_type="submit", _value=T("Import"))),
                           TD(),
                           )
                        )
                    )
            if not r.id:
                # No record context => user must pick the location manually
                from s3validators import IS_LOCATION
                from s3widgets import S3LocationAutocompleteWidget
                # dummy field
                field = s3db.org_office.location_id
                field.requires = IS_NULL_OR(IS_LOCATION())
                widget = S3LocationAutocompleteWidget()(field, None)
                row = TR(TD(B("%s: " % T("Location"))),
                         TD(widget),
                         TD(SPAN("*", _class="req",
                                 _style="padding-right: 5px;"))
                         )
                form[0].insert(3, row)
            response.view = "create.html"
            output = dict(title=title,
                          form=form)
            if form.accepts(request.vars, current.session):
                vars = form.vars
                if vars.file != "":
                    # Use the uploaded .osm file directly
                    File = vars.file.file
                else:
                    # Create .poly file
                    if r.record:
                        record = r.record
                    elif not vars.location_id:
                        form.errors["location_id"] = T("Location is Required!")
                        return output
                    else:
                        gtable = s3db.gis_location
                        record = current.db(gtable.id == vars.location_id).select(gtable.name,
                                                                                  gtable.wkt,
                                                                                  limitby=(0, 1)
                                                                                  ).first()
                    if record.wkt is None:
                        form.errors["location_id"] = T("Location needs to have WKT!")
                        return output
                    error = GIS.create_poly(record)
                    if error:
                        current.session.error = error
                        redirect(URL(args=r.id))
                    # Use Osmosis to extract an .osm file using this .poly
                    name = record.name
                    if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp
                        TEMP = os.path.join(os.getcwd(), "temp")
                    else:
                        import tempfile
                        TEMP = tempfile.gettempdir()
                    filename = os.path.join(TEMP, "%s.osm" % name)
                    cmd = ["/home/osm/osmosis/bin/osmosis", # @ToDo: deployment_setting
                           "--read-pgsql",
                           "host=%s" % vars.host,
                           "database=%s" % vars.database,
                           "user=%s" % vars.user,
                           "password=%s" % vars.password,
                           "--dataset-dump",
                           "--bounding-polygon",
                           "file=%s" % os.path.join(TEMP, "%s.poly" % name),
                           "--write-xml",
                           "file=%s" % filename,
                           ]
                    import subprocess
                    # NOTE(review): passing a list cmd together with shell=True
                    # on POSIX executes only cmd[0] and drops the arguments -
                    # verify this should not be shell=False (or a joined string)
                    try:
                        result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
                    except subprocess.CalledProcessError, e:
                        current.session.error = T("OSM file generation failed: %s") % e.output
                        redirect(URL(args=r.id))
                    except AttributeError:
                        # Python < 2.7
                        error = subprocess.call(cmd, shell=True)
                        if error:
                            s3_debug(cmd)
                            current.session.error = T("OSM file generation failed!")
                            redirect(URL(args=r.id))
                    try:
                        File = open(filename, "r")
                    except:
                        current.session.error = T("Cannot open created OSM file!")
                        redirect(URL(args=r.id))
                stylesheet = os.path.join(request.folder, "static", "formats",
                                          "osm", "import.xsl")
                ignore_errors = vars.get("ignore_errors", None)
                xml = current.xml
                tree = xml.parse(File)
                define_resource = s3db.resource
                response.error = ""
                import_count = 0
                # Collect the resources ticked in the form
                import_res = []
                for resource in current.deployment_settings.get_gis_poi_resources():
                    if getattr(vars, "res_" + resource):
                        import_res.append(resource)
                for tablename in import_res:
                    try:
                        table = s3db[tablename]
                    except:
                        # Module disabled
                        continue
                    resource = define_resource(tablename)
                    # Transform the OSM tree to S3XML for this resource
                    s3xml = xml.transform(tree, stylesheet_path=stylesheet,
                                          name=resource.name)
                    try:
                        success = resource.import_xml(s3xml,
                                                      ignore_errors=ignore_errors)
                        import_count += resource.import_count
                    except:
                        import sys
                        response.error += str(sys.exc_info()[1])
                if import_count:
                    response.confirmation = "%s %s" % \
                        (import_count,
                         T("PoIs successfully imported."))
                else:
                    response.information = T("No PoIs available.")
            return output
        else:
            raise HTTP(501, BADMETHOD)
# END =========================================================================
| sammyshj/gci | modules/s3/s3gis.py | Python | mit | 350,924 | [
"Amber"
] | 66964b3249381052150ed133ffaf409d27c146468c71dae52ace2f114ff5fbfc |
"""
Utility Functions and Classes
This module collects small pieces of code used throughout :py:mod:`bioconda_utils`.
"""
import asyncio
import contextlib
import datetime
import fnmatch
import glob
import logging
import os
import subprocess as sp
import sys
import shutil
import json
import queue
import warnings

from threading import Event, Thread
from pathlib import PurePath
# ABCs must come from collections.abc: importing them from the collections
# top level has been deprecated since Python 3.3 and fails on Python 3.10+.
from collections import Counter, defaultdict, namedtuple
from collections.abc import Iterable
from itertools import product, chain, groupby, zip_longest
from functools import partial
from typing import Sequence, Collection, List, Dict, Any, Union
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool

import pkg_resources
import pandas as pd
import tqdm as _tqdm
import aiohttp
import backoff
import yaml
import jinja2
from jinja2 import Environment, PackageLoader

# FIXME(upstream): For conda>=4.7.0 initialize_logging is (erroneously) called
# by conda.core.index.get_index which messes up our logging.
# => Prevent custom conda logging init before importing anything conda-related.
import conda.gateways.logging
conda.gateways.logging.initialize_logging = lambda: None

from conda_build import api
from conda.exports import VersionOrder
from jsonschema import validate
from colorlog import ColoredFormatter
from boltons.funcutils import FunctionBuilder

logger = logging.getLogger(__name__)
class TqdmHandler(logging.StreamHandler):
    """Tqdm aware logging StreamHandler

    Routes all log writes through ``tqdm.write`` so that progress bars
    and log messages can share the terminal without clobbering each other.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Instantiate a disabled dummy bar once so tqdm's internal write
        # lock exists before tqdm.write is used in emit()
        _tqdm.tqdm(disable=True, total=0)

    def emit(self, record):
        message = self.format(record)
        _tqdm.tqdm.write(message)
def tqdm(*args, **kwargs):
    """Wrapper around TQDM handling disable

    The progress bar is disabled if:

    - ``TERM`` is set to ``dumb``
    - ``CIRCLECI`` is set to ``true``
    - stderr is not a terminal
    - messages at ``loglevel`` would not be emitted by the logger

    Args:
      loglevel: logging loglevel (the number, so logging.INFO)
      logger: local logger (in case it has different effective log level)
    """
    getenv = os.environ.get
    have_terminal = (sys.stderr.isatty()
                     and getenv("TERM", "") != "dumb"
                     and getenv("CIRCLECI", "") != "true")
    effective_level = kwargs.get('logger', logger).getEffectiveLevel()
    wanted_level = kwargs.get('loglevel', logging.INFO)
    kwargs['disable'] = not (have_terminal and effective_level <= wanted_level)
    return _tqdm.tqdm(*args, **kwargs)
def ensure_list(obj):
    """Wraps **obj** in a list if necessary

    Strings are treated as scalars, not as sequences.

    >>> ensure_list("one")
    ['one']
    >>> ensure_list(["one", "two"])
    ['one', 'two']
    """
    if isinstance(obj, str) or not isinstance(obj, Sequence):
        return [obj]
    return obj
def wraps(func):
    """Custom wraps() function for decorators

    This one differs from functools.wraps and boltons.funcutils.wraps in
    that it allows *adding* keyword arguments to the function signature.

    >>> def decorator(func):
    >>>     @wraps(func)
    >>>     def wrapper(*args, extra_param=None, **kwargs):
    >>>         print("Called with extra_param=%s" % extra_param)
    >>>         func(*args, **kwargs)
    >>>     return wrapper
    >>>
    >>> @decorator
    >>> def test(arg1, arg2, arg3='default'):
    >>>     pass
    >>>
    >>> test('val1', 'val2', extra_param='xyz')
    """
    # Capture the wrapped function's signature
    fb = FunctionBuilder.from_func(func)
    def wrapper_wrapper(wrapper_func):
        # Merge the wrapper's extra keyword-only parameters into the
        # original signature so they become part of the visible API
        fb_wrapper = FunctionBuilder.from_func(wrapper_func)
        fb.kwonlyargs += fb_wrapper.kwonlyargs
        # NOTE(review): assumes both kwonlydefaults are dicts (not None)
        # - verify FunctionBuilder guarantees this
        fb.kwonlydefaults.update(fb_wrapper.kwonlydefaults)
        # Rebuild the body as a plain pass-through call to the wrapper
        fb.body = 'return _call(%s)' % fb.get_invocation_str()
        execdict = dict(_call=wrapper_func, _func=func)
        fully_wrapped = fb.get_func(execdict)
        fully_wrapped.__wrapped__ = func
        return fully_wrapped
    return wrapper_wrapper
class LogFuncFilter:
    """Logging filter capping the number of messages emitted from given function

    Arguments:
      func: The function for which to filter log messages
      trunc_msg: Message emitted once in place of the last allowed record,
        informing the user that further messages will be hidden.
      max_lines: Max number of log messages to allow to pass
      consecutive: If true, the limit applies to consecutive messages only
        and the budget resets whenever a record from another source is seen.

    Fixme:
      The implementation assumes that **func** uses a logger initialized with
      ``getLogger(__name__)``.
    """
    def __init__(self, func, trunc_msg: str = None, max_lines: int = 0,
                 consecutive: bool = True) -> None:
        self.func = func
        # One extra slot is reserved for the truncation notice
        self.max_lines = max_lines + 1
        self.cur_max_lines = max_lines + 1
        self.consecutive = consecutive
        self.trunc_msg = trunc_msg

    def filter(self, record: logging.LogRecord) -> bool:
        matches = (record.name == self.func.__module__
                   and record.funcName == self.func.__name__)
        if not matches:
            if self.consecutive:
                # A record from elsewhere resets the budget
                self.cur_max_lines = self.max_lines
            return True
        remaining = self.cur_max_lines
        if remaining > 1:
            self.cur_max_lines = remaining - 1
            return True
        if remaining == 1 and self.trunc_msg:
            # Replace the final allowed record with the truncation notice
            self.cur_max_lines = remaining - 1
            record.msg = self.trunc_msg
            return True
        return False
class LoggingSourceRenameFilter:
    """Logging filter for abbreviating module name in logs

    Renames ``bioconda_utils`` sources to ``BIOCONDA``; every other
    source is shortened to its uppercased top-level package name.
    """
    def filter(self, record: logging.LogRecord) -> bool:
        source = record.name
        if source.startswith("bioconda_utils"):
            record.name = "BIOCONDA"
        else:
            record.name = source.partition('.')[0].upper()
        return True
def setup_logger(name: str = 'bioconda_utils', loglevel: Union[str, int] = logging.INFO,
                 logfile: str = None, logfile_level: Union[str, int] = logging.DEBUG,
                 log_command_max_lines = None,
                 prefix: str = "BIOCONDA ",
                 msgfmt: str = ("%(asctime)s "
                                "%(log_color)s%(name)s %(levelname)s%(reset)s "
                                "%(message)s"),
                 datefmt: str ="%H:%M:%S") -> logging.Logger:
    """Set up logging for bioconda-utils

    Configures handlers on the *root* logger (console via TqdmHandler,
    optionally a file handler) and returns the logger for **name**.

    Args:
      name: Module name for which to get a logger (``__name__``)
      loglevel: Log level, can be name or int level
      logfile: File to log to as well
      logfile_level: Log level for file logging
      log_command_max_lines: Max lines to emit per command run by ``run``
      prefix: Prefix to add to our log messages
      msgfmt: Format for messages
      datefmt: Format for dates

    Returns:
      A new logger
    """
    new_logger = logging.getLogger(name)
    root_logger = logging.getLogger()
    if logfile:
        # Accept level names ("DEBUG") as well as numeric levels
        if isinstance(logfile_level, str):
            logfile_level = getattr(logging, logfile_level.upper())
        log_file_handler = logging.FileHandler(logfile)
        log_file_handler.setLevel(logfile_level)
        # Strip the color placeholders for the plain-text file format.
        # NOTE(review): the default msgfmt contains no "{prefix}" field, so
        # .format(prefix=...) appears to be a no-op here - confirm intent
        log_file_formatter = logging.Formatter(
            msgfmt.replace("%(log_color)s", "").replace("%(reset)s", "").format(prefix=prefix),
            datefmt=None,
        )
        log_file_handler.setFormatter(log_file_formatter)
        root_logger.addHandler(log_file_handler)
    else:
        logfile_level = logging.FATAL
    if isinstance(loglevel, str):
        loglevel = getattr(logging, loglevel.upper())
    # Base logger is set to the lowest of console or file logging
    root_logger.setLevel(min(loglevel, logfile_level))
    # Console logging is passed through TqdmHandler so that the progress bar does not
    # get broken by log lines emitted.
    log_stream_handler = TqdmHandler()
    if loglevel:
        log_stream_handler.setLevel(loglevel)
    log_stream_handler.setFormatter(ColoredFormatter(
        msgfmt.format(prefix=prefix),
        datefmt=datefmt,
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        }))
    log_stream_handler.addFilter(LoggingSourceRenameFilter())
    root_logger.addHandler(log_stream_handler)
    # Add filter for `utils.run` to truncate after n lines emitted.
    # We do this here rather than in `utils.run` so that it can be configured
    # from the CLI more easily
    if log_command_max_lines is not None:
        log_filter = LogFuncFilter(run, "Command output truncated", log_command_max_lines)
        log_stream_handler.addFilter(log_filter)
    return new_logger
def ellipsize_recipes(recipes: Collection[str], recipe_folder: str,
                      n: int = 5, m: int = 50) -> str:
    """Logging helper showing recipe list

    Args:
      recipes: List of recipes
      recipe_folder: Folder name to strip from recipes.
      n: Show at most this number of recipes, with "..." if more are found.
      m: Don't show anything if more recipes than this
         (pointless to show first 5 of 5000)

    Returns:
      A string like " (htslib, samtools, ...)" or ""
    """
    if not recipes or len(recipes) > m:
        return ""
    if len(recipes) > n:
        # Slicing needs a sequence; sets etc. must be converted first
        if not isinstance(recipes, Sequence):
            recipes = list(recipes)
        recipes = recipes[:n]
        append = ", ..."
    else:
        append = ""
    # Strip recipe_folder as a path *prefix*. The previous implementation
    # used str.lstrip(recipe_folder), which strips any leading *characters*
    # contained in recipe_folder and thus mangled recipe names sharing
    # letters with the folder (e.g. "recipes/rseqc" -> "qc").
    prefix = recipe_folder.rstrip('/') + '/'
    names = [recipe[len(prefix):] if recipe.startswith(prefix)
             else recipe.lstrip('/')
             for recipe in recipes]
    return ' (' + ', '.join(names) + append + ')'
class JinjaSilentUndefined(jinja2.Undefined):
    """Jinja2 ``Undefined`` that renders as empty string instead of raising

    Any operation on an undefined template variable (arithmetic,
    comparison, subscript, call, conversion, ...) silently yields ``""``
    rather than raising an ``UndefinedError``.
    """
    def _fail_with_undefined_error(self, *args, **kwargs):
        return ""

    # Route every operator/conversion dunder through the silent handler
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = \
        _fail_with_undefined_error
# Jinja2 environment rendering the templates shipped in
# bioconda_utils/templates.
jinja = Environment(
    loader=PackageLoader('bioconda_utils', 'templates'),
    trim_blocks=True,
    lstrip_blocks=True
)

# Jinja2 environment that renders undefined variables as "" instead of
# raising; used for quick, lenient meta.yaml parsing.
jinja_silent_undef = Environment(
    undefined=JinjaSilentUndefined
)

# Patterns of allowed environment variables that are allowed to be passed to
# conda-build. Matched with fnmatch globs (see allowed_env_var).
ENV_VAR_WHITELIST = [
    'PATH',
    'LC_*',
    'LANG',
    'MACOSX_DEPLOYMENT_TARGET',
    'HTTPS_PROXY','HTTP_PROXY', 'https_proxy', 'http_proxy',
]

# Of those that make it through the whitelist, remove these specific ones
ENV_VAR_BLACKLIST = [
]

# Of those, also remove these when we're running in a docker container
ENV_VAR_DOCKER_BLACKLIST = [
    'PATH',
]
def get_free_space():
    """Return free space in MB on disk

    Queries the filesystem holding the current working directory.
    """
    stats = os.statvfs(os.getcwd())
    return stats.f_bavail * stats.f_frsize / (1024 * 1024)
def allowed_env_var(s, docker=False):
    """Return True if env var name **s** passes the whitelist/blacklist globs.

    Falls through (returning None, i.e. falsy) if **s** matches no whitelist
    pattern at all.
    """
    for pattern in ENV_VAR_WHITELIST:
        if not fnmatch.fnmatch(s, pattern):
            continue
        # Whitelisted: reject if any (docker-)blacklist pattern matches.
        blocked = list(ENV_VAR_BLACKLIST)
        if docker:
            blocked += ENV_VAR_DOCKER_BLACKLIST
        return not any(fnmatch.fnmatch(s, bpattern) for bpattern in blocked)
def bin_for(name='conda'):
    """Return path to **name** under ``$CONDA_ROOT/bin`` if CONDA_ROOT is set,
    otherwise the bare name (left for PATH resolution)."""
    conda_root = os.environ.get('CONDA_ROOT')
    if conda_root is not None:
        return os.path.join(conda_root, 'bin', name)
    return name
@contextlib.contextmanager
def temp_env(env):
    """
    Context manager to temporarily set os.environ.

    Used to send values in **env** to processes that only read the os.environ,
    for example when filling in meta.yaml with jinja2 template variables.

    All values are converted to string before sending to os.environ
    """
    saved = os.environ.copy()
    os.environ.update({key: str(value) for key, value in dict(env).items()})
    try:
        yield
    finally:
        # Restore the exact original environment, dropping any additions.
        os.environ.clear()
        os.environ.update(saved)
@contextlib.contextmanager
def sandboxed_env(env):
    """
    Context manager to temporarily set os.environ, only allowing env vars from
    the existing `os.environ` or the provided **env** that match
    ENV_VAR_WHITELIST globs.
    """
    saved = os.environ.copy()
    sandbox = {name: value for name, value in saved.items()
               if allowed_env_var(name)}
    sandbox.update({name: str(value) for name, value in dict(env).items()
                    if allowed_env_var(name)})
    # NOTE: this rebinds os.environ to a plain dict rather than updating the
    # os.environ mapping in place — preserved from the original implementation.
    os.environ = sandbox
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved)
def load_all_meta(recipe, config=None, finalize=True):
    """
    For each environment, yield the rendered meta.yaml.

    Parameters
    ----------
    finalize : bool
        If True, do a full conda-build render. Determines exact package builds
        of build/host dependencies. It involves costly dependency resolution
        via conda and also download of those packages (to inspect possible
        run_exports). For fast-running tasks like linting, set to False.
    """
    if config is None:
        config = load_conda_build_config()
    # `bypass_env_check=True` prevents evaluating (=environment solving) the
    # package versions used for `pin_compatible` and the like.
    # To avoid adding a separate `bypass_env_check` alongside every `finalize`
    # parameter, just assume we do not want to bypass if `finalize is True`.
    rendered = api.render(
        recipe,
        config=config,
        finalize=False,
        bypass_env_check=True,
    )
    metas = [meta for meta, _, _ in rendered]
    if not finalize:
        return metas
    # Render again for the finalized version. Rendering the non-finalized
    # version first lets us filter out variants that get skipped. (E.g., with
    # a global `numpy 1.16` pin for py==27 the env check fails when evaluating
    # `pin_compatible('numpy')` for recipes that use a pinned `numpy` and also
    # require `numpy>=1.17` but actually skip py==27. Filtering that variant
    # beforehand avoids this.)
    finalized = []
    for non_finalized_meta in metas:
        finalized.extend(
            meta for meta, _, _ in api.render(
                recipe,
                config=config,
                variants=non_finalized_meta.config.variant,
                finalize=True,
                bypass_env_check=False,
            )
        )
    return finalized
def load_meta_fast(recipe: str, env=None):
    """
    Given a package name, find the current meta.yaml file, parse it, and return
    the dict.

    Args:
      recipe: Path to recipe (directory containing the meta.yaml file)
      env: Optional variables to expand

    Returns:
      Tuple of rendered meta dict and the original recipe path
      (note: meta first — matches the actual return order).

    Raises:
      ValueError (chained from the original error) if the recipe cannot be
      read, rendered or parsed.
    """
    if not env:
        env = {}
    try:
        pth = os.path.join(recipe, 'meta.yaml')
        # Context manager closes the file handle even when rendering fails
        # (the previous open(...).read() leaked the descriptor).
        with open(pth, 'r', encoding='utf-8') as meta_file:
            template = jinja_silent_undef.from_string(meta_file.read())
        meta = yaml.safe_load(template.render(env))
        return (meta, recipe)
    except Exception as exc:
        # Chain the cause so the real failure stays visible in the traceback.
        raise ValueError('Problem inspecting {0}'.format(recipe)) from exc
def load_conda_build_config(platform=None, trim_skip=True):
    """
    Load conda build config while considering global pinnings from conda-forge.

    Args:
      platform: Optional conda platform string to set on the config.
      trim_skip: Forwarded to the config's ``trim_skip`` attribute.

    Returns:
      A conda-build ``api.Config`` whose exclusive config files are the
      conda-forge pinnings plus bioconda's own pinning overrides.
    """
    config = api.Config(
        no_download_source=True,
        set_build_id=False)

    # get environment root
    # NOTE(review): assumes `bioconda-utils` is on PATH; if shutil.which
    # returns None, PurePath(None) raises TypeError — confirm callers
    # guarantee an activated environment.
    env_root = PurePath(shutil.which("bioconda-utils")).parents[1]
    # set path to pinnings from conda forge package
    config.exclusive_config_files = [
        os.path.join(env_root, "conda_build_config.yaml"),
        os.path.join(
            os.path.dirname(__file__),
            'bioconda_utils-conda_build_config.yaml'),
    ]
    # Fail early if any referenced config file is missing.
    for cfg in chain(config.exclusive_config_files, config.variant_config_files or []):
        assert os.path.exists(cfg), ('error: {0} does not exist'.format(cfg))
    if platform:
        config.platform = platform
    config.trim_skip = trim_skip
    return config
# Pairing of a conda-build CLI flag and the config file path it refers to;
# produced by get_conda_build_config_files() below.
CondaBuildConfigFile = namedtuple('CondaBuildConfigFile', (
    'arg',  # either '-e' (exclusive config) or '-m' (variant config)
    'path',  # path to the config file
))
def get_conda_build_config_files(config=None):
    """Yield CondaBuildConfigFile entries for conda-build's config files.

    Exclusive config files are emitted with '-e', variant config files
    with '-m'. Loads the default config when none is given.
    """
    if config is None:
        config = load_conda_build_config()
    # TODO: open PR upstream for conda-build to support multiple exclusive_config_files
    for flag, paths in (('-e', config.exclusive_config_files),
                        ('-m', config.variant_config_files)):
        for file_path in paths or []:
            yield CondaBuildConfigFile(flag, file_path)
def load_first_metadata(recipe, config=None, finalize=True):
    """
    Returns just the first of possibly many metadata files. Used for when you
    need to do things like check a package name or version number (which are
    not expected to change between variants).

    If the recipe will be skipped, then returns None

    Parameters
    ----------
    finalize : bool
        If True, do a full conda-build render. Determines exact package builds
        of build/host dependencies. It involves costly dependency resolution
        via conda and also download of those packages (to inspect possible
        run_exports). For fast-running tasks like linting, set to False.
    """
    metas = load_all_meta(recipe, config, finalize=finalize)
    return metas[0] if metas else None
@contextlib.contextmanager
def temp_os(platform):
    """
    Context manager to temporarily set sys.platform.
    """
    saved = sys.platform
    sys.platform = platform
    try:
        yield
    finally:
        # Always restore the real platform, even if the body raised.
        sys.platform = saved
def run(cmds: List[str], env: Dict[str, str]=None, mask: List[str]=None, live: bool=True,
        mylogger: logging.Logger=logger, loglevel: int=logging.INFO,
        **kwargs: Dict[Any, Any]) -> sp.CompletedProcess:
    """
    Run a command (with logging, masking, etc)

    - Explicitly decodes stdout to avoid UnicodeDecodeErrors that can occur when
      using the ``universal_newlines=True`` argument in the standard
      subprocess.run.
    - Masks secrets
    - Passed live output to `logging`

    Arguments:
      cmds: List of command and arguments
      env: Optional environment for command
      mask: List of terms to mask (secrets). ``None`` hides the entire
        command line from logs; ``False`` disables masking.
      live: Whether output should be sent to log
      mylogger: Logger instance used for live output
      loglevel: Level at which live output is logged
      kwargs: Additional arguments to `subprocess.Popen`

    Returns:
      CompletedProcess object

    Raises:
      subprocess.CalledProcessError if the process failed
      FileNotFoundError if the command could not be found
    """
    logq = queue.Queue()

    def pushqueue(out, pipe):
        """Reads from a pipe and pushes into a queue, pushing "None" to
        indicate closed pipe"""
        for line in iter(pipe.readline, b''):
            out.put((pipe, line))
        out.put(None)  # End-of-data-token

    def do_mask(arg: str) -> str:
        """Masks secrets in **arg**"""
        if mask is None:
            # caller has not considered masking, hide the entire command
            # for security reasons
            return '<hidden>'
        if mask is False:
            # masking has been deactivated
            return arg
        for mitem in mask:
            arg = arg.replace(mitem, '<hidden>')
        return arg

    mylogger.log(loglevel, "(COMMAND) %s", ' '.join(do_mask(arg) for arg in cmds))

    # bufsize=4 result of manual experimentation. Changing it can
    # drop performance drastically.
    with sp.Popen(cmds, stdout=sp.PIPE, stderr=sp.PIPE,
                  close_fds=True, env=env, bufsize=4, **kwargs) as proc:
        # Start threads reading stdout/stderr and pushing it into queue logq
        out_thread = Thread(target=pushqueue, args=(logq, proc.stdout))
        err_thread = Thread(target=pushqueue, args=(logq, proc.stderr))
        out_thread.daemon = True  # Do not wait for these threads to terminate
        err_thread.daemon = True
        out_thread.start()
        err_thread.start()

        output_lines = []
        try:
            # Each reader thread pushes one `None` token; drain until both seen.
            for _ in range(2):
                for pipe, line in iter(logq.get, None):
                    line = do_mask(line.decode(errors='replace').rstrip())
                    output_lines.append(line)
                    if live:
                        if pipe == proc.stdout:
                            prefix = "OUT"
                        else:
                            prefix = "ERR"
                        mylogger.log(loglevel, "(%s) %s", prefix, line)
        except Exception:
            proc.kill()
            proc.wait()
            raise
        output = "\n".join(output_lines)
        if isinstance(cmds, str):
            masked_cmds = do_mask(cmds)
        else:
            masked_cmds = [do_mask(c) for c in cmds]

        if proc.poll() is None:
            mylogger.log(loglevel, 'Command closed STDOUT/STDERR but is still running')
            waitfor = 30
            waittimes = 5
            for attempt in range(waittimes):
                mylogger.log(loglevel, "Waiting %s seconds (%i/%i)", waitfor, attempt+1, waittimes)
                try:
                    proc.wait(timeout=waitfor)
                    break
                except sp.TimeoutExpired:
                    pass
            else:
                mylogger.log(loglevel, "Terminating process")
                proc.kill()
                proc.wait()
        returncode = proc.poll()

    if returncode:
        logger.error('COMMAND FAILED (exited with %s): %s', returncode, ' '.join(masked_cmds))
        if not live:
            logger.error('STDOUT+STDERR:\n%s', output)
        raise sp.CalledProcessError(returncode, masked_cmds, output=output)

    # FIX: CompletedProcess's signature is (args, returncode, stdout, stderr);
    # the previous code passed (returncode, masked_cmds, output), which left
    # .returncode holding the command list and .args the exit status.
    # .stdout keeps holding the combined output, as before.
    return sp.CompletedProcess(masked_cmds, returncode, output)
def envstr(env):
    """Serialize a mapping as 'key=value;...' sorted by key."""
    pairs = sorted(dict(env).items())
    return ';'.join(f'{key}={value}' for key, value in pairs)
def flatten_dict(mapping):
    """Yield, per key, the list of (key, value) pairs for each of its values.

    Scalar values (strings or non-iterables) are treated as single-element
    lists, so ``{'A': ['1', '2'], 'B': 'x'}`` yields
    ``[('A', '1'), ('A', '2')]`` then ``[('B', 'x')]``.

    Note: the parameter was renamed from ``dict`` to avoid shadowing the
    builtin; all in-module callers pass it positionally.
    """
    for key, values in mapping.items():
        if isinstance(values, str) or not isinstance(values, Iterable):
            values = [values]
        yield [(key, value) for value in values]
class EnvMatrix:
    """
    Intended to be initialized with a YAML file and iterated over to yield all
    combinations of environments.

    YAML file has the following format::

        CONDA_PY:
          - "2.7"
          - "3.5"
        CONDA_BOOST: "1.60"
        CONDA_PERL: "5.22.0"
        CONDA_NPY: "110"
        CONDA_NCURSES: "5.9"
        CONDA_GSL: "1.16"
    """

    def __init__(self, env):
        """
        Parameters
        ----------
        env : str or dict
            If str, assume it's a path to a YAML-format filename and load it
            into a dict. If a dict is provided, use it directly.
        """
        if isinstance(env, str):
            with open(env) as stream:
                self.env = yaml.safe_load(stream)
        else:
            self.env = env
        # Versions must stay strings (so YAML doesn't turn e.g. "1.60" into
        # the float 1.6); CONDA_PY is the one exception.
        for name, value in self.env.items():
            if name == "CONDA_PY":
                continue
            if not isinstance(value, str):
                raise ValueError(
                    "All versions except CONDA_PY must be strings.")

    def __iter__(self):
        """
        Given the YAML::

            CONDA_PY:
              - "2.7"
              - "3.5"
            CONDA_BOOST: "1.60"
            CONDA_NPY: "110"

        We get the following sets of env vars::

            [('CONDA_BOOST', '1.60'), ('CONDA_PY', '2.7'), ('CONDA_NPY', '110')]
            [('CONDA_BOOST', '1.60'), ('CONDA_PY', '3.5'), ('CONDA_NPY', '110')]
        """
        # Cartesian product over the per-key (key, value) pair lists.
        yield from product(*flatten_dict(self.env))
def get_deps(recipe=None, build=True, *, meta=None):
    """
    Return the set of dependency names for a single recipe.

    Only names (not versions) of dependencies are returned.

    If the variant/version matrix yields multiple instances of the metadata,
    the union of these dependencies is returned.

    Parameters
    ----------
    recipe : str
        Path to the recipe; rendered via load_all_meta.
    build : bool
        If True yield build dependencies, if False yield run dependencies.
    meta : MetaData, optional (keyword-only)
        An already-parsed conda_build.metadata.MetaData instance to use
        instead of rendering *recipe*. FIX: the body referenced ``meta``
        but it was missing from the signature, raising NameError whenever
        ``recipe`` was None; added as keyword-only to stay backward
        compatible with positional callers.
    """
    if recipe is not None:
        assert isinstance(recipe, str)
        metadata = load_all_meta(recipe, finalize=False)
    elif meta is not None:
        metadata = [meta]
    else:
        raise ValueError("Either meta or recipe has to be specified.")

    all_deps = set()
    for meta_item in metadata:
        if build:
            deps = meta_item.get_value('requirements/build', [])
        else:
            deps = meta_item.get_value('requirements/run', [])
        # First whitespace-separated token is the package name.
        all_deps.update(dep.split()[0] for dep in deps)
    return all_deps
# Global cap on worker processes/threads; consumed by threads_to_use() and
# adjustable via set_max_threads().
_max_threads = 1


def set_max_threads(n):
    """Set the global maximum number of threads/processes to use."""
    global _max_threads
    _max_threads = n
def threads_to_use():
    """Returns the number of cores we are allowed to run on"""
    try:
        # Respects CPU affinity (e.g. cgroup/taskset limits) where available.
        available = len(os.sched_getaffinity(0))
    except AttributeError:
        # Not available on all platforms; fall back to the total count.
        available = os.cpu_count()
    return min(_max_threads, available)
def parallel_iter(func, items, desc, *args, **kwargs):
    """Map **func** over **items** using a worker pool, yielding results.

    Results are yielded unordered, as workers complete them, with a tqdm
    progress bar labeled **desc**. Extra ``*args``/``**kwargs`` are
    pre-bound to **func** (positionals precede each item). Pool size is
    threads_to_use().

    NOTE(review): as a generator, the pool is torn down when iteration
    finishes or the generator is discarded mid-iteration — callers should
    consume it fully.
    """
    pfunc = partial(func, *args, **kwargs)
    with Pool(threads_to_use()) as pool:
        yield from tqdm(
            pool.imap_unordered(pfunc, items),
            desc=desc,
            total=len(items)
        )
def get_recipes(recipe_folder, package="*", exclude=None):
    """
    Generator of recipes.

    Finds (possibly nested) directories containing a ``meta.yaml`` file.

    Parameters
    ----------
    recipe_folder : str
        Top-level dir of the recipes
    package : str or iterable
        Pattern or patterns to restrict the results.
    exclude : str or iterable, optional
        Pattern or patterns of directory paths (relative to recipe_folder)
        to skip.
    """
    if isinstance(package, str):
        package = [package]
    if isinstance(exclude, str):
        exclude = [exclude]
    if exclude is None:
        exclude = []
    for p in package:
        logger.debug("get_recipes(%s, package='%s'): %s",
                     recipe_folder, package, p)
        path = os.path.join(recipe_folder, p)
        for new_dir in glob.glob(path):
            meta_yaml_found_or_excluded = False
            for dir_path, dir_names, file_names in os.walk(new_dir):
                # Compare the path relative to recipe_folder against the
                # exclusion patterns.
                if any(fnmatch.fnmatch(dir_path[len(recipe_folder):], pat) for pat in exclude):
                    meta_yaml_found_or_excluded = True
                    continue
                if "meta.yaml" in file_names:
                    meta_yaml_found_or_excluded = True
                    yield dir_path
            if not meta_yaml_found_or_excluded and os.path.isdir(new_dir):
                # FIX: logger.warn() is a deprecated alias; use warning().
                logger.warning(
                    "No meta.yaml found in %s."
                    " If you want to ignore this directory, add it to the blacklist.",
                    new_dir
                )
                yield new_dir
def get_latest_recipes(recipe_folder, config, package="*"):
    """
    Generator of recipes.

    Finds (possibly nested) directories containing a ``meta.yaml`` file and returns
    the latest version of each recipe.

    Parameters
    ----------
    recipe_folder : str
        Top-level dir of the recipes
    config : dict or filename
    package : str or iterable
        Pattern or patterns to restrict the results.
    """
    def toplevel(x):
        # First path component below recipe_folder, i.e. the recipe name.
        return x.replace(
            recipe_folder, '').strip(os.path.sep).split(os.path.sep)[0]

    config = load_config(config)
    # Sort so groupby() below sees each recipe's subdirs contiguously.
    recipes = sorted(get_recipes(recipe_folder, package), key=toplevel)

    for package, group in groupby(recipes, key=toplevel):
        group = list(group)
        if len(group) == 1:
            yield group[0]
        else:
            # Multiple (sub)recipe dirs: pick the one with the highest
            # package/version according to conda's VersionOrder.
            def get_version(p):
                meta_path = os.path.join(p, 'meta.yaml')
                meta = load_first_metadata(meta_path, finalize=False)
                version = meta.get_value('package/version')
                return VersionOrder(version)
            sorted_versions = sorted(group, key=get_version)
            if sorted_versions:
                yield sorted_versions[-1]
class DivergentBuildsError(Exception):
    """Raised when a channel holds builds the recipe no longer produces
    (see _filter_existing_packages)."""
def _string_or_float_to_integer_python(s):
"""
conda-build 2.0.4 expects CONDA_PY values to be integers (e.g., 27, 35) but
older versions were OK with strings or even floats.
To avoid editing existing config files, we support those values here.
"""
try:
s = float(s)
if s < 10: # it'll be a looong time before we hit Python 10.0
s = int(s * 10)
else:
s = int(s)
except ValueError:
raise ValueError("{} is an unrecognized Python version".format(s))
return s
def built_package_paths(recipe):
    """
    Returns the path to which a recipe would be built.

    Does not necessarily exist; equivalent to ``conda build --output recipename``
    but without the subprocess.
    """
    # NB: Setting bypass_env_check disables ``pin_compatible`` parsing, which
    # these days does not change the package build string, so should be fine.
    return api.get_output_file_paths(
        recipe, config=load_conda_build_config(), bypass_env_check=True)
def last_commit_to_master():
    """
    Identifies the day of the last commit to master branch.

    Raises ValueError if git is not on PATH.
    """
    if not shutil.which('git'):
        raise ValueError("git not found")
    proc = sp.run(
        'git log master --date=iso | grep "^Date:" | head -n1',
        shell=True, stdout=sp.PIPE, check=True
    )
    # Output looks like b"Date: YYYY-MM-DD ...\n"; field 1 is the date part.
    date_str = proc.stdout[:-1].decode().split()[1]
    return datetime.datetime.strptime(date_str, '%Y-%m-%d')
def file_from_commit(commit, filename):
    """
    Returns the contents of a file at a particular commit as a string.

    Parameters
    ----------
    commit : commit-like string
        'HEAD' reads the working-tree file directly; anything else goes
        through ``git show``.
    filename : str
    """
    if commit == 'HEAD':
        # FIX: context manager closes the handle; the previous
        # open(filename).read() leaked the file descriptor.
        with open(filename) as fileobj:
            return fileobj.read()

    p = run(['git', 'show', '{0}:{1}'.format(commit, filename)], mask=False,
            loglevel=0)
    return str(p.stdout)
def newly_unblacklisted(config_file, recipe_folder, git_range):
    """
    Returns the set of recipes that were blacklisted in master branch but have
    since been removed from the blacklist. Considers the contents of all
    blacklists in the current config file and all blacklists in the same config
    file in master branch.

    Parameters
    ----------
    config_file : str
        Needs filename (and not dict) because we check what the contents of the
        config file were in the master branch.

    recipe_folder : str
        Path to recipe dir, needed by get_blacklist

    git_range : str or list
        If str or single-item list. If ``'HEAD'`` or ``['HEAD']`` or ``['master',
        'HEAD']``, compares the current changes to master. If other commits are
        specified, then use those commits directly via ``git show``.
    """
    # 'HEAD' becomes ['HEAD'] and then ['master', 'HEAD'].
    # ['HEAD'] becomes ['master', 'HEAD']
    # ['HEAD~~', 'HEAD'] stays the same
    if isinstance(git_range, str):
        git_range = [git_range]
    if len(git_range) == 1:
        git_range = ['master', git_range[0]]

    # Get the set of previously blacklisted recipes by reading the original
    # config file and then all the original blacklists it had listed
    previous = set()
    orig_config = file_from_commit(git_range[0], config_file)
    for bl in yaml.safe_load(orig_config)['blacklists']:
        # Each historical blacklist is materialized into a scratch file in
        # the current working directory so get_blacklist() can read it.
        with open('.tmp.blacklist', 'w', encoding='utf8') as fout:
            fout.write(file_from_commit(git_range[0], bl))
        previous.update(get_blacklist({'blacklists': '.tmp.blacklist'}, recipe_folder))
        os.unlink('.tmp.blacklist')

    current = get_blacklist(
        yaml.safe_load(file_from_commit(git_range[1], config_file)),
        recipe_folder)
    # Blacklisted before but not anymore = newly unblacklisted.
    results = previous.difference(current)
    logger.info('Recipes newly unblacklisted:\n%s', '\n'.join(list(results)))
    return results
def changed_since_master(recipe_folder):
    """
    Return filenames changed since master branch.

    Note that this uses ``origin``, so if you are working on a fork of the main
    repo and have added the main repo as ``upstream``, then you'll have to do
    a ``git checkout master && git pull upstream master`` to update your fork.
    """
    run(['git', 'fetch', 'origin', 'master'], mask=False, loglevel=0)
    diff = run(['git', 'diff', 'FETCH_HEAD', '--name-only'], mask=False, loglevel=0)
    return [os.path.dirname(os.path.relpath(changed, recipe_folder))
            for changed in diff.stdout.splitlines(False)]
def _load_platform_metas(recipe, finalize=True):
    """Render recipe metadata for the current platform.

    Returns (platform, metas); platform is 'osx', 'linux', or the raw
    OSTYPE/sys.platform value if unrecognized.
    """
    # check if package is noarch, if so, build only on linux
    # with temp_os, we can fool the MetaData if needed.
    raw_platform = os.environ.get('OSTYPE', sys.platform)
    if raw_platform.startswith("darwin"):
        platform = 'osx'
    elif raw_platform == "linux-gnu":
        platform = "linux"
    else:
        platform = raw_platform
    config = load_conda_build_config(platform=platform)
    return platform, load_all_meta(recipe, config=config, finalize=finalize)
def _meta_subdir(meta):
# logic extracted from conda_build.variants.bldpkg_path
return 'noarch' if meta.noarch or meta.noarch_python else meta.config.host_subdir
def check_recipe_skippable(recipe, check_channels):
    """
    Return True if the same number of builds (per subdir) defined by the recipe
    are already in channel_packages.
    """
    platform, metas = _load_platform_metas(recipe, finalize=False)
    # The recipe likely defined skip: True
    if not metas:
        return True
    # If on CI, handle noarch.
    if os.environ.get('CI', None) == 'true':
        first_meta = metas[0]
        if first_meta.get_value('build/noarch'):
            if platform != 'linux':
                logger.debug('FILTER: only building %s on '
                             'linux because it defines noarch.',
                             recipe)
                return True
    # Distinct (name, version, build_number) triples produced by this recipe.
    packages = set(
        (meta.name(), meta.version(), int(meta.build_number() or 0))
        for meta in metas
    )
    r = RepoData()
    # Count how many builds already exist per (name, version, build_number,
    # subdir) across the channels being checked.
    num_existing_pkg_builds = Counter(
        (name, version, build_number, subdir)
        for name, version, build_number in packages
        for subdir in r.get_package_data("subdir", name=name, version=version,
                                         build_number=build_number,
                                         channels=check_channels, native=True)
    )
    if num_existing_pkg_builds == Counter():
        # No packages with same version + build num in channels: no need to skip
        return False
    # Same counting for the builds this recipe would produce locally.
    num_new_pkg_builds = Counter(
        (meta.name(), meta.version(), int(meta.build_number() or 0), _meta_subdir(meta))
        for meta in metas
    )
    if num_new_pkg_builds == num_existing_pkg_builds:
        logger.info(
            'FILTER: not building recipe %s because '
            'the same number of builds are in channel(s) and it is not forced.',
            recipe)
        return True
    return False
def _filter_existing_packages(metas, check_channels):
    """Partition rendered metas by whether their exact build exists already.

    Returns:
      Tuple (new_metas, existing_metas, divergent_builds): metas not yet in
      the channels, metas already present, and name-version-build strings
      found in the channels but not produced locally for the same
      name/version/build_number.
    """
    new_metas = []  # MetaData instances of packages not yet in channel
    existing_metas = []  # MetaData instances of packages already in channel
    divergent_builds = set()  # set of Dist (i.e., name-version-build) strings

    # Group metas by (name, version, build_number); within each group map
    # (subdir, build_id) -> meta.
    key_build_meta = defaultdict(dict)
    for meta in metas:
        pkg_key = (meta.name(), meta.version(), int(meta.build_number() or 0))
        pkg_build = (_meta_subdir(meta), meta.build_id())
        key_build_meta[pkg_key][pkg_build] = meta

    r = RepoData()
    for pkg_key, build_meta in key_build_meta.items():
        existing_pkg_builds = set(r.get_package_data(['subdir', 'build'],
                                                     name=pkg_key[0],
                                                     version=pkg_key[1],
                                                     build_number=pkg_key[2],
                                                     channels=check_channels,
                                                     native=True))
        for pkg_build, meta in build_meta.items():
            if pkg_build not in existing_pkg_builds:
                new_metas.append(meta)
            else:
                existing_metas.append(meta)
        # Channel builds that we do not produce locally indicate divergence
        # between recipe and channel.
        for divergent_build in (existing_pkg_builds - set(build_meta.keys())):
            divergent_builds.add(
                '-'.join((pkg_key[0], pkg_key[1], divergent_build[1])))

    return new_metas, existing_metas, divergent_builds
def get_package_paths(recipe, check_channels, force=False):
    """Return output package paths for a recipe that still need building.

    Renders the recipe (finalized), drops packages already present in
    **check_channels** (unless **force**), and raises DivergentBuildsError
    when the channels hold builds the recipe no longer produces.
    """
    if not force:
        if check_recipe_skippable(recipe, check_channels):
            # NB: If we skip early here, we don't detect possible divergent builds.
            return []
    platform, metas = _load_platform_metas(recipe, finalize=True)

    # The recipe likely defined skip: True
    if not metas:
        return []

    new_metas, existing_metas, divergent_builds = (
        _filter_existing_packages(metas, check_channels))

    if divergent_builds:
        raise DivergentBuildsError(*sorted(divergent_builds))

    for meta in existing_metas:
        logger.info(
            'FILTER: not building %s because '
            'it is in channel(s) and it is not forced.', meta.pkg_fn())

    # yield all pkgs that do not yet exist
    if force:
        build_metas = new_metas + existing_metas
    else:
        build_metas = new_metas

    return list(chain.from_iterable(
        api.get_output_file_paths(meta) for meta in build_metas))
def get_blacklist(config: Dict[str, Any], recipe_folder: str) -> set:
    """Return the set of recipes to skip, from all configured blacklists.

    Each blacklist file holds one recipe path per line; blank lines and
    lines starting with '#' are ignored. Paths are made relative to
    **recipe_folder**.
    """
    blacklist = set()
    for path in config.get('blacklists', []):
        # FIX: context manager closes each blacklist file promptly; the
        # previous bare open() in the iteration leaked the handle.
        with open(path, encoding='utf8') as blacklist_file:
            blacklist.update(
                os.path.relpath(line.strip(), recipe_folder)
                for line in blacklist_file
                if not line.startswith('#') and line.strip()
            )
    return blacklist
def validate_config(config):
    """
    Validate config against schema

    Parameters
    ----------
    config : str or dict
        If str, assume it's a path to YAML file and load it. If dict, use it
        directly.

    Raises
    ------
    A validation error from ``validate`` if the config does not match the
    schema (presumably jsonschema.ValidationError — confirm against the
    module's imports).
    """
    if not isinstance(config, dict):
        # FIX: context managers close the YAML files; the previous bare
        # open() calls leaked both file handles.
        with open(config) as config_file:
            config = yaml.safe_load(config_file)
    fn = pkg_resources.resource_filename(
        'bioconda_utils', 'config.schema.yaml'
    )
    with open(fn) as schema_file:
        schema = yaml.safe_load(schema_file)
    validate(config, schema)
def load_config(path):
    """
    Parses config file, building paths to relevant blacklists

    Parameters
    ----------
    path : str or dict
        Path to YAML config file, or an already-parsed config dict.

    Returns
    -------
    dict with defaults filled in; blacklist paths are resolved relative to
    the config file's directory. Side effect: registers the resulting
    config with RepoData.
    """
    validate_config(path)

    if isinstance(path, dict):
        def relpath(p):
            # Dict input: paths are used as-is.
            return p
        config = path
    else:
        def relpath(p):
            # Resolve paths relative to the config file's directory.
            return os.path.join(os.path.dirname(path), p)
        config = yaml.safe_load(open(path))

    def get_list(key):
        # always return empty list, also if NoneType is defined in yaml
        value = config.get(key)
        if value is None:
            return []
        return value

    default_config = {
        'blacklists': [],
        'channels': ['conda-forge', 'bioconda', 'defaults'],
        'requirements': None,
        'upload_channel': 'bioconda'
    }
    if 'blacklists' in config:
        config['blacklists'] = [relpath(p) for p in get_list('blacklists')]
    if 'channels' in config:
        config['channels'] = get_list('channels')

    default_config.update(config)

    # register config object in RepoData
    RepoData.register_config(default_config)
    return default_config
class BiocondaUtilsWarning(UserWarning):
    """Warning category used by bioconda-utils (e.g. for late cache setup)."""
class Progress:
    """Context manager printing a dot every minute to signal liveness."""

    def __init__(self):
        # Event used to tell the ticker thread to shut down.
        self.stop = Event()
        self.thread = Thread(target=self.progress)

    def progress(self):
        """Thread body: emit '.' every 60s until the stop event is set."""
        while not self.stop.wait(60):
            print(".", end="")
            sys.stdout.flush()
        print("")

    def __enter__(self):
        self.thread.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop.set()
        self.thread.join()
class AsyncRequests:
    """Download a bunch of files in parallel

    This is not really a class, more a name space encapsulating a bunch of calls.
    """
    #: Identify ourselves
    USER_AGENT = "bioconda/bioconda-utils"
    #: Max connections to each server
    CONNECTIONS_PER_HOST = 4

    @classmethod
    def fetch(cls, urls, descs, cb, datas):
        """Fetch data from URLs.

        This will use asyncio to manage a pool of connections at once, speeding
        up download as compared to iterative use of ``requests`` significantly.
        It will also retry on non-permanent HTTP error codes (i.e. 429, 502,
        503 and 504).

        Args:
          urls: List of URLS
          descs: Matching list of descriptions (for progress display)
          cb: As each download is completed, data is passed through this function.
              Use to e.g. offload json parsing into download loop.
          datas: Matching list of auxiliary objects passed to **cb** alongside
              each downloaded payload.

        Returns:
          List with one entry per URL: the return value of **cb**, or the raw
          bytes when no callback was given.
        """
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        if loop.is_running():
            logger.warning("Running AsyncRequests.fetch from within running loop")
            # Workaround the fact that asyncio's loop is marked as not-reentrant
            # (it is apparently easy to patch, but not desired by the devs,
            # so run the blocking fetch on a single helper thread instead.
            with ThreadPool(1) as pool:
                res = pool.apply(cls.fetch, (urls, descs, cb, datas))
            return res
        task = asyncio.ensure_future(cls.async_fetch(urls, descs, cb, datas))
        try:
            loop.run_until_complete(task)
        except KeyboardInterrupt:
            # Cancel the task, let the loop drain, then surface the result
            # (or CancelledError) via task.exception()/task.result().
            task.cancel()
            loop.run_forever()
            task.exception()
        return task.result()

    @classmethod
    async def async_fetch(cls, urls, descs=None, cb=None, datas=None, fds=None):
        """Async counterpart of fetch(): download all **urls** concurrently.

        Args mirror fetch(); **fds** optionally supplies file objects that
        each download is written to instead of being returned as bytes.
        """
        if descs is None:
            descs = []
        if datas is None:
            datas = []
        if fds is None:
            fds = []
        # Cap simultaneous connections per host (politeness / rate limits).
        conn = aiohttp.TCPConnector(limit_per_host=cls.CONNECTIONS_PER_HOST)
        async with aiohttp.ClientSession(
                connector=conn,
                headers={'User-Agent': cls.USER_AGENT}
        ) as session:
            coros = [
                asyncio.ensure_future(cls._async_fetch_one(session, url, desc, cb, data, fd))
                for url, desc, data, fd in zip_longest(urls, descs, datas, fds)
            ]
            with tqdm(asyncio.as_completed(coros),
                      total=len(coros),
                      desc="Downloading", unit="files") as t:
                result = [await coro for coro in t]
        return result

    @staticmethod
    @backoff.on_exception(backoff.fibo, aiohttp.ClientResponseError, max_tries=20,
                          giveup=lambda ex: ex.code not in [429, 502, 503, 504])
    async def _async_fetch_one(session, url, desc, cb=None, data=None, fd=None):
        """Download a single **url** with progress bar, retrying transient
        HTTP errors (429/502/503/504) with fibonacci backoff.

        Chunks are written to **fd** if given, otherwise accumulated; the
        joined bytes are passed through **cb** (with **data**) or returned.
        """
        result = []
        async with session.get(url, timeout=None) as resp:
            resp.raise_for_status()
            size = int(resp.headers.get("Content-Length", 0))
            with tqdm(total=size, unit='B', unit_scale=True, unit_divisor=1024,
                      desc=desc, miniters=1,
                      disable=logger.getEffectiveLevel() > logging.INFO
                      ) as progress:
                while True:
                    block = await resp.content.read(1024*16)
                    if not block:
                        break
                    progress.update(len(block))
                    if fd:
                        fd.write(block)
                    else:
                        result.append(block)
        if cb:
            return cb(b"".join(result), data)
        else:
            return b"".join(result)
class RepoData:
"""Singleton providing access to package directory on anaconda cloud
If the first call provides a filename as **cache** argument, the
file is used to cache the directory in CSV format.
Data structure:
Each **channel** hosted at anaconda cloud comprises a number of
**subdirs** in which the individual package files reside. The
**subdirs** can be one of **noarch**, **osx-64** and **linux-64**
for Bioconda. (Technically ``(noarch|(linux|osx|win)-(64|32))``
appears to be the schema).
For **channel/subdir** (aka **channel/platform**) combination, a
**repodata.json** contains a **package** key describing each
package file with at least the following information:
name: Package name (lowercase, alphanumeric + dash)
version: Version (no dash, PEP440)
build_number: Non negative integer indicating packaging revisions
build: String comprising hash of pinned dependencies and build
number. Used to distinguish different builds of the same
package/version combination.
depends: Runtime requirements for package as list of strings. We
do not currently load this.
arch: Architecture key (x86_64). Not used by conda and not loaded
here.
platform: Platform of package (osx, linux, noarch). Optional
upstream, not used by conda. We generate this from the subdir
information to have it available.
Repodata versions:
The version is indicated by the key **repodata_version**, with
absence of that key indication version 0.
In version 0, the **info** key contains the **subdir**,
**platform**, **arch**, **default_python_version** and
**default_numpy_version** keys. In version 1 it only contains the
**subdir** key.
In version 1, a key **removed** was added, listing packages
removed from the repository.
"""
REPODATA_URL = 'https://conda.anaconda.org/{channel}/{subdir}/repodata.json'
REPODATA_LABELED_URL = 'https://conda.anaconda.org/{channel}/label/{label}/{subdir}/repodata.json'
REPODATA_DEFAULTS_URL = 'https://repo.anaconda.com/pkgs/main/{subdir}/repodata.json'
_load_columns = ['build', 'build_number', 'name', 'version', 'depends']
#: Columns available in internal dataframe
columns = _load_columns + ['channel', 'subdir', 'platform']
#: Platforms loaded
platforms = ['linux', 'osx', 'noarch']
# config object
config = None
cache_file = None
_df = None
_df_ts = None
#: default lifetime for repodata cache
cache_timeout = 60*60*8
@classmethod
def register_config(cls, config):
cls.config = config
__instance = None
def __new__(cls):
"""Makes RepoData a singleton"""
if RepoData.__instance is None:
assert RepoData.config is not None, ("bug: ensure to load config "
"before instantiating RepoData.")
RepoData.__instance = object.__new__(cls)
return RepoData.__instance
def set_cache(self, cache):
if self._df is not None:
warnings.warn("RepoData cache set after first use", BiocondaUtilsWarning)
else:
self.cache_file = cache
def set_timeout(self, timeout):
"""Set the timeout after which the repodata should be reloaded"""
self.cache_timeout = timeout
@property
def channels(self):
"""Return channels to load."""
return self.config["channels"]
@property
def df(self):
"""Internal Pandas DataFrame object
Try not to use this ... the point of this class is to be able to
change the structure in which the data is held.
"""
if self._df_ts is not None:
seconds = (datetime.datetime.now() - self._df_ts).seconds
else:
seconds = 0
if self._df is None or seconds > self.cache_timeout:
self._df = self._load_channel_dataframe_cached()
self._df_ts = datetime.datetime.now()
return self._df
def _make_repodata_url(self, channel, platform):
if channel == "defaults":
# caveat: this only gets defaults main, not 'free', 'r' or 'pro'
url_template = self.REPODATA_DEFAULTS_URL
else:
url_template = self.REPODATA_URL
url = url_template.format(channel=channel,
subdir=self.platform2subdir(platform))
return url
def _load_channel_dataframe_cached(self):
if self.cache_file is not None and os.path.exists(self.cache_file):
ts = datetime.datetime.fromtimestamp(os.path.getmtime(self.cache_file))
seconds = (datetime.datetime.now() - ts).seconds
if seconds <= self.cache_timeout:
logger.info("Loading repodata from cache %s", self.cache_file)
return pd.read_pickle(self.cache_file)
else:
logger.info("Repodata cache file too old. Reloading")
res = self._load_channel_dataframe()
if self.cache_file is not None:
res.to_pickle(self.cache_file)
return res
def _load_channel_dataframe(self):
    """Download repodata.json for every channel/platform pair and merge
    the results into a single DataFrame (one row per package build).
    """
    repos = list(product(self.channels, self.platforms))
    urls = [self._make_repodata_url(c, p) for c, p in repos]
    descs = ["{}/{}".format(c, p) for c, p in repos]

    def to_dataframe(json_data, meta_data):
        # Parse one repodata.json blob; meta_data carries the
        # (channel, platform) pair the request was issued for.
        channel, platform = meta_data
        repo = json.loads(json_data)
        df = pd.DataFrame.from_dict(repo['packages'], 'index',
                                    columns=self._load_columns)
        # Ensure that version is always a string.
        df['version'] = df['version'].astype(str)
        df['channel'] = channel
        df['platform'] = platform
        df['subdir'] = repo['info']['subdir']
        return df

    if urls:
        # AsyncRequests fetches all URLs concurrently and applies
        # to_dataframe to each response.
        dfs = AsyncRequests.fetch(urls, descs, to_dataframe, repos)
        res = pd.concat(dfs)
    else:
        res = pd.DataFrame(columns=self.columns)
    # These columns have few distinct values; categoricals cut memory use.
    for col in ('channel', 'platform', 'subdir', 'name', 'version', 'build'):
        res[col] = res[col].astype('category')
    res = res.reset_index(drop=True)
    return res
@staticmethod
def native_platform():
if sys.platform.startswith("linux"):
return "linux"
if sys.platform.startswith("darwin"):
return "osx"
raise ValueError("Running on unsupported platform")
@staticmethod
def platform2subdir(platform):
if platform == 'linux':
return 'linux-64'
elif platform == 'osx':
return 'osx-64'
elif platform == 'noarch':
return 'noarch'
else:
raise ValueError(
'Unsupported platform: bioconda only supports linux, osx and noarch.')
def get_versions(self, name):
    """Get versions available for package

    Args:
      name: package name

    Returns:
      Dictionary mapping version numbers to list of architectures
      e.g. {'0.1': ['linux'], '0.2': ['linux', 'osx'], '0.3': ['noarch']}
    """
    # called from doc generator
    matching = self.df[self.df.name == name]
    per_version = matching.groupby('version')['platform']
    return per_version.agg(lambda plats: list(set(plats))).to_dict()
def get_latest_versions(self, channel):
    """Get the latest version for each package in **channel**

    Returns:
      Dictionary mapping package name to its highest version string,
      ordered by conda's ``VersionOrder``.
    """
    # called from pypi module
    # Keep the 'name' column: the previous code grouped a bare version
    # Series by 'name', which fails because 'name' is neither its index
    # nor a column after reset_index(drop=True).
    packages = self.df[self.df.channel == channel][['name', 'version']]

    def max_vers(versions):
        # Compare with conda version ordering but return the original string.
        return max(versions, key=VersionOrder)

    vers = packages.groupby('name')['version'].agg(max_vers)
    # The previous implementation computed 'vers' but never returned it,
    # so callers always received None.
    return vers.to_dict()
def get_package_data(self, key=None, channels=None, name=None, version=None,
                     build_number=None, platform=None, build=None, native=False):
    """Get **key** for each package in **channels**

    If **key** is not give, returns bool whether there are matches.
    If **key** is a string, returns list of strings.
    If **key** is a list of string, returns tuple iterator.
    """
    if native:
        platform = ['noarch', self.native_platform()]
    if version is not None:
        version = str(version)
    selection = self.df
    # Narrow the frame starting with the (probably) most selective
    # columns: filtering iteratively is much faster on a large frame
    # than evaluating every condition against all rows, especially
    # when looking up a single package.
    criteria = (
        ('name', name),             # thousands of different values
        ('build', build),           # build string should vary a lot
        ('version', version),       # still pretty good variety
        ('channel', channels),      # 3 values
        ('platform', platform),     # 3 values
        ('build_number', build_number),  # most values 0
    )
    for column, wanted in criteria:
        if wanted is None:
            continue
        if isinstance(wanted, (list, tuple)):
            selection = selection[selection[column].isin(wanted)]
        else:
            selection = selection[selection[column] == wanted]
    if key is None:
        return not selection.empty
    if isinstance(key, str):
        return list(selection[key])
    return selection[key].itertuples(index=False)
| bioconda/bioconda-utils | bioconda_utils/utils.py | Python | mit | 53,574 | [
"Bioconda"
] | 505ba714ff440325ab80e9e7fae8077003134f42ca609de157d44f789d956c26 |
""" The POOL XML Slice class provides a simple plugin module to create
an XML file for applications to translate LFNs to TURLs. The input
dictionary has LFNs as keys with all associated metadata as key,
value pairs.
"""
from DIRAC.Resources.Catalog.PoolXMLCatalog import PoolXMLCatalog
from DIRAC import S_OK, S_ERROR, gLogger
import os, types
__RCSID__ = "$Id$"
COMPONENT_NAME = 'PoolXMLSlice'
class PoolXMLSlice( object ):
  """ Creates a POOL XML catalogue slice translating LFNs to TURLs
      for a job's resolved input data.
  """
  #############################################################################
  def __init__( self, catalogName ):
    """ Standard constructor

        :param str catalogName: file name the XML slice will be written to
    """
    self.fileName = catalogName
    self.name = COMPONENT_NAME
    self.log = gLogger.getSubLogger( self.name )

  #############################################################################
  def execute( self, dataDict ):
    """ Given a dictionary of resolved input data, this will create a POOL XML slice.

        :param dict dataDict: LFN -> metadata dict (or list of metadata dicts,
                              one per replica) with 'guid' and per-replica
                              'se' plus one of 'path' / 'pfn' / 'turl'
        :return: S_OK / S_ERROR
    """
    poolXMLCatName = self.fileName
    try:
      poolXMLCat = PoolXMLCatalog()
      self.log.verbose( 'Creating POOL XML slice' )
      for lfn, mdataList in dataDict.items():
        # lfn,pfn,se,guid tuple taken by POOL XML Catalogue
        # isinstance() instead of the Python2-only type(...) != types.ListType
        if not isinstance( mdataList, list ):
          mdataList = [mdataList]
        # As a file may have several replicas, set first the file, then the replicas
        poolXMLCat.addFile( ( lfn, None, None, mdataList[0]['guid'], None ) )
        for mdata in mdataList:
          path = ''
          if 'path' in mdata:
            path = mdata['path']
          elif os.path.exists( os.path.basename( mdata['pfn'] ) ):
            path = os.path.abspath( os.path.basename( mdata['pfn'] ) )
          else:
            path = mdata['turl']
          poolXMLCat.addReplica( ( lfn, path, mdata['se'], False ) )
      xmlSlice = poolXMLCat.toXML()
      self.log.verbose( 'POOL XML Slice is: ' )
      self.log.verbose( xmlSlice )
      with open( poolXMLCatName, 'w' ) as poolSlice:
        poolSlice.write( xmlSlice )
      self.log.info( 'POOL XML Catalogue slice written to %s' % ( poolXMLCatName ) )
      try:
        # Temporary solution to the problem of storing the SE in the Pool XML slice
        # Context manager ensures the handle is closed even if toXML(True) raises.
        with open( '%s.temp' % ( poolXMLCatName ), 'w' ) as poolSlice_temp:
          xmlSlice = poolXMLCat.toXML( True )
          poolSlice_temp.write( xmlSlice )
      except Exception as x:
        self.log.warn( 'Attempted to write catalog also to %s.temp but this failed' % ( poolXMLCatName ) )
    except Exception as x:
      self.log.error( str( x ) )
      return S_ERROR( 'Exception during construction of POOL XML slice' )
    return S_OK( 'POOL XML Slice created' )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | WorkloadManagementSystem/Client/PoolXMLSlice.py | Python | gpl-3.0 | 2,823 | [
"DIRAC"
] | 4f961affb093e84e8ac2677ef3c177161906d6fbd64fc459dce26e41ded7e4a7 |
# Filename : Image2Map.py
# Authors : Georg Muntingh and Bjorn Lindeijer
# Version : 1.2
# Date : June 16, 2010
# Copyright : Public Domain
from PIL import Image
import os, sys, networkx
class TileMap:
    """ This class represents a map of tiles.

    NOTE(review): this is Python 2 code (print statements, dict.has_key,
    cmp= sort arguments) and will not run unmodified under Python 3.
    """
    def __init__(self, file, tileX, tileY):
        # For initialization, map image with filename file should be specified, together with the
        # tile size (tile X, tileY). First we set the tile sizes.
        self.TileX, self.TileY = tileX, tileY
        # Open the map and find its attributes.
        print "Opening the map image file: " + file
        self.MapImage = Image.open(file)
        self.MapImageWidth, self.MapImageHeight = self.MapImage.size
        # Python 2 integer division: assumes the image dimensions are exact
        # multiples of the tile size -- TODO confirm.
        self.Width, self.Height = self.MapImageWidth / self.TileX, self.MapImageHeight / self.TileY
        # Store the unique tiles in a list and a hash, and the map in a list.
        self.MapList, self.TileList, self.TileDict = self.parseMap()
        # Create a graph that contains the information that is relevant for the article.
        self.graphFromList()
        # We create a dictionary self.FullTileMap whose keys will be the coordinates
        # for the image of unique tiles, and the values are the unique tile numbers.
        self.FullTileMap = {}
        # Extract maximal components from G into the dictionary TileMap, and combine them
        # into self.FullTileMap using a method that places them as close to each other as
        # possible.
        while self.G.nodes() != []:
            v = self.G.nodes()[0]
            # (second return value is the reduced graph; not needed here)
            TileMap, K = self.growTileMap({(0, 0): v}, self.G, 0, 0, v)
            self.FullTileMap = self.composeDictionaries(self.FullTileMap, TileMap)
            self.G.remove_nodes_from(TileMap.values())
        # Create an image file from our map of unique tiles.
        self.TileImage = self.getTileImage()

    def parseMap(self):
        """ This function takes the map image, and obtains
              * a list TList of unique tiles.
              * a hash TDict of unique tiles.
              * a double list self.MapList of where an entry equals i if
                self.TileList[i] is the corresponding picture on the map image.
        """
        MList = [[-1 for i in range(self.Width)] for j in range(self.Height)] # TODO: Make this a single list
        TList = []
        TDict = {}
        progress = -1
        print "Parsing the Map: "
        # Jump through the map image in 8 x 8-tile steps. In each step:
        # * If the string of the tile is in the dictionary, place its value in map list MList[y][x].
        # * Otherwise, add this tile to the list, and add its string to the dictionary with value "the
        #   number of elements in the list". Also place this value in MList[y][x].
        for y in range(self.Height):
            for x in range(self.Width):
                box = self.TileX * x, self.TileY * y, self.TileX * (x+1), self.TileY * (y+1)
                tile = self.MapImage.crop(box)
                # Raw pixel bytes serve as the hash key for tile uniqueness.
                s = tile.tobytes()
                if TDict.has_key(s):
                    MList[y][x] = TDict[s]
                else:
                    TList.append(tile)
                    TDict[s] = len(TList)
                    MList[y][x] = len(TList)
                # Calculate the progress, and print it to the screen.
                p = ((x + y * self.Width) * 100) / (self.Width * self.Height)
                if progress != p:
                    progress = p
                    self.printProgress(progress)
        self.printProgress(100)
        print "Done!"
        return MList, TList, TDict

    def printProgress(self, percentage):
        """ This function prints the percentage on the current row after erasing what is already there.
        """
        print '%s\r' % ' '*20, # clean up row
        print '%3d%% ' % percentage, # ending with comma prevents newline from being appended
        sys.stdout.flush()

    def getTileImage(self):
        """ This function takes the hash of unique tiles self.FullTileMap and
            creates a tileset image from it.
        """
        H = self.FullTileMap
        # Bounding box of all placed tile coordinates (keys are (y, x)).
        Xmin = min([ h[1] for h in H.keys() ])
        Xmax = max([ h[1] for h in H.keys() ])
        Ymin = min([ h[0] for h in H.keys() ])
        Ymax = max([ h[0] for h in H.keys() ])
        TileImage = Image.new("RGB", (self.TileX * (Xmax - Xmin + 1), self.TileY * (Ymax - Ymin + 1) ) )
        for i in range(Ymin, Ymax + 1):
            for j in range(Xmin, Xmax + 1):
                if (i,j) in H:
                    box = ( self.TileX * (j - Xmin) , self.TileY * (i - Ymin), \
                            self.TileX * (j - Xmin + 1), self.TileY * (i - Ymin + 1) )
                    # Tile numbers are 1-based, hence the -1 index.
                    TileImage.paste(self.TileList[H[(i,j)] - 1].convert("RGB"), box)
        return TileImage

    def printHash(self, H):
        """ This function nicely aligns dictionaries with elements of the form
            "(y, x): n" in a table, in which row y, column x has entry n.
            In this specific case (x, y) will be the tile coordinates at which
            tile n will be placed in the tile image.
        """
        Xmin = min([ h[1] for h in H.keys() ])
        Xmax = max([ h[1] for h in H.keys() ])
        Ymin = min([ h[0] for h in H.keys() ])
        Ymax = max([ h[0] for h in H.keys() ])
        # Find the number of symbols we need to write down the tile numbers.
        D = len(str(max(H.values())))
        st = ""
        for i in range(Ymin, Ymax + 1):
            for j in range(Xmin, Xmax + 1):
                if not (i,j) in H:
                    # Empty cell: pad with dots to the number width.
                    st = st + "|"
                    for k in range(D):
                        st = st + "."
                else:
                    h = H[(i,j)]
                    d = len(str(h))
                    st = st + "|"
                    for k in range(D-d):
                        st = st + "."
                    st = st + str(h)
            st = st + "|\n"
        print st

    def addEdge(self, s, t, dirr):
        """ This function increases abs(value) of an edge st in a graph G, taking the
            'direction' of st into account.

            s:   a start vertex
            t:   an end vertex
            dir: a value depicting the st-direction,
                  +1 for left -> right
                  -1 for up   -> down
        """
        if self.G.has_edge(s, t):
            # Only merge with an existing edge of the same direction (sign).
            values = [ value for value in self.G.edge[s][t] if (dirr * value) > 0 ]
        else:
            values = []
        if values:
            self.G.remove_edge(s, t, values[0]) # increase the value by 1
            self.G.add_edge(s, t, values[0] + dirr)
        else:
            self.G.add_edge(s, t, dirr) # create a dir-valued edge

    def graphFromList(self):
        """ This function constructs a weighted directed graph from the
            list that depicts the map using the following scheme:

            Left A, Right B -> add (A, B, 1)
            Left B, Right A -> add (B, A, 1)
            Up   A, Down  B -> add (A, B,-1)
            Up   B, Down  A -> add (B, A,-1)

            We then add all similar edges together, so for instance
            (A, B, 1) and (A, B, 1) -> (A, B, 2)
            but *NOT*
            (A, B, 1) and (A, B, -1) -> (A, B, 0)
        """
        self.G = networkx.MultiDiGraph(selfloops = False, multiedges = True)
        L = self.MapList
        progress = -1
        print "Generating the graph: "
        # Now add for every Cartesian crossing an edge (or a value) in G
        for i in range(len(L) - 1):
            for j in range(len(L[0]) - 1):
                self.addEdge(L[i][j], L[i][j + 1], 1) # L-R, +1
                self.addEdge(L[i][j], L[i + 1][j], -1) # U-D, -1
                # Calculate the progress, and print it to the screen.
                p = ((j + i * len(L)) * 100) / (len(L) * len(L[0]))
                if progress != p:
                    progress = p
                    self.printProgress(progress)
        # What remains is the bottom and right line of edges:
        for j in range(len(L[0]) - 1):
            self.addEdge(L[len(L) - 1][j], L[len(L) - 1][j + 1], 1)
        for i in range(len(L) - 1):
            self.addEdge(L[i][len(L[0]) - 1], L[i + 1][len(L[0]) - 1], -1)
        # Now show 100% progress and say we're done.
        self.printProgress(100)
        print "Done!"

    def growTileMap(self, TileMap, G, posX, posY, curV):
        """ This is a recursive function that arranges a map of unique tiles.
        """
        # For each of the directions, make a possible edge-list to choose from,
        # and combine them into one list Edges such that Edges[i] stands
        # for the edges with direction code i, where
        #   0 <-> up
        #   1 <-> right
        #   2 <-> down
        #   3 <-> left
        LL = [e for e in G.in_edges(curV) if e[1] > 0]
        LU = [e for e in G.in_edges(curV) if e[1] < 0]
        LR = [e for e in G.out_edges(curV) if e[1] > 0]
        LD = [e for e in G.out_edges(curV) if e[1] < 0]
        Edges = [LU, LR, LD, LL]
        # We want to visit all directions such that we visit the direction with
        # the smallest amount of possible tiles first. This is because these tiles
        # will have the smallest probability to fit in at a later stage. It will
        # also embed blocks of tiles that appear only in one configuration
        # (pictures chopped up in tiles).
        dir = [ [ Edges[i], i ] for i in range(4)]
        dir.sort(cmp = lambda L1, L2: len(L1[0]) - len(L2[0]))
        dir = [ x[1] for x in dir]
        while dir != []:
            direction = dir[0]
            if Edges[direction] != []:
                E = Edges[direction]
                # Now order E with respect to the values of its edges. This will
                # make the algorithm start with a combination that appears most
                # often in the graph, which is a measure for how much two tiles
                # "belong together".
                E.sort(cmp = lambda e, f: abs(e[1]) - abs(f[1]), reverse = True)
                # Now walk through E until you find an edge that fits with
                # the previously placed tiles in TileMap
                isPlaced = False
                while E != [] and isPlaced == False:
                    e = E[0]
                    # We need to know the end vertex and the new position.
                    if direction == 0:
                        endV = e[0]
                        NX, NY = posX, posY - 1
                    elif direction == 1:
                        endV = e[1]
                        NX, NY = posX + 1, posY
                    elif direction == 2:
                        endV = e[1]
                        NX, NY = posX, posY + 1
                    elif direction == 3:
                        endV = e[0]
                        NX, NY = posX - 1, posY
                    # Now in case position NX, NY is not already taken and endV is
                    # compatible with "surrounding edges" in our graph, then we can
                    # add endV to our TileMap.
                    if (not (NY, NX) in TileMap) and (TileMap.values().count(endV) == 0) and \
                       ( (not (NY-1, NX) in TileMap) or G.has_edge(TileMap[(NY-1, NX)], endV) ) and \
                       ( (not (NY, NX+1) in TileMap) or G.has_edge(endV, TileMap[(NY, NX+1)]) ) and \
                       ( (not (NY+1, NX) in TileMap) or G.has_edge(endV, TileMap[(NY+1), NX]) ) and \
                       ( (not (NY, NX-1) in TileMap) or G.has_edge(TileMap[(NY, NX-1)], endV) ):
                        # Add this node to our TileMap and delete the edge we just processed.
                        TileMap[(NY, NX)] = endV
                        isPlaced = True
                        G.remove_edge(*e)
                        # Call the procedure recursively with this new node.
                        TileMap, G = self.growTileMap(TileMap, G, NX, NY, endV)
                    E = E[1:len(E)] # Chop of the first edge
            dir = dir[1:len(dir)] # Chop of the first direction
        return TileMap, G

    def centerOfDictionary(self, H):
        """ Returns the center of the dictionary, that is, the average of all keys.
        """
        L = H.keys()
        # Keys are (y, x); the result is returned as [x, y].
        return [ int(round( sum([l[1] for l in L]) / (len(L) + 0.0) )), \
                 int(round( sum([l[0] for l in L]) / (len(L) + 0.0) )) ]

    def composeDictionaries(self, H1, H2):
        """ This method takes two dictionaries H1, H2 that represent pieces of the
            maps of unique tiles, and pastes the second into the first, as close to
            their centers -- as close together -- as possible.
        """
        # In the first step H1 will be empty, and we return just H2.
        if H1 == {}:
            return H2
        CX1, CY1 = self.centerOfDictionary(H1)
        CX2, CY2 = self.centerOfDictionary(H2)
        # To make sure we fit H2 in as central as possible in H1, we walk in a spiral
        # around the center of H1, the offset being X, Y.
        # |.4|.5|.6|
        # |.3|.0|.7|
        # |.2|.1|.8|
        # ...|.9|
        X, Y = 0, 0
        foundFit = False
        while foundFit == False:
            # We check if H2 can be placed at location (CX1 + X, CY1 + Y)
            isFit = True
            keys = H2.keys()
            # As long as there are keys in H2 left and we found no counter example:
            while keys != [] and isFit:
                (y, x) = keys.pop()
                x1, y1 = x - CX2 + CX1 + X, y - CY2 + CY1 + Y
                # Require an empty cell AND empty 4-neighbourhood so the
                # pieces never become adjacent.
                if H1.has_key((y1, x1)) or H1.has_key((y1 - 1, x1)) or H1.has_key((y1, x1 + 1)) or \
                   H1.has_key((y1 + 1, x1)) or H1.has_key((y1, x1 - 1)):
                    isFit = False
            # If we found a fit, embed H2 into H1 accordingly.
            if isFit:
                for (y, x) in H2.keys():
                    x1, y1 = x - CX2 + CX1 + X, y - CY2 + CY1 + Y
                    H1[(y1, x1)] = H2[(y, x)]
                foundFit = True
            # Update the offset (X, Y) from the center of H1, by spiraling away.
            if X == 0 and Y == 0:
                Y += 1 # The first direction away from (0,0)
            elif Y < 0 and X < -Y and X >= Y:
                X += 1
            elif X > 0 and Y <= X and Y >= -X:
                Y += 1
            elif Y > 0 and X > -Y and X < Y:
                X -= 1
            elif X < 0 and Y > X and Y <= -X:
                Y -= 1
        return H1
if sys.argv[1] == "--help":
print "Usage : python Image2Map.py [tileX] [tileY] files..."
print "Example: python Image2Map.py 8 8 Sewers.png Caves.png"
elif len(sys.argv) < 4:
print "Error : You specified too few arguments!\n"
print "Usage : python Image2Map.py [tileX] [tileY] files..."
print "Example: python Image2Map.py 8 8 Sewers.png Caves.png"
else:
tileX, tileY = int(sys.argv[1]), int(sys.argv[2])
for file in sys.argv[3:]:
map = TileMap(file, tileX, tileY)
tilefile = os.path.splitext(file)[0] + "-Tileset" + ".png"
print "Saving the tileset image into the file: " + tilefile
map.TileImage.save( tilefile, "PNG" )
print "Pretty-printing the tileset:" + "\n"
map.printHash(map.FullTileMap)
| alpine9000/climbyskies | tools/image2map/Image2Map.py | Python | bsd-2-clause | 15,519 | [
"VisIt"
] | 7fac9ae7f19e626ab39e8e659f6b5cc0669db41b3137c7c6e751beea130b6dc6 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import warnings
from pathlib import Path
import neurom as nm
import pandas as pd
from neurom.apps import morph_stats as ms
from neurom.exceptions import ConfigError
from neurom.features import _NEURITE_FEATURES, _MORPHOLOGY_FEATURES, _POPULATION_FEATURES
import pytest
from numpy.testing import assert_array_equal, assert_almost_equal
from pandas.testing import assert_frame_equal
# Location of the shared test data shipped with the repository.
DATA_PATH = Path(__file__).parent.parent / 'data'
SWC_PATH = DATA_PATH / 'swc'

# Reference feature configuration in the legacy format: each feature maps
# directly to a list of aggregation modes.
REF_CONFIG = {
    'neurite': {
        'section_lengths': ['max', 'sum'],
        'section_volumes': ['sum'],
        'section_branch_orders': ['max', 'raw'],
        'segment_midpoints': ['max'],
        'max_radial_distance': ['mean'],
    },
    'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
    'morphology': {
        'soma_radius': ['mean'],
        'max_radial_distance': ['mean'],
    }
}

# The same configuration in the new format: each feature maps to a dict
# with a 'modes' list (and optionally 'kwargs').
REF_CONFIG_NEW = {
    'neurite': {
        'section_lengths': {'modes': ['max', 'sum']},
        'section_volumes': {'modes': ['sum']},
        'section_branch_orders': {'modes': ['max', 'raw']},
        'segment_midpoints': {'modes': ['max']},
        'max_radial_distance': {'modes': ['mean']},
    },
    'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
    'morphology': {
        'soma_radius': {'modes': ['mean']},
        'max_radial_distance': {'modes': ['mean']},
    }
}

# Expected extract_stats() output for data/swc/Neuron.swc under REF_CONFIG.
REF_OUT = {
    'morphology': {
        'mean_soma_radius': 0.13065629648763766,
        'mean_max_radial_distance': 99.5894610648815,
    },
    'axon': {
        'sum_section_lengths': 207.87975220908129,
        'max_section_lengths': 11.018460736176685,
        'max_section_branch_orders': 10,
        'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
        'sum_section_volumes': 276.73857657289523,
        'max_segment_midpoints_0': 0.0,
        'max_segment_midpoints_1': 0.0,
        'max_segment_midpoints_2': 49.520305964149998,
        'mean_max_radial_distance': 82.44254511788921,
    },
    'all': {
        'sum_section_lengths': 840.68521442251949,
        'max_section_lengths': 11.758281556059444,
        'max_section_branch_orders': 10,
        'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
        'sum_section_volumes': 1104.9077419665782,
        'max_segment_midpoints_0': 64.401674984050004,
        'max_segment_midpoints_1': 48.48197694465,
        'max_segment_midpoints_2': 53.750947521650005,
        'mean_max_radial_distance': 99.5894610648815,
    },
    'apical_dendrite': {
        'sum_section_lengths': 214.37304577550353,
        'max_section_lengths': 11.758281556059444,
        'max_section_branch_orders': 10,
        'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
        'sum_section_volumes': 271.9412385728449,
        'max_segment_midpoints_0': 64.401674984050004,
        'max_segment_midpoints_1': 0.0,
        'max_segment_midpoints_2': 53.750947521650005,
        'mean_max_radial_distance': 99.5894610648815,
    },
    'basal_dendrite': {
        'sum_section_lengths': 418.43241643793476,
        'max_section_lengths': 11.652508126101711,
        'max_section_branch_orders': 10,
        'raw_section_branch_orders': [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10],
        'sum_section_volumes': 556.22792682083821,
        'max_segment_midpoints_0': 64.007872333250006,
        'max_segment_midpoints_1': 48.48197694465,
        'max_segment_midpoints_2': 51.575580778049996,
        'mean_max_radial_distance': 94.43342438865741,
    },
}
def test_extract_stats_single_morphology():
    """extract_stats on a single morphology reproduces the reference output."""
    morph = nm.load_morphology(SWC_PATH / 'Neuron.swc')
    stats = ms.extract_stats(morph, REF_CONFIG)
    assert set(stats) == set(REF_OUT)
    for section in ('morphology', 'all', 'axon', 'basal_dendrite', 'apical_dendrite'):
        assert set(stats[section]) == set(REF_OUT[section])
        for feature, value in stats[section].items():
            assert_almost_equal(value, REF_OUT[section][feature], decimal=4)
def test_extract_stats_new_format():
    """The new config format yields the same stats as the legacy one."""
    morph = nm.load_morphology(SWC_PATH / 'Neuron.swc')
    stats = ms.extract_stats(morph, REF_CONFIG_NEW)
    assert set(stats) == set(REF_OUT)
    for section in ('morphology', 'all', 'axon', 'basal_dendrite', 'apical_dendrite'):
        assert set(stats[section]) == set(REF_OUT[section])
        for feature, value in stats[section].items():
            assert_almost_equal(value, REF_OUT[section][feature], decimal=4)
def test_stats_new_format_set_arg():
    """Per-feature kwargs in the new config format restrict the output."""
    morph = nm.load_morphology(SWC_PATH / 'Neuron.swc')
    config = {
        'neurite': {
            'section_lengths': {'kwargs': {'neurite_type': 'AXON'}, 'modes': ['max', 'sum']},
        },
        'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
        'morphology': {
            'soma_radius': {'modes': ['mean']},
        },
    }
    stats = ms.extract_stats(morph, config)
    # Despite four requested neurite types, the AXON kwarg restricts the
    # neurite section of the output to the axon alone.
    assert set(stats) == {'morphology', 'axon'}
    assert set(stats['axon']) == {'max_section_lengths', 'sum_section_lengths'}
    assert set(stats['morphology']) == {'mean_soma_radius'}
def test_extract_stats_scalar_feature():
    """Scalar (already-aggregated) features are extracted correctly."""
    morph = nm.load_morphology(DATA_PATH / 'neurolucida' / 'bio_neuron-000.asc')
    config = {
        'neurite_type': ['ALL'],
        'neurite': {'number_of_forking_points': ['max']},
        'morphology': {'soma_volume': ['sum']},
    }
    stats = ms.extract_stats(morph, config)
    assert stats == {
        'all': {'max_number_of_forking_points': 277},
        'morphology': {'sum_soma_volume': 1424.4383771584492},
    }
def test_extract_dataframe():
    """extract_dataframe accepts populations, single morphologies, lists of
    morphologies and lists of paths, and produces the expected frame."""
    # Vanilla test
    morphs = nm.load_morphologies([SWC_PATH / 'Neuron.swc', SWC_PATH / 'simple.swc'])
    actual = ms.extract_dataframe(morphs, REF_CONFIG_NEW)
    # drop raw features as they require too much test data to mock
    actual = actual.drop(columns='raw_section_branch_orders', level=1)
    expected = pd.read_csv(Path(DATA_PATH, 'extracted-stats.csv'), header=[0, 1], index_col=0)
    assert_frame_equal(actual, expected, check_dtype=False)

    # Test with a single morphology in the population
    morphs = nm.load_morphologies(SWC_PATH / 'Neuron.swc')
    actual = ms.extract_dataframe(morphs, REF_CONFIG_NEW)
    # drop raw features as they require too much test data to mock
    actual = actual.drop(columns='raw_section_branch_orders', level=1)
    assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)

    # Test with a config without the 'morphology' key
    morphs = nm.load_morphologies([Path(SWC_PATH, name)
                                   for name in ['Neuron.swc', 'simple.swc']])
    config = {'neurite': {'section_lengths': ['sum']},
              'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL']}
    actual = ms.extract_dataframe(morphs, config)
    idx = pd.IndexSlice
    # keep only the columns the reduced config should produce
    expected = expected.loc[:, idx[:, ['name', 'sum_section_lengths']]]
    assert_frame_equal(actual, expected, check_dtype=False)

    # Test with a Morphology argument
    m = nm.load_morphology(Path(SWC_PATH, 'Neuron.swc'))
    actual = ms.extract_dataframe(m, config)
    assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)

    # Test with a List[Morphology] argument
    morphs = [nm.load_morphology(Path(SWC_PATH, name))
              for name in ['Neuron.swc', 'simple.swc']]
    actual = ms.extract_dataframe(morphs, config)
    assert_frame_equal(actual, expected, check_dtype=False)

    # Test with a List[Path] argument
    morphs = [Path(SWC_PATH, name) for name in ['Neuron.swc', 'simple.swc']]
    actual = ms.extract_dataframe(morphs, config)
    assert_frame_equal(actual, expected, check_dtype=False)

    # Test without any neurite_type keys, it should pick the defaults
    config = {'neurite': {'total_length_per_neurite': ['sum']}}
    actual = ms.extract_dataframe(morphs, config)
    expected_columns = pd.MultiIndex.from_tuples(
        [('property', 'name'),
         ('axon', 'sum_total_length_per_neurite'),
         ('basal_dendrite', 'sum_total_length_per_neurite'),
         ('apical_dendrite', 'sum_total_length_per_neurite'),
         ('all', 'sum_total_length_per_neurite')])
    expected = pd.DataFrame(
        columns=expected_columns,
        data=[['Neuron.swc', 207.87975221, 418.43241644, 214.37304578, 840.68521442],
              ['simple.swc', 15., 16., 0., 31., ]])
    assert_frame_equal(actual, expected, check_dtype=False)
def test_extract_dataframe_multiproc():
    """Multiprocess extraction matches the single-process reference and
    warns when more workers than CPUs are requested."""
    morphs = [Path(SWC_PATH, name)
              for name in ['Neuron.swc', 'simple.swc']]
    with warnings.catch_warnings(record=True) as w:
        actual = ms.extract_dataframe(morphs, REF_CONFIG, n_workers=2)
    # drop raw features as they require too much test data to mock
    actual = actual.drop(columns='raw_section_branch_orders', level=1)
    expected = pd.read_csv(Path(DATA_PATH, 'extracted-stats.csv'), index_col=0, header=[0, 1])

    assert_frame_equal(actual, expected, check_dtype=False)

    # Requesting more workers than CPUs should emit exactly one warning.
    with warnings.catch_warnings(record=True) as w:
        actual = ms.extract_dataframe(morphs, REF_CONFIG, n_workers=os.cpu_count() + 1)
    # drop raw features as they require too much test data to mock
    actual = actual.drop(columns='raw_section_branch_orders', level=1)
    assert len(w) == 1, "Warning not emitted"
    assert_frame_equal(actual, expected, check_dtype=False)
def test_get_header():
    """Header holds 'name' plus one column per flattened stat in REF_OUT."""
    results = {name: REF_OUT
               for name in ('fake_name0', 'fake_name1', 'fake_name2')}
    header = ms.get_header(results)
    assert len(header) == 1 + 2 + 4 * (4 + 5)  # name + everything in REF_OUT
    assert 'name' in header
    assert 'morphology:mean_soma_radius' in header
def test_generate_flattened_dict():
    """One flattened row is generated per morphology in the results dict."""
    results = {name: REF_OUT
               for name in ('fake_name0', 'fake_name1', 'fake_name2')}
    header = ms.get_header(results)
    rows = list(ms.generate_flattened_dict(header, results))
    assert len(rows) == 3  # one for fake_name[0-2]
    assert len(rows[0]) == 1 + 2 + 4 * (4 + 5)  # name + everything in REF_OUT
def test_full_config():
    """full_config exposes every registered feature for each category."""
    config = ms.full_config()
    assert set(config) == {'neurite', 'population', 'morphology', 'neurite_type'}
    for category, registry in (('neurite', _NEURITE_FEATURES),
                               ('morphology', _MORPHOLOGY_FEATURES),
                               ('population', _POPULATION_FEATURES)):
        assert set(config[category]) == set(registry)
def test_sanitize_config():
    """sanitize_config rejects bad configs and fills in missing sections."""
    # a bare list is not a valid 'neurite' section
    with pytest.raises(ConfigError):
        ms.sanitize_config({'neurite': []})

    sanitized_empty = ms.sanitize_config({})  # empty
    assert len(sanitized_empty) == 2  # neurite & morphology created

    full_config = {
        'neurite': {
            'section_lengths': ['max', 'sum'],
            'section_volumes': ['sum'],
            'section_branch_orders': ['max'],
        },
        'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
        'morphology': {
            'soma_radius': ['mean'],
        },
    }
    sanitized_full = ms.sanitize_config(full_config)
    assert len(sanitized_full) == 3  # neurite, neurite_type & morphology
def test_multidimensional_features():
    """Features should be split into sub-features when they
    are multidimensional.

    This should be the case even when the feature is `None` or `[]`

    The following morphology has no axon but the axon feature segment_midpoints for
    the axon should still be made of 3 values (X, Y and Z)

    Cf: https://github.com/BlueBrain/NeuroM/issues/859
    """
    m = nm.load_morphology(Path(SWC_PATH, 'no-axon.swc'))

    # 3-D feature: expands into _0/_1/_2 columns even with no axon data
    config = {'neurite': {'segment_midpoints': ['max']},
              'neurite_type': ['AXON']}
    actual = ms.extract_dataframe(m, config)
    assert_array_equal(actual['axon'][['max_segment_midpoints_0',
                                       'max_segment_midpoints_1',
                                       'max_segment_midpoints_2']].values,
                       [[None, None, None]])

    # 2-D feature: expands into _0/_1 columns
    config = {'neurite': {'partition_pairs': ['max']}}
    actual = ms.extract_dataframe(m, config)
    assert_array_equal(actual['axon'][['max_partition_pairs_0',
                                       'max_partition_pairs_1']].values,
                       [[None, None]])
| BlueBrain/NeuroM | tests/apps/test_morph_stats.py | Python | bsd-3-clause | 14,459 | [
"NEURON"
] | eb42c39df3e70c8fc5ba048be678e96c4b02842e9322f78f2e753d0e5008357a |
# -*- coding: utf-8 -*-
''' Used for processing CLA requests'''
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008-2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <ricky@fedoraproject.org>
# Mike McGrath <mmcgrath@redhat.com>
# Toshio Kuratomi <toshio@redhat.com>
#
import turbogears
from turbogears import controllers, expose, identity, config
from turbogears.database import session
import cherrypy
from sqlalchemy.exceptions import SQLError
from datetime import datetime
import GeoIP
from genshi.template.plugin import TextTemplateEnginePlugin
from fedora.tg.tg1utils import request_format
from fas.model import People, Groups, Log
from fas.auth import is_admin, cla_done
from fas.util import send_mail
import fas
from fas import _
class CLA(controllers.Controller):
''' Processes CLA workflow '''
# Group name for people having signed the CLA
CLAGROUPNAME = config.get('cla_standard_group')
# Meta group for everyone who has satisfied the requirements of the CLA
# (By signing or having a corporate signatue or, etc)
CLAMETAGROUPNAME = config.get('cla_done_group')
# Values legal in phone numbers
PHONEDIGITS = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '+',
'-', ')' ,'(', ' ')
def __init__(self):
'''Create a CLA Controller.'''
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.cla.index")
def index(self):
'''Display the CLAs (and accept/do not accept buttons)'''
show = {}
show['show_postal_address'] = config.get('show_postal_address')
username = turbogears.identity.current.user_name
person = People.by_username(username)
try:
code_len = len(person.country_code)
except TypeError:
code_len = 0
if show['show_postal_address']:
contactInfo = person.telephone or person.postal_address
if person.country_code == 'O1' and not person.telephone:
turbogears.flash(_('A telephone number is required to ' + \
'complete the CLA. Please fill out below.'))
elif not person.country_code or not person.human_name \
or not contactInfo:
turbogears.flash(_('A valid country and telephone number ' + \
'or postal address is required to complete the CLA. ' + \
'Please fill them out below.'))
else:
if not person.telephone or code_len != 2 or \
person.country_code == ' ':
turbogears.flash(_('A valid country and telephone number are' +
' required to complete the CLA. Please fill them ' +
'out below.'))
cla = cla_done(person)
person = person.filter_private()
return dict(cla=cla, person=person, date=datetime.utcnow().ctime(),
show=show)
def _cla_dependent(self, group):
'''
Check whether a group has the cla in its prerequisite chain.
Arguments:
:group: group to check
Returns: True if the group requires the cla_group_name otherwise
'''
if group.name in (self.CLAGROUPNAME, self.CLAMETAGROUPNAME):
return True
if group.prerequisite_id:
return self._cla_dependent(group.prerequisite)
return False
def json_request(self):
''' Helps define if json is being used for this request
:returns: 1 or 0 depending on if request is json or not
'''
return 'tg_format' in cherrypy.request.params and \
cherrypy.request.params['tg_format'] == 'json'
@expose(template="fas.templates.error")
def error(self, tg_errors=None):
'''Show a friendly error message'''
if not tg_errors:
turbogears.redirect('/')
return dict(tg_errors=tg_errors)
@identity.require(turbogears.identity.not_anonymous())
@expose(template = "genshi-text:fas.templates.cla.cla", format = "text",
content_type = 'text/plain; charset=utf-8')
def text(self):
'''View CLA as text'''
username = turbogears.identity.current.user_name
person = People.by_username(username)
person = person.filter_private()
return dict(person=person, date=datetime.utcnow().ctime())
@identity.require(turbogears.identity.not_anonymous())
@expose(template = "genshi-text:fas.templates.cla.cla", format = "text",
content_type = 'text/plain; charset=utf-8')
def download(self):
'''Download CLA'''
username = turbogears.identity.current.user_name
person = People.by_username(username)
person = person.filter_private()
return dict(person=person, date=datetime.utcnow().ctime())
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.user.view", allow_json=True)
def reject(self, person_name):
'''Reject a user's CLA.
This method will remove a user from the CLA group and any other groups
that they are in that require the CLA. It is used when a person has
to fulfill some more legal requirements before having a valid CLA.
Arguments
:person_name: Name of the person to reject.
'''
show = {}
show['show_postal_address'] = config.get('show_postal_address')
exc = None
user = People.by_username(turbogears.identity.current.user_name)
if not is_admin(user):
# Only admins can use this
turbogears.flash(_('You are not allowed to reject CLAs.'))
exc = 'NotAuthorized'
else:
# Unapprove the cla and all dependent groups
person = People.by_username(person_name)
for role in person.roles:
if self._cla_dependent(role.group):
role.role_status = 'unapproved'
try:
session.flush()
except SQLError, error:
turbogears.flash(_('Error removing cla and dependent groups' \
' for %(person)s\n Error was: %(error)s') %
{'person': person_name, 'error': str(error)})
exc = 'sqlalchemy.SQLError'
if not exc:
# Send a message that the ICLA has been revoked
date_time = datetime.utcnow()
Log(author_id=user.id, description='Revoked %s CLA' %
person.username, changetime=date_time)
revoke_subject = _('Fedora ICLA Revoked')
revoke_text = _('''
Hello %(human_name)s,
We're sorry to bother you but we had to reject your CLA for now because
information you provided has been deemed incorrect. The most common cause
of this is people abbreviating their name like "B L Couper" instead of
providing their actual full name "Bill Lay Couper". Other causes of this
include are using a country, or phone number that isn't accurate [1]_.
If you could edit your account [2]_ to fix any of these problems and resubmit
the CLA we would appreciate it.
.. [1]: Why does it matter that we have your real name and phone
number? It's because the CLA is a legal document and should we ever
need to contact you about one of your contributions (as an example,
because someone contacts *us* claiming that it was really they who
own the copyright to the contribution) we might need to contact you
for more information about what's going on.
.. [2]: Edit your account by logging in at this URL:
%(editurl)s/accounts/user/edit/%(username)s
If you have questions about what specifically might be the problem with your
account, please contact us at accounts@fedoraproject.org.
Thanks!
''') % {'username': person.username,
'human_name': person.human_name,
'editurl' : config.get('base_url_filter.base_url').rstrip('/')}
send_mail(person.email, revoke_subject, revoke_text)
# Yay, sweet success!
turbogears.flash(_('CLA Successfully Removed.'))
# and now we're done
if request_format() == 'json':
return_val = {}
if exc:
return_val['exc'] = exc
return return_val
else:
turbogears.redirect('/user/view/%s' % person_name)
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.cla.index")
def send(self, human_name, telephone, country_code, postal_address=None,
confirm=False, agree=False):
'''Send CLA'''
# TO DO: Pull show_postal_address in at the class level
# as it's used in three methods now
show = {}
show['show_postal_address'] = config.get('show_postal_address')
username = turbogears.identity.current.user_name
person = People.by_username(username)
if cla_done(person):
turbogears.flash(_('You have already completed the CLA.'))
turbogears.redirect('/cla/')
return dict()
if not agree:
turbogears.flash(_("You have not completed the CLA."))
turbogears.redirect('/user/view/%s' % person.username)
if not confirm:
turbogears.flash(_(
'You must confirm that your personal information is accurate.'
))
turbogears.redirect('/cla/')
# Compare old information to new to see if any changes have been made
if human_name and person.human_name != human_name:
person.human_name = human_name
if telephone and person.telephone != telephone:
person.telephone = telephone
if postal_address and person.postal_address != postal_address:
person.postal_address = postal_address
if country_code and person.country_code != country_code:
person.country_code = country_code
# Save it to the database
try:
session.flush()
except Exception:
turbogears.flash(_("Your updated information could not be saved."))
turbogears.redirect('/cla/')
return dict()
# Heuristics to detect bad data
if show['show_postal_address']:
contactInfo = person.telephone or person.postal_address
if person.country_code == 'O1':
if not person.human_name or not person.telephone:
# Message implemented on index
turbogears.redirect('/cla/')
else:
if not person.country_code or not person.human_name \
or not contactInfo:
# Message implemented on index
turbogears.redirect('/cla/')
else:
if not person.telephone or \
not person.human_name or \
not person.country_code:
turbogears.flash(_('To complete the CLA, we must have your ' + \
'name, telephone number, and country. Please ensure they ' + \
'have been filled out.'))
turbogears.redirect('/cla/')
blacklist = config.get('country_blacklist', [])
country_codes = [c for c in GeoIP.country_codes if c not in blacklist]
if person.country_code not in country_codes:
turbogears.flash(_('To complete the CLA, a valid country code ' + \
'must be specified. Please select one now.'))
turbogears.redirect('/cla/')
if [True for char in person.telephone if char not in self.PHONEDIGITS]:
turbogears.flash(_('Telephone numbers can only consist of ' + \
'numbers, "-", "+", "(", ")", or " ". Please reenter using ' +\
'only those characters.'))
turbogears.redirect('/cla/')
group = Groups.by_name(self.CLAGROUPNAME)
try:
# Everything is correct.
person.apply(group, person) # Apply for the new group
session.flush()
except fas.ApplyError:
# This just means the user already is a member (probably
# unapproved) of this group
pass
except Exception:
turbogears.flash(_("You could not be added to the '%s' group.") %
group.name)
turbogears.redirect('/cla/')
return dict()
try:
# Everything is correct.
person.sponsor(group, person) # Sponsor!
session.flush()
except fas.SponsorError:
turbogears.flash(_("You are already a part of the '%s' group.") %
group.name)
turbogears.redirect('/cla/')
except:
turbogears.flash(_("You could not be added to the '%s' group.") %
group.name)
turbogears.redirect('/cla/')
date_time = datetime.utcnow()
Log(author_id = person.id, description = 'Completed CLA',
changetime = date_time)
cla_subject = \
_('Fedora ICLA completed for %(human_name)s (%(username)s)') % \
{'username': person.username, 'human_name': person.human_name}
cla_text = _('''
Fedora user %(username)s has completed an ICLA (below).
Username: %(username)s
Email: %(email)s
Date: %(date)s
If you need to revoke it, please visit this link:
%(rejecturl)s/accounts/cla/reject/%(username)s
=== CLA ===
''') % {'username': person.username,
'email': person.email,
'date': date_time.ctime(),
'rejecturl' : config.get('base_url_filter.base_url').rstrip('/')}
# Sigh.. if only there were a nicer way.
plugin = TextTemplateEnginePlugin()
cla_text += plugin.transform(dict(person=person),
'fas.templates.cla.cla').render(method='text',
encoding=None)
send_mail(config.get('legal_cla_email'), cla_subject, cla_text)
turbogears.flash(_("You have successfully completed the CLA. You " + \
"are now in the '%s' group.") % group.name)
turbogears.redirect('/user/view/%s' % person.username)
return dict()
| chensuchun/fas | fas/cla.py | Python | gpl-2.0 | 15,152 | [
"VisIt"
] | 1f5efa947819640ba98867ffb42c466eb7486f6773bf4219ad2a7ff2ca66762c |
""" Implementation of Module
"""
# pylint: disable=unused-wildcard-import,wildcard-import
import copy
import os
# try: # this part to import as part of the DIRAC framework
from DIRAC.Core.Workflow.Parameter import *
__RCSID__ = "$Id$"
class ModuleDefinition(AttributeCollection):
def __init__(self, type=None, obj=None, parent=None):
# we can create an object from another module
# or from the ParameterCollection
AttributeCollection.__init__(self)
self.main_class_obj = None # used for the interpretation only
# self.module_obj = None # used for the interpretation only
self.parent = parent
if (obj is None) or isinstance(obj, ParameterCollection):
self.setType('nitgiven')
self.setDescrShort('')
self.setDescription('')
self.setRequired('')
self.setBody('')
self.setOrigin('')
self.setVersion(0.0)
self.parameters = ParameterCollection(obj) # creating copy
elif isinstance(obj, ModuleDefinition):
self.setType(obj.getType())
self.setDescrShort(obj.getDescrShort())
self.setDescription(obj.getDescription())
self.setBody(obj.getBody())
self.setRequired(obj.getRequired())
self.setOrigin(obj.getOrigin())
self.setVersion(obj.getVersion())
self.parameters = ParameterCollection(obj.parameters)
else:
raise TypeError('Can not create object type ' + str(type(self)) + ' from the ' + str(type(obj)))
if type:
self.setType(type)
def createCode(self):
return self.getBody() + '\n'
def __str__(self):
return str(type(self)) + ':\n' + AttributeCollection.__str__(self) + self.parameters.__str__()
def toXML(self):
ret = '<ModuleDefinition>\n'
ret = ret + AttributeCollection.toXML(self)
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleDefinition>\n'
return ret
def toXMLFile(self, outFile):
if os.path.exists(outFile):
os.remove(outFile)
with open(outFile, 'w') as xmlfile:
xmlfile.write(self.toXML())
def loadCode(self):
# print 'Loading code of the Module =', self.getType()
# version 1 - OLD sample
#ret = compile(self.getBody(),'<string>','exec')
# eval(ret)
# return ret #returning ref just in case we might need it
#
if self.getBody(): # checking the size of the string
# version 2 - we assume that each self.body is a module oblect
# module = new.module(self.getType()) # create empty module object
# sys.modules[self.getType()] = module # add reference for the import operator
# exec self.getBody() in module.__dict__ # execute code itself
# self.module_obj = module # save pointer to this module
# if module.__dict__.has_key(self.getType()):
# self.main_class_obj = module.__dict__[self.getType()] # save class object
# version 3
# A.T. Use vars() function to inspect local objects instead of playing with
# fake modules. We assume that after the body execution there will be
# a class with name "self.getType()" defined in the local scope.
exec self.getBody()
if self.getType() in vars():
self.main_class_obj = vars()[self.getType()] # save class object
else:
# it is possible to have this class in another module, we have to check for this
# but it is advisible to use 'from module import class' operator
# otherwise i could not find the module. But it is possible that
# in the future I can change this code to do it more wisely
sErr = 'Can not find class '\
+ self.getType()\
+ ' in the module created from the body of the module '\
+ self.getOrigin()
raise TypeError(sErr)
else:
raise TypeError('The body of the Module ' + self.getType() + ' seems empty')
return self.main_class_obj
class ModuleInstance(AttributeCollection):
def __init__(self, name, obj=None, parent=None):
AttributeCollection.__init__(self)
self.instance_obj = None # used for the interpretation only
self.parent = parent
if obj is None:
self.parameters = ParameterCollection()
elif isinstance(obj, ModuleInstance) or isinstance(obj, ModuleDefinition):
if name is None:
self.setName(obj.getName())
else:
self.setName(name)
self.setType(obj.getType())
self.setDescrShort(obj.getDescrShort())
self.parameters = ParameterCollection(obj.parameters)
elif isinstance(obj, ParameterCollection):
# set attributes
self.setName(name)
self.setType("")
self.setDescrShort("")
self.parameters = ParameterCollection(obj)
elif obj is not None:
raise TypeError('Can not create object type ' + str(type(self)) + ' from the ' + str(type(obj)))
def createCode(self, ind=2):
str = indent(ind) + self.getName() + ' = ' + self.getType() + '()\n'
str = str + self.parameters.createParametersCode(ind, self.getName())
str = str + indent(ind) + self.getName() + '.execute()\n\n'
return str
def __str__(self):
return str(type(self)) + ':\n' + AttributeCollection.__str__(self) + self.parameters.__str__()
def toXML(self):
ret = '<ModuleInstance>\n'
ret = ret + AttributeCollection.toXML(self)
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleInstance>\n'
return ret
def execute(self, step_parameters, definitions):
# print 'Executing ModuleInstance ',self.getName(),'of type',self.getType()
self.instance_obj = definitions[self.getType()].main_class_obj() # creating instance
# FIXME: pylint complains that ParameterCollection doesn't have execute. What should this be?
self.parameters.execute(self.getName()) # pylint: disable=no-member
self.instance_obj.execute2()
class DefinitionsPool(dict):
def __init__(self, parent, pool=None):
dict.__init__(self)
self.parent = parent # this is a cache value, we propagate it into next level
if isinstance(pool, DefinitionsPool):
for k in pool.keys():
v = pool[k]
if isinstance(v, ModuleDefinition):
obj = ModuleDefinition(None, v, self.parent)
elif isinstance(v, StepDefinition): # pylint: disable=undefined-variable
obj = StepDefinition(None, v, self.parent) # pylint: disable=undefined-variable
else:
raise TypeError('Error: __init__ Wrong type of object stored in the DefinitionPool ' + str(type(pool[v])))
self.append(obj)
elif pool is not None:
raise TypeError('Can not create object type ' + str(type(self)) + ' from the ' + str(type(pool)))
def __setitem__(self, i, obj):
if i not in self:
dict.__setitem__(self, i, obj)
# print 'We need to write piece of code to replace existent DefinitionsPool.__setitem__()'
# print 'For now we ignore it for the', obj.getType()
def append(self, obj):
""" We add new Definition (Module, Step)
"""
self[obj.getType()] = obj
obj.setParent(self.parent)
return obj
def remove(self, obj):
del self[obj.getType()]
obj.setParent(None)
def compare(self, s):
if not isinstance(s, DefinitionsPool):
return False # chacking types of objects
if len(s) != len(self):
return False # checkin size
# we need to compare the keys of dictionaries
if self.keys() != s.keys():
return False
for k in self.keys():
if (k not in s) or (not self[k].compare(s[k])):
return False
return True
def __str__(self):
ret = str(type(self)) + ': number of Definitions:' + str(len(self)) + '\n'
index = 0
for k in self.keys():
ret = ret + 'definition(' + str(index) + ')=' + str(self[k]) + '\n'
index = index + 1
return ret
def setParent(self, parent):
self.parent = parent
# we need to propagate it just in case it was different one
for k in self.keys():
self[k].setParent(parent)
def getParent(self):
return self.parent
def updateParents(self, parent):
self.parent = parent
for k in self.keys():
self[k].updateParents(parent)
def toXML(self):
ret = ''
for k in self.keys():
ret = ret + self[k].toXML()
return ret
def createCode(self):
str = ''
for k in self.keys():
# str=str+indent(2)+'# flush code for instance\n'
str = str + self[k].createCode()
return str
def loadCode(self):
for k in self.keys():
# load code of the modules
self[k].loadCode()
class InstancesPool(list):
def __init__(self, parent, pool=None):
list.__init__(self)
self.parent = None # this is a cache value, we propagate it into next level
if isinstance(pool, InstancesPool):
for v in pool:
# I need to check this fubction
# if it would be a costructor we coul pass parent into it
self.append(copy.deepcopy(v))
if isinstance(v, ModuleInstance):
obj = ModuleInstance(None, v, self.parent)
elif isinstance(v, StepInstance): # pylint: disable=undefined-variable
obj = StepInstance(None, v, self.parent) # pylint: disable=undefined-variable
else:
raise TypeError('Error: __init__ Wrong type of object stored in the DefinitionPool ' + str(type(pool[v])))
self.append(obj)
elif pool is not None:
raise TypeError('Can not create object type ' + str(type(self)) + ' from the ' + str(type(pool)))
def __str__(self):
ret = str(type(self)) + ': number of Instances:' + str(len(self)) + '\n'
index = 0
for v in self:
ret = ret + 'instance(' + str(index) + ')=' + str(v) + '\n'
index = index + 1
return ret
def setParent(self, parent):
self.parent = parent
for v in self:
v.setParent(parent)
def getParent(self):
return self.parent
def updateParents(self, parent):
self.parent = parent
for v in self:
v.updateParents(parent)
def append(self, obj):
list.append(self, obj)
obj.setParent(self.parent)
def toXML(self):
ret = ''
for v in self:
ret = ret + v.toXML()
return ret
def findIndex(self, name):
i = 0
for v in self:
if v.getName() == name:
return i
i = i + 1
return - 1
def find(self, name):
for v in self:
if v.getName() == name:
return v
return None
def delete(self, name):
for v in self:
if v.getName() == name:
self.remove(v)
v.setParent(None)
def compare(self, s):
if (not isinstance(s, InstancesPool) or (len(s) != len(self))):
return False
for v in self:
for i in s:
if v.getName() == i.getName():
if not v.compare(i):
return False
else:
break
else:
# if we reached this place naturally we can not find matching name
return False
return True
def createCode(self):
str = ''
for inst in self:
str = str + inst.createCode()
str = str + indent(2) + '# output assignment\n'
for v in inst.parameters:
if v.isOutput():
str = str + v.createParameterCode(2, 'self')
str = str + '\n'
return str
| andresailer/DIRAC | Core/Workflow/Module.py | Python | gpl-3.0 | 11,204 | [
"DIRAC"
] | 95d1d4fe1dbd7a2a2d707107d45047524c412b2fdae36b9f22b645c8263c6197 |
#!/usr/bin/env python
import sys
from Bio.Blast import NCBIXML
#Usage, opens an outfile and then parses any number of .xml files into that outfile, printing all hits
#parse_blastn.py outfile.txt anynumberofinfiles.xml
OUT = open(sys.argv[1], 'w')
OUT.write("Query Name\tQuery Length\tSubject Name\tSubject Length\tAlignment Length\tQuery Start\tQuery End\tSubject Start\tSubject End\tQuery Sequence\tSubject Sequence\tHsp Score\tHsp Expect\tHsp Identities\tPercent Match\tNumber_of_gaps")
for xml_file in sys.argv[2:]:
result_handle = open(xml_file)
blast_records = NCBIXML.parse(result_handle)
for rec in blast_records:
for alignment in rec.alignments:
for hsp in alignment.hsps:
OUT.write('\n'+ str(rec.query) + '\t' + str(rec.query_length) + '\t' + str(alignment.title) + '\t' + str(alignment.length) + '\t' + str(hsp.align_length) + '\t' + str(hsp.query_start) + '\t' + str(hsp.query_end) + '\t' + str(hsp.sbjct_start) + '\t' + str(hsp.sbjct_end) + '\t' + str(hsp.query) + '\t' + str(hsp.sbjct) + '\t' + str(hsp.score) + '\t' + str(hsp.expect) + '\t' + str(hsp.identities) + '\t' + str(float(hsp.identities)/int(hsp.align_length)) + '\t' + str(hsp.gaps))
| cuttlefishh/papers | vibrio-fischeri-transcriptomics/code/python/parse_blast.py | Python | mit | 1,176 | [
"BLAST"
] | eb4f01ded067a3d95297d9cfa96ff7cf71ce6ed3245813cd36e668f5aa9f75f4 |
#-*- coding: utf-8 -*-
#
# test webfs_upload.py
# PV` Created on 26/09/2015.
#
# Bases:
#---------------------------------------------------
# 2006/02 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 2007/07/26 Slightly modified by Brian Schneider
#
# in order to support unicode files ( multipart_encode function )
# From http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
#
# 2013/07 Ken Olum <kdo@cosmos.phy.tufts.edu>
#
# Removed one of \r\n and send Content-Length
#
# 2014/05 Applied Fedora rpm patch
#
# https://bugzilla.redhat.com/show_bug.cgi?id=920778
# http://pkgs.fedoraproject.org/cgit/python-MultipartPostHandler2.git/diff/python-MultipartPostHandler2-cut-out-main.patch?id=c1638bb3e45596232b4d02f1e69901db0c28cfdb
#
# 2014/05/09 Sergio Basto <sergio@serjux.com>
#
# Better deal with None values, don't throw an exception and just send an empty string.
#
#---------------------------------------------------
import sys
import urllib
import urllib2
import mimetools #, mimetypes
import os
import stat
from base64 import standard_b64encode
from cStringIO import StringIO
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) == file:
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, 1)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
# authstr = 'Basic ' + standard_b64encode('ESP8266' + ':' + '0123456789')
# if(request.has_header('Authorization')):
# print "Replacing %s with %s" % (request.get_header('Authorization'), authstr)
# request.add_unredirected_header('Authorization', authstr)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buffer is None:
buffer = StringIO()
for(key, value) in vars:
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"' % key)
if value is None:
value = ""
# if type(value) is not str, we need str(value) to not error with cannot concatenate 'str'
# and 'dict' or 'tuple' or somethingelse objects
buffer.write('\r\n\r\n' + str(value) + '\r\n')
for(key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
filename = fd.name.split('/')[-1]
# contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
contenttype = 'application/octet-stream'
buffer.write('--%s\r\n' % boundary)
buffer.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
buffer.write('Content-Type: %s\r\n' % contenttype)
buffer.write('Content-Length: %s\r\n' % file_size)
fd.seek(0)
buffer.write('\r\n' + fd.read() + '\r\n')
buffer.write('--' + boundary + '--\r\n')
buffer = buffer.getvalue()
return boundary, buffer
multipart_encode = Callable(multipart_encode)
https_request = http_request
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] == '-h':
print 'Usage: filename espurl username password'
sys.exit(0)
filename = '../webbin/WEBFiles.bin'
espurl = 'http://sesp8266/fsupload'
username = 'ESP8266'
password = '0123456789'
if len(sys.argv) > 1:
if sys.argv[1]:
filename = sys.argv[1]
if len(sys.argv) > 2:
if sys.argv[2]:
espurl = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3]:
username = sys.argv[3]
if len(sys.argv) > 4:
if sys.argv[4]:
password = sys.argv[4]
print('Start send %s to %s' % (filename, espurl))
opener = urllib2.build_opener(MultipartPostHandler)
authstr = 'Basic ' + standard_b64encode(username + ':' + password)
opener.addheaders.append(['Authorization', authstr])
params = { 'overlay' : open(filename, 'rb') }
try:
resp = opener.open(espurl, params)
print('End, response code: %s\n' % resp.code)
sys.exit(0)
except Exception as e:
print('Failed open (%s) %s\n' % (str(e).decode('cp1251'), espurl))
sys.exit(1)
| vad7/CO2UART | webfs/webfs_upload1.py | Python | unlicense | 5,374 | [
"Brian"
] | 169b0187c36c782bb38b5054c5d063481edfe326eeb04842c5b34826bfd35a89 |
#!/usr/bin/python
# Script: logview.py
# Purpose: plots of LAMMPS log-file thermodynamic data
# Syntax: logview.py gnu/matlab files ...
# gnu/matlab = style of plots to create
# files = one or more log files
# Example: logview.py gnu log.*
# Author: Steve Plimpton (Sandia)
# enable script to run from Python directly w/out Pizza.py
import sys
from log import log
from plotview import plotview
from gnu import gnu
from matlab import matlab
if not globals().has_key("argv"): argv = sys.argv
# main script
if len(argv) < 3:
raise StandardError, "Syntax: logview.py gnu/matlab files ..."
style = argv[1]
files = ' '.join(argv[2:])
lg = log(files)
exec "plot = %s()" % style
p = plotview(lg,plot)
| eddiejessup/pizza | scripts/logview.py | Python | gpl-2.0 | 725 | [
"LAMMPS"
] | b052a4a0f2f99d09a3af5ec9ef2824d368d06f16a0b46fb28d47f7df7bc74442 |
from __future__ import print_function
import sys
import os
import tempfile
import subprocess
import random
import string
import glob
import struct
import atexit
import six
import pysam
from six.moves import urllib
from . import cbedtools
from . import settings
from . import filenames
from . import genome_registry
from .logger import logger
from .cbedtools import create_interval_from_list
BUFSIZE = 1
_tags = {}
def _check_for_bedtools(program_to_check='intersectBed', force_check=False):
"""
Checks installation as well as version (based on whether or not "bedtools
intersect" works, or just "intersectBed")
"""
if settings._bedtools_installed and not force_check:
return True
try:
p = subprocess.Popen(
[os.path.join(settings._bedtools_path, 'bedtools'),
settings._prog_names[program_to_check]],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
settings._bedtools_installed = True
settings._v_2_15_plus = True
except (OSError, KeyError) as err:
try:
p = subprocess.Popen(
[os.path.join(settings._bedtools_path, program_to_check)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
settings._bedtools_installed = True
settings._v_2_15_plus = False
except OSError as err:
if err.errno == 2:
if settings._bedtools_path:
add_msg = "(tried path '%s')" % settings._bedtools_path
else:
add_msg = ""
raise OSError("Please make sure you have installed BEDTools"
"(https://github.com/arq5x/bedtools) and that "
"it's on the path. %s" % add_msg)
def _check_for_R():
try:
p = subprocess.Popen(
[os.path.join(settings._R_path, 'R'), '--version'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
settings._R_installed = True
except OSError:
if settings._R_path:
add_msg = "(tried path '%s')" % settings._R_path
else:
add_msg = ""
raise ValueError(
'Please install R and ensure it is on your path %s' % add_msg)
class Error(Exception):
"""Base class for this module's exceptions"""
pass
class BEDToolsError(Error):
def __init__(self, cmd, msg):
self.cmd = str(cmd)
self.msg = str(msg)
def __str__(self):
m = '\nCommand was:\n\n\t' + self.cmd + '\n' + \
'\nError message was:\n' + self.msg
return m
def isGZIP(fn):
with open(fn, 'rb') as f:
start = f.read(3)
if start == b"\x1f\x8b\x08":
return True
return False
def isBGZIP(fn):
"""
Reads a filename to see if it's a BGZIPed file or not.
"""
header_str = open(fn, 'rb').read(15)
if len(header_str) < 15:
return False
header = struct.unpack_from('BBBBiBBHBBB', header_str)
id1, id2, cm, flg, mtime, xfl, os_, xlen, si1, si2, slen = header
if (id1 == 31) and (id2 == 139) and (cm == 8) and (flg == 4) and \
(si1 == 66) and (si2 == 67) and (slen == 2):
return True
return False
def isBAM(fn):
if not isBGZIP(fn):
return False
# Need to differentiate between BAM and plain 'ol BGZIP. Try reading header
# . . .
try:
pysam.Samfile(fn, 'rb')
return True
except ValueError:
return False
def find_tagged(tag):
"""
Returns the bedtool object with tagged with *tag*. Useful for tracking
down bedtools you made previously.
"""
for key, item in _tags.items():
try:
if item._tag == tag:
return item
except AttributeError:
pass
raise ValueError('tag "%s" not found' % tag)
def _flatten_list(x):
nested = True
while nested:
check_again = False
flattened = []
for element in x:
if isinstance(element, list):
flattened.extend(element)
check_again = True
else:
flattened.append(element)
nested = check_again
x = flattened[:]
return x
def set_tempdir(tempdir):
"""
Set the directory for temp files.
Useful for clusters that use a /scratch partition rather than a /tmp dir.
Convenience function to simply set tempfile.tempdir.
"""
if not os.path.exists(tempdir):
errstr = 'The tempdir you specified, %s, does not exist' % tempdir
raise ValueError(errstr)
tempfile.tempdir = tempdir
def get_tempdir():
"""
Gets the current tempdir for the module.
"""
return tempfile.gettempdir()
def cleanup(verbose=False, remove_all=False):
    """
    Deletes all temp files from the current session (or optionally *all*
    sessions).

    If *verbose*, reports what it's doing.
    If *remove_all*, then ALL files matching "pybedtools.*.tmp" in the temp
    dir will be deleted.
    """
    if settings.KEEP_TEMPFILES:
        # The user asked to keep tempfiles around; do nothing.
        return
    for fn in filenames.TEMPFILES:
        if verbose:
            print('removing', fn)
        if os.path.exists(fn):
            os.unlink(fn)
    if remove_all:
        pattern = os.path.join(get_tempdir(), 'pybedtools.*.tmp')
        for fn in glob.glob(pattern):
            os.unlink(fn)
def _version_2_15_plus_names(prog_name):
    """
    Translate an old-style BEDTools program name (e.g. "intersectBed")
    into the argv prefix used by BEDTools >= 2.15, e.g.
    ["/path/to/bedtools", "intersect"].  Pre-2.15 installations keep the
    original single-element form.

    Raises BEDToolsError for names that are neither old-style nor already
    new-style subcommand names.
    """
    if not settings._bedtools_installed:
        _check_for_bedtools()
    if not settings._v_2_15_plus:
        return [prog_name]
    try:
        prog_name = settings._prog_names[prog_name]
    except KeyError:
        # A name already in the new "bedtools <subcommand>" style is fine
        # as-is; anything else is unknown.  (The original raised
        # unconditionally here, which made new-style names unusable, and
        # its message was missing a space after the program name.)
        if prog_name not in settings._new_names:
            raise BEDToolsError(
                prog_name, prog_name + ' not a recognized BEDTools program')
    return [os.path.join(settings._bedtools_path, 'bedtools'), prog_name]
def call_bedtools(cmds, tmpfn=None, stdin=None, check_stderr=None, decode_output=True, encode_input=True):
    """
    Use subprocess.Popen to call BEDTools and catch any errors.
    Output goes to *tmpfn*, or, if None, output stays in subprocess.PIPE and
    can be iterated over.
    *stdin* is an optional file-like object that will be sent to
    subprocess.Popen.
    Prints some useful help upon getting common errors.
    *check_stderr* is a function that takes the stderr string as input and
    returns True if it's OK (that is, it's not really an error). This is
    needed, e.g., for calling fastaFromBed which will report that it has to
    make a .fai for a fasta file.
    *decode_output* should be set to False when you are iterating over a BAM
    file, where the data represent binary rather than text data.

    *encode_input*, similarly, controls whether lines taken from the *stdin*
    iterable are encoded to bytes before being written to the subprocess.

    Returns *tmpfn* when writing to a file, otherwise a generator over the
    subprocess's stdout lines.
    """
    input_is_stream = stdin is not None
    output_is_stream = tmpfn is None
    # Translate the program name (first element) into the 2.15+
    # "bedtools <subcommand>" form; copy first so the caller's list is
    # not mutated.
    _orig_cmds = cmds[:]
    cmds = []
    cmds.extend(_version_2_15_plus_names(_orig_cmds[0]))
    cmds.extend(_orig_cmds[1:])
    # The four stream-vs-file input/output combinations below each get
    # their own Popen configuration.
    try:
        # coming from an iterator, sending as iterator
        if input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is '
                'stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if encode_input:
                for line in stdin:
                    p.stdin.write(line.encode())
            else:
                for line in stdin:
                    p.stdin.write(line)
            # This is important to prevent deadlocks
            p.stdin.close()
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            stderr = None
        # coming from an iterator, writing to file
        if input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is file')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if hasattr(stdin, 'read'):
                # True file-like object: hand the whole payload to
                # communicate() in one shot.
                stdout, stderr = p.communicate(stdin.read())
            else:
                # Line iterator: stream the lines in, then wait for exit.
                for item in stdin:
                    p.stdin.write(item.encode())
                stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # coming from a file, sending as iterator
        if not input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, '
                'output is stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            stderr = None
        # file-to-file
        if not input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, output '
                'is filename (%s)', tmpfn)
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # Check if it's OK using a provided function to check stderr. If it's
        # OK, dump it to sys.stderr so it's printed, and reset it to None so we
        # don't raise an exception
        if check_stderr is not None:
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if check_stderr(stderr):
                sys.stderr.write(stderr)
                stderr = None
        if stderr:
            # Fix for issue #147. In general, we consider warnings to not be
            # fatal, so just show 'em and continue on.
            #
            # bedtools source has several different ways of showing a warning,
            # but they seem to all have "WARNING" in the first 20 or so
            # characters
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if len(stderr) > 20 and "WARNING" in stderr[:20]:
                sys.stderr.write(stderr)
            else:
                raise BEDToolsError(subprocess.list2cmdline(cmds), stderr)
    except (OSError, IOError) as err:
        print('%s: %s' % (type(err), os.strerror(err.errno)))
        print('The command was:\n\n\t%s\n' % subprocess.list2cmdline(cmds))
        # Friendly hints for the most common errno values.
        # NOTE(review): an errno outside this dict raises KeyError below --
        # presumably 2/13/24 cover the cases seen in practice; confirm.
        problems = {
            2: ('* Did you spell the command correctly?',
                '* Do you have BEDTools installed and on the path?'),
            13: ('* Do you have permission to write '
                 'to the output file ("%s")?' % tmpfn,),
            24: ('* Too many files open -- please submit '
                 'a bug report so that this can be fixed',)
        }
        print('Things to check:')
        print('\n\t' + '\n\t'.join(problems[err.errno]))
        raise OSError('See above for commands that gave the error')
    return output
def set_bedtools_path(path=""):
    """
    Explicitly set path to `BEDTools` installation dir.

    If BEDTools is not available on your system path, specify the path to
    the dir containing the BEDTools executables (intersectBed, subtractBed,
    etc) with this function.

    To reset and use the default system path, call this function with no
    arguments or use path="".
    """
    settings._bedtools_path = path
def set_R_path(path=""):
    """
    Explicitly set path to `R` installation dir.

    If R is not available on the path, then it can be explicitly specified
    here.  Use path="" to reset to the default system path.
    """
    settings._R_path = path
def _check_sequence_stderr(x):
"""
If stderr created by fastaFromBed starts with 'index file', then don't
consider it an error.
"""
if isinstance(x, bytes):
x = x.decode('UTF-8')
if x.startswith('index file'):
return True
if x.startswith("WARNING"):
return True
return False
def _call_randomintersect(_self, other, iterations, intersect_kwargs,
                          shuffle_kwargs, report_iterations, debug,
                          _orig_processes):
    """
    Helper function that list-ifies the output from randomintersection,
    s.t. it can be pickled across a multiprocess Pool.
    """
    results = _self.randomintersection(
        other, iterations,
        intersect_kwargs=intersect_kwargs,
        shuffle_kwargs=shuffle_kwargs,
        report_iterations=report_iterations,
        debug=False, processes=None,
        _orig_processes=_orig_processes)
    return list(results)
def close_or_delete(*args):
    """
    Single function that can be used to get rid of a BedTool, whether it's
    a streaming or file-based version: filenames are unlinked, open
    file-likes are closed, and generators are told to stop.
    """
    for bt in args:
        fn = bt.fn
        if isinstance(fn, six.string_types):
            os.unlink(fn)
        elif hasattr(fn, 'close'):
            fn.close()
        if hasattr(fn, 'throw'):
            fn.throw(StopIteration)
def n_open_fds():
    """
    Return the number of file descriptors currently open by this process,
    as reported by the external ``lsof`` utility (must be on the PATH).
    """
    pid = os.getpid()
    procs = subprocess.check_output(
        ['lsof', '-w', '-Ff', '-p', str(pid)])
    # check_output returns bytes on Python 3; the original compared a
    # bytes element (an int) against the str 'f', which never matched.
    if isinstance(procs, bytes):
        procs = procs.decode('ascii', 'replace')
    nprocs = 0
    for line in procs.splitlines():
        # With '-Ff', each output line is one field; file-descriptor
        # fields look like 'f<number>' (e.g. 'f3').
        if line.startswith('f') and line[1:].isdigit():
            nprocs += 1
    return nprocs
import re
coord_re = re.compile(
r"""
(?P<chrom>.+):
(?P<start>\d+)-
(?P<stop>\d+)
(?:\[(?P<strand>.)\])?""", re.VERBOSE)
def string_to_interval(s):
    """
    Convert a string of the form "chrom:start-stop" or
    "chrom:start-stop[strand]" to an interval.

    Assumes zero-based coords.  If *s* is already an interval, it is
    returned as-is.
    """
    if not isinstance(s, six.string_types):
        # Already an interval object.
        return s
    m = coord_re.search(s)
    fields = [m.group('chrom'), m.group('start'), m.group('stop')]
    if m.group('strand'):
        # Pad name/score so strand lands in BED field 6.
        fields.extend(['.', '0', m.group('strand')])
    return create_interval_from_list(fields)
class FisherOutput(object):
    """Parsed result of ``bedtools fisher``."""

    def __init__(self, s, **kwargs):
        """
        fisher returns text results like::

            # Contingency Table
            #_________________________________________
            #           | not in -b    | in -b        |
            # not in -a | 3137160615   | 503          |
            #     in -a | 100          | 46           |
            #_________________________________________
            # p-values for fisher's exact test
            left    right   two-tail        ratio
            1.00000 0.00000 0.00000 2868973.922

        *s* may be a filename or an iterator of lines.  The parsed counts
        are stored in ``self.table``, the p-values/odds ratio in
        ``self.left_tail``, ``self.right_tail``, ``self.two_tail`` and
        ``self.ratio``.
        """
        if isinstance(s, str):
            # A plain string is interpreted as a filename; close the
            # handle deterministically (the original leaked it).
            with open(s) as fileobj:
                s = fileobj.read()
        if hasattr(s, 'next') or hasattr(s, '__next__'):
            # An iterator of lines (Python 2 or 3 style; the original
            # only recognized the Python 2 'next' attribute).
            s = ''.join(i for i in s)
        table = {
            'not in -a': {
                'not in -b': None,
                'in -b': None
            },
            'in -a': {
                'not in -b': None,
                'in -b': None,
            },
        }
        self.text = s
        lines = s.splitlines()
        for i in lines:
            if 'not in -a' in i:
                # Table columns are "| not in -b | in -b |"; the original
                # unpacked these two fields in swapped order.
                _, not_in_b, in_b, _ = i.strip().split('|')
                table['not in -a']['not in -b'] = int(not_in_b)
                table['not in -a']['in -b'] = int(in_b)
            elif ' in -a' in i:
                # elif: "not in -a" also contains " in -a", so keep the
                # two row checks mutually exclusive.
                _, not_in_b, in_b, _ = i.strip().split('|')
                table['in -a']['not in -b'] = int(not_in_b)
                table['in -a']['in -b'] = int(in_b)
        self.table = table
        # The last line holds "left right two-tail ratio".
        left, right, two_tail, ratio = lines[-1].split()
        self.left_tail = float(left)
        self.right_tail = float(right)
        self.two_tail = float(two_tail)
        self.ratio = float(ratio)

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<%s at %s>\n%s' % (self.__class__.__name__, id(self), self.text)
def internet_on(timeout=1):
    """
    Return True if http://genome.ucsc.edu responds within *timeout*
    seconds, False otherwise.  Used to give a clearer error before
    attempting UCSC lookups.
    """
    # The original bound the response and exception to unused names and
    # fell through a dead `pass` to `return False`; same behavior, tidier.
    try:
        urllib.request.urlopen('http://genome.ucsc.edu', timeout=timeout)
        return True
    except urllib.error.URLError:
        return False
def get_chromsizes_from_ucsc(genome, saveas=None, mysql='mysql', timeout=None):
    """
    Download chrom size info for *genome* from UCSC and returns the dictionary.
    If you need the file, then specify a filename with *saveas* (the dictionary
    will still be returned as well).
    If ``mysql`` is not on your path, specify where to find it with
    *mysql=<path to mysql executable>*.
    *timeout* is how long to wait for a response; mostly used for testing.

    Returns a dict mapping chromosome name -> (0, size).
    Raises ValueError when no internet connection is detected, and OSError
    when the mysql executable cannot be found.

    Example usage:
    >>> dm3_chromsizes = get_chromsizes_from_ucsc('dm3')
    >>> for i in sorted(dm3_chromsizes.items()):
    ...     print('{0}: {1}'.format(*i))
    chr2L: (0, 23011544)
    chr2LHet: (0, 368872)
    chr2R: (0, 21146708)
    chr2RHet: (0, 3288761)
    chr3L: (0, 24543557)
    chr3LHet: (0, 2555491)
    chr3R: (0, 27905053)
    chr3RHet: (0, 2517507)
    chr4: (0, 1351857)
    chrM: (0, 19517)
    chrU: (0, 10049037)
    chrUextra: (0, 29004656)
    chrX: (0, 22422827)
    chrXHet: (0, 204112)
    chrYHet: (0, 347038)
    """
    if not internet_on(timeout=timeout):
        raise ValueError('It appears you don\'t have an internet connection '
                         '-- unable to get chromsizes from UCSC')
    # Query UCSC's public MySQL mirror for the chromInfo table.
    cmds = [mysql,
            '--user=genome',
            '--host=genome-mysql.cse.ucsc.edu',
            '-A',
            '-e',
            'select chrom, size from %s.chromInfo' % genome]
    try:
        p = subprocess.Popen(cmds,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             bufsize=1)
        stdout, stderr = p.communicate()
        if stderr:
            print(stderr)
            print('Commands were:\n')
            print((subprocess.list2cmdline(cmds)))
        # Skip the header row; remaining rows are "<chrom>\t<size>".
        lines = stdout.splitlines()[1:]
        d = {}
        for line in lines:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            chrom, size = line.split()
            d[chrom] = (0, int(size))
        if saveas is not None:
            chromsizes_to_file(d, saveas)
        return d
    except OSError as err:
        # errno 2 (ENOENT) means the mysql executable itself was not found.
        if err.errno == 2:
            raise OSError("Can't find mysql -- if you don't have it "
                          "installed, you'll have to get chromsizes "
                          " manually, or "
                          "specify the path with the 'mysql' kwarg.")
        else:
            raise
def chromsizes_to_file(chrom_sizes, fn=None):
    """
    Converts a *chromsizes* dictionary to a file.  If *fn* is None, then a
    tempfile is created (which can be deleted with pybedtools.cleanup()).

    Lines are written as "<chrom>\\t<stop>\\n" sorted by chromosome name;
    the start coordinate of each (start, stop) tuple is ignored, matching
    the standard UCSC chromsizes format.  If *chrom_sizes* is a string it
    is treated as an assembly name and looked up via chromsizes().

    Returns the filename.
    """
    if fn is None:
        # delete=False so the file outlives the handle; close the handle
        # right away (the original leaked it) and re-open for writing below.
        tmp = tempfile.NamedTemporaryFile(prefix='pybedtools.',
                                          suffix='.tmp', delete=False)
        fn = tmp.name
        tmp.close()
        # Register so cleanup() can remove it later.
        filenames.TEMPFILES.append(fn)
    if isinstance(chrom_sizes, str):
        chrom_sizes = chromsizes(chrom_sizes)
    with open(fn, 'wt') as fout:
        for chrom, bounds in sorted(chrom_sizes.items()):
            fout.write(chrom + '\t' + str(bounds[1]) + '\n')
    return fn
def chromsizes(genome):
    """
    Looks for a *genome* already included in the genome registry; if not found
    then it looks it up on UCSC. Returns the dictionary of chromsize tuples
    where each tuple has (start,stop).
    Chromsizes are described as (start, stop) tuples to allow randomization
    within specified regions; e. g., you can make a chromsizes dictionary that
    represents the extent of a tiling array.
    Example usage:
    >>> dm3_chromsizes = chromsizes('dm3')
    >>> for i in sorted(dm3_chromsizes.items()):
    ...     print(i)
    ('chr2L', (0, 23011544))
    ('chr2LHet', (0, 368872))
    ('chr2R', (0, 21146708))
    ('chr2RHet', (0, 3288761))
    ('chr3L', (0, 24543557))
    ('chr3LHet', (0, 2555491))
    ('chr3R', (0, 27905053))
    ('chr3RHet', (0, 2517507))
    ('chr4', (0, 1351857))
    ('chrM', (0, 19517))
    ('chrU', (0, 10049037))
    ('chrUextra', (0, 29004656))
    ('chrX', (0, 22422827))
    ('chrXHet', (0, 204112))
    ('chrYHet', (0, 347038))
    """
    # Prefer the locally-bundled registry; fall back to a UCSC lookup.
    if hasattr(genome_registry, genome):
        return getattr(genome_registry, genome)
    return get_chromsizes_from_ucsc(genome)
# Remove this session's temp files automatically when the interpreter exits.
atexit.register(cleanup)
| poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/helpers.py | Python | apache-2.0 | 21,821 | [
"pysam"
] | a689c41a40b144f713827735587dab09fa5c595dd827012fe0ad796fa78dbb00 |
# Copyright 2012 Google Inc.
#
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Checkers for various standard library functions."""
import re
import six
import sys
import astroid
from astroid.bases import Instance
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers import utils
# open() is provided by the builtins on Python 2 but lives in the _io
# module on Python 3; visit_callfunc compares an inferred callable's
# root module name against this constant to recognize open()/file() calls.
if sys.version_info >= (3, 0):
    OPEN_MODULE = '_io'
else:
    OPEN_MODULE = '__builtin__'
def _check_mode_str(mode):
    """Return True if *mode* is a valid argument for open()'s mode
    parameter, emulating the interpreter's own validation (including the
    Python 2 'U' universal-newlines handling)."""
    # check type
    if not isinstance(mode, six.string_types):
        return False
    # check syntax
    modes = set(mode)
    _mode = "rwatb+U"
    creating = False
    if six.PY3:
        # 'x' (exclusive creation) only exists on Python 3.
        _mode += "x"
        creating = "x" in modes
    # Reject unknown characters and duplicated characters (a duplicate
    # makes len(mode) exceed the number of distinct characters).
    if modes - set(_mode) or len(mode) > len(modes):
        return False
    # check logic
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes  # NOTE(review): set but never used below
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        # Parses as `writing or appending or (creating and six.PY3)`;
        # since `creating` is only ever set on PY3, the `and six.PY3`
        # part is redundant but harmless.
        if writing or appending or creating and six.PY3:
            return False
        # 'U' implies reading; on Python 2 it also implies binary.
        reading = True
        if not six.PY3:
            binary = True
    if text and binary:
        return False
    # At most one of r/w/a/x may be given...
    total = reading + writing + appending + (creating if six.PY3 else 0)
    if total > 1:
        return False
    # ...and at least one of them must be given.
    if not (reading or writing or appending or creating and six.PY3):
        return False
    # other 2.x constraints
    if not six.PY3:
        if "U" in mode:
            mode = mode.replace("U", "")
            if "r" not in mode:
                mode = "r" + mode
        return mode[0] in ("r", "w", "a", "U")
    return True
class StdlibChecker(BaseChecker):
    """Pylint checker for misuse of standard-library functions: invalid
    open() modes, datetime.time objects used in boolean context, and
    assertTrue/assertFalse called with a constant."""
    __implements__ = (IAstroidChecker,)
    name = 'stdlib'
    # Message definitions registered with pylint; keys are the warning IDs.
    msgs = {
        'W1501': ('"%s" is not a valid mode for open.',
                  'bad-open-mode',
                  'Python supports: r, w, a[, x] modes with b, +, '
                  'and U (only with r) options. '
                  'See http://docs.python.org/2/library/functions.html#open'),
        'W1502': ('Using datetime.time in a boolean context.',
                  'boolean-datetime',
                  'Using datetetime.time in a boolean context can hide '
                  'subtle bugs when the time they represent matches '
                  'midnight UTC. This behaviour was fixed in Python 3.5. '
                  'See http://bugs.python.org/issue13936 for reference.',
                  {'maxversion': (3, 5)}),
        'W1503': ('Redundant use of %s with constant '
                  'value %r',
                  'redundant-unittest-assert',
                  'The first argument of assertTrue and assertFalse is'
                  'a condition. If a constant is passed as parameter, that'
                  'condition will be always true. In this case a warning '
                  'should be emitted.')
    }
    @utils.check_messages('bad-open-mode', 'redundant-unittest-assert')
    def visit_callfunc(self, node):
        """Visit a CallFunc node."""
        # Dispatch on the inferred call target: open()/file() from the
        # builtin module get mode checking; unittest.case methods get the
        # redundant-assert check.
        if hasattr(node, 'func'):
            infer = utils.safe_infer(node.func)
            if infer:
                if infer.root().name == OPEN_MODULE:
                    if getattr(node.func, 'name', None) in ('open', 'file'):
                        self._check_open_mode(node)
                if infer.root().name == 'unittest.case':
                    self._check_redundant_assert(node, infer)
    @utils.check_messages('boolean-datetime')
    def visit_unaryop(self, node):
        """`not x` evaluates x for truth; check the operand."""
        if node.op == 'not':
            self._check_datetime(node.operand)
    @utils.check_messages('boolean-datetime')
    def visit_if(self, node):
        """An `if` test is a boolean context."""
        self._check_datetime(node.test)
    @utils.check_messages('boolean-datetime')
    def visit_ifexp(self, node):
        """A conditional expression's test is a boolean context."""
        self._check_datetime(node.test)
    @utils.check_messages('boolean-datetime')
    def visit_boolop(self, node):
        """Every operand of and/or is evaluated for truth."""
        for value in node.values:
            self._check_datetime(value)
    def _check_redundant_assert(self, node, infer):
        """Warn when assertTrue/assertFalse receives a constant first
        argument, making the assertion outcome fixed."""
        if (isinstance(infer, astroid.BoundMethod) and
                node.args and isinstance(node.args[0], astroid.Const) and
                infer.name in ['assertTrue', 'assertFalse']):
            self.add_message('redundant-unittest-assert',
                             args=(infer.name, node.args[0].value, ),
                             node=node)
    def _check_datetime(self, node):
        """ Check that a datetime was infered.
        If so, emit boolean-datetime warning.
        """
        try:
            infered = next(node.infer())
        except astroid.InferenceError:
            return
        if (isinstance(infered, Instance) and
                infered.qname() == 'datetime.time'):
            self.add_message('boolean-datetime', node=node)
    def _check_open_mode(self, node):
        """Check that the mode argument of an open or file call is valid."""
        try:
            mode_arg = utils.get_argument_from_call(node, position=1,
                                                    keyword='mode')
        except utils.NoSuchArgumentError:
            # No mode argument given; nothing to validate.
            return
        if mode_arg:
            mode_arg = utils.safe_infer(mode_arg)
            if (isinstance(mode_arg, astroid.Const)
                    and not _check_mode_str(mode_arg.value)):
                self.add_message('bad-open-mode', node=node,
                                 args=mode_arg.value)
def register(linter):
    """Pylint plugin entry point: attach a StdlibChecker to *linter*."""
    checker = StdlibChecker(linter)
    linter.register_checker(checker)
| Shouqun/node-gn | tools/depot_tools/third_party/pylint/checkers/stdlib.py | Python | mit | 6,250 | [
"VisIt"
] | 06a0e553595c110eb3a1989fc9ca5ad9f7c383087882d56b5c1d3271869f039d |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from itertools import izip, chain
import re
import vtk
from .class_tree import ClassTree
from .specs import ClassSpec, SpecList, \
ClassInputPortSpec as InputPortSpec, \
ClassOutputPortSpec as OutputPortSpec
from .vtk_parser import VTKMethodParser
# Shared parser instance used throughout this module to introspect the
# Set/Get/toggle methods of VTK classes.
parser = VTKMethodParser()

# VTK class names that must never be wrapped as VisTrails modules
# (Qt-coupled classes, internal bookkeeping/info keys, and classes known
# to break on specific VTK releases -- see the inline notes).
disallowed_classes = set(
    [
        'simplewrapper', # ticket 464: VTK 5.10 on OpenSuSE needs this
        'vtkEventQtSlotConnect', # VTK 5.10.1 OpenSuSE (uses QObject)
        'vtkQtView', # VTK 5.10.1 OpenSuSE (uses QWidget)
        'vtkCriticalSection',
        'vtkDataArraySelection',
        'vtkDebugLeaks',
        'vtkDirectory',
        'vtkDynamicLoader',
        'vtkFunctionParser',
        'vtkGarbageCollector',
        'vtkHeap',
        'vtkInformationKey',
        'vtkInstantiator',
        'vtkLogLookupTable', # use vtkLookupTable.SetScaleToLog10() instead
        'vtkMath',
        'vtkModelMetadata',
        'vtkMultiProcessController',
        'vtkMutexLock',
        'vtkOutputWindow',
        'vtkPriorityQueue',
        'vtkQtInitialization',
        'vtkReferenceCount',
        'vtkRenderWindowCollection',
        'vtkRenderWindowInteractor',
        'vtkTesting',
        'vtkWindow',
        'vtkContext2D',   #Not working for VTK 5.7.0
        'vtkPLYWriter', #Not working for VTK 5.7.0.
        'vtkBooleanTexture', #Not working for VTK 5.7.0
        'vtkImageMaskBits', #Not working for VTK 5.7.0
        'vtkHardwareSelector',#Not working for VTK 5.7.0
        'vtkOpenGLExtensionManager',
        # these show up with new parse
        'vtkAbstractContextBufferId',
        'vtkAbstractElectronicData',
        'vtkCallbackCommand',
        'vtkImageComplex',
        'vtkInformationDataObjectKey',
        'vtkInformationDoubleKey',
        'vtkInformationDoubleVectorKey',
        'vtkInformationIdTypeKey',
        'vtkInformationInformationKey',
        'vtkInformationInformationVectorKey',
        'vtkInformationIntegerKey',
        'vtkInformationIntegerPointerKey',
        'vtkInformationIntegerVectorKey',
        'vtkInformationKeyVectorKey',
        'vtkInformationObjectBaseKey',
        'vtkInformationRequestKey',
        'vtkInformationStringKey',
        'vtkInformationStringVectorKey',
        'vtkInformationUnsignedLongKey',
        'vtkRenderWindow',
        'vtkShaderProgram2',
        'vtkShadowMapBakerPassLightCameras',
        'vtkShadowMapBakerPassTextures',
        'vtkTDxMotionEventInfo',
        'vtkVolumeRayCastDynamicInfo',
        'vtkVolumeRayCastStaticInfo',
        # For VTK 5.8
        'vtkMPICommunicatorOpaqueComm',
        # For VTK 6
        'vtkBlueObeliskData',
        'vtkSocketController',
        'vtkMPIController',
        'vtkInformationVariantVectorKey',
        'vtkInformationVariantKey',
        'QImage',
        'vtkPLOT3DReader',
        # For VTK 6.2
        'QuantileDefinitionType'
    ])

# Class-tree nodes skipped entirely in create_module() (MPI / Geo caches).
disallowed_modules = set(
    [
        'vtkGeoAlignedImageCache',
        'vtkGeoTerrainCache',
        'vtkMPIGroup'
    ])
def create_module(base_cls_name, node):
    """create_module(base_cls_name: String, node: TreeNode) -> [ModuleSpec]
    Construct a module spec that inherits from base_cls_name with
    specification from node, then recurse into node's children.
    Disallowed modules and enum ('int') nodes yield an empty list.
    """
    if node.name in disallowed_modules: return []
    if node.name == 'int': return [] #enum
    def obsolete_class_list():
        # Classes that still exist in some VTK builds but should be
        # treated as abstract/obsolete; missing ones are skipped.
        lst = []
        items = ['vtkInteractorStyleTrackball',
                 'vtkStructuredPointsGeometryFilter',
                 'vtkConstrainedPointHandleRepresentation',
                 'vtkRenderViewBase',
                 'vtkRenderView']
        def try_to_add_item(item):
            try:
                lst.append(getattr(vtk, item))
            except AttributeError:
                pass
        for item in items:
            try_to_add_item(item)
        return lst
    obsolete_list = obsolete_class_list()
    def is_abstract():
        """is_abstract tries to instantiate the class. If it's
        abstract, this will raise."""
        # Consider obsolete classes abstract
        if node.klass in obsolete_list:
            return True
        try:
            getattr(vtk, node.name)()
        except (TypeError, NotImplementedError): # VTK raises type error on abstract classes
            return True
        return False
    # Fail loudly if a class docstring can't be decoded; the spec below
    # stores the decoded docstring.
    try:
        node.klass.__doc__.decode('latin-1')
    except UnicodeDecodeError:
        print "ERROR decoding docstring", node.name
        raise
    input_ports, output_ports = get_ports(node.klass)
    output_ports = list(output_ports) # drop generator
    # Algorithms are cacheable (mappers and raw windows are not);
    # scalar trees are cacheable too.
    cacheable = (issubclass(node.klass, vtk.vtkAlgorithm) and
                 (not issubclass(node.klass, vtk.vtkAbstractMapper))) or \
        issubclass(node.klass, vtk.vtkScalarTree)
    is_algorithm = issubclass(node.klass, vtk.vtkAlgorithm)
    # Writers get a tempfile hook; algorithms get a progress callback hook.
    tempfile = '_set_tempfile' if issubclass(node.klass, vtk.vtkWriter) else None
    callback = '_set_callback' if is_algorithm else None
    methods_last = hasattr(node.klass, 'SetRenderWindow')
    module_spec = ClassSpec(node.name, base_cls_name, node.name,
                            node.klass.__doc__.decode('latin-1'), callback,
                            tempfile, cacheable, input_ports, output_ports,
                            compute='Update', cleanup='_cleanup',
                            methods_last=methods_last, abstract=is_abstract())
    module_specs = [module_spec]
    for child in node.children:
        if child.name in disallowed_classes:
            continue
        module_specs.extend(create_module(node.name, child))
    return module_specs
def get_doc(cls, port_name):
    """
    Return the docstring of *cls*'s attribute named *port_name*, with any
    trailing ``_<n>`` overload suffix removed from the name and the
    signature block (everything before the first blank line) stripped
    from the docstring.
    """
    match = re.match(r"(.*)_\d+$", port_name)
    attr_name = match.group(1) if match else port_name
    doc = getattr(cls, attr_name).__doc__
    # Remove the C++ function signatures that precede the first blank line.
    cut = doc.find('\n\n')
    return doc[cut + 2:] if cut > 0 else doc
def prune_signatures(cls, name, signatures, output=False):
    """prune_signatures tries to remove redundant signatures to reduce
    overloading. It _mutates_ the given parameter.

    It does this by performing several operations:
    1) It compares a 'flattened' version of the types
    against the other 'flattened' signatures. If any of them match, we
    keep only the 'flatter' ones.
    A 'flattened' signature is one where parameters are not inside a
    tuple.
    2) We explicitly forbid a few signatures based on modules and names.
    3) Signatures that are duplicates after bracket removal are dropped.
    """
    # yeah, this is Omega(n^2) on the number of overloads. Who cares?
    def flatten(type_):
        # Flatten a nested parameter-type spec into a single list,
        # marking nested-list boundaries with '[' / ']' on the strings.
        if type_ is None:
            return []
        def convert(entry):
            if isinstance(entry, tuple):
                return list(entry)
            elif isinstance(entry, str):
                return [entry]
            else:
                result = []
                first = True
                lastList = True
                for e in entry:
                    if (isinstance(e, list)):
                        # Close the bracket opened by the preceding run of
                        # non-list elements.  (The original indexed
                        # result[len(result)], which is always one past the
                        # end and raised IndexError whenever this branch
                        # was reached.)
                        if lastList == False: result[-1] = result[-1] + ']'
                        aux = e
                        aux.reverse()
                        aux[0] = '[' + aux[0]
                        aux[-1] = aux[-1] + ']'
                        result.extend(aux)
                        lastList = True
                    else:
                        if first: e = '[' + e
                        result.append(e)
                        lastList = False
                    first = False
                return result
        result = []
        for entry in type_:
            result.extend(convert(entry))
        return result
    flattened_entries = [flatten(sig[1]) for
                         sig in signatures]
    def hit_count(entry):
        # Number of signatures whose flattened form equals *entry*.
        # (The original shadowed *entry* with the loop variable and then
        # tested membership of that loop variable, so every signature
        # counted as len(flattened_entries), defeating the pruning logic.)
        result = 0
        for candidate in flattened_entries:
            if candidate == entry:
                result += 1
        return result
    hits = [hit_count(entry) for entry in flattened_entries]
    def forbidden(flattened, hit_count, original):
        # PlaceWidget() with no arguments is hardcoded elsewhere.
        if (issubclass(cls, vtk.vtk3DWidget) and
                name == 'PlaceWidget' and
                flattened == []):
            return True
        # We forbid this because addPorts hardcodes this but
        # SetInputArrayToProcess is an exception for the InfoVis
        # package
        if (cls == vtk.vtkAlgorithm and
                name != 'SetInputArrayToProcess'):
            return True
        return False
    # This is messy: a signature is only allowed if there's no
    # explicit disallowing of it. Then, if it's not overloaded,
    # it is also allowed. If it is overloaded and not the flattened
    # version, it is pruned. If these are output ports, there can be
    # no parameters.
    def passes(flattened, hit_count, original):
        if forbidden(flattened, hit_count, original):
            return False
        if hit_count == 1:
            return True
        if original[1] is None:
            return True
        if output and len(original[1]) > 0:
            return False
        if hit_count > 1 and len(original[1]) == len(flattened):
            return True
        return False
    signatures[:] = [original for (flattened, hit_count, original)
                     in izip(flattened_entries,
                             hits,
                             signatures)
                     if passes(flattened, hit_count, original)]
    # then we remove the duplicates, if necessary
    unique_signatures = []
    # Remove the arrays and tuples inside the signature
    # in order to transform it in a single array
    # Also remove the '[]' from the Strings
    def removeBracts(signatures):
        result = []
        stack = list(signatures)
        while (len(stack) != 0):
            curr = stack.pop(0)
            if (isinstance(curr, str)):
                c = curr.replace('[', '')
                c = c.replace(']', '')
                result.append(c)
            elif (curr == None):
                result.append(curr)
            elif (isinstance(curr, list)):
                curr.reverse()
                for c in curr: stack.insert(0, c)
            elif (isinstance(curr, tuple)):
                cc = list(curr)
                cc.reverse()
                for c in cc: stack.insert(0, c)
            else:
                result.append(curr)
        return result
    unique2 = []
    for s in signatures:
        aux = removeBracts(s)
        if not unique2.count(aux):
            unique_signatures.append(s)
            unique2.append(aux)
    signatures[:] = unique_signatures
# Method-name patterns used to recognize file-name accessors: any
# "...FileName" method, and specifically the "Set...FileName" setters.
file_name_pattern = re.compile('.*FileName$')
set_file_name_pattern = re.compile('Set.*FileName$')
def resolve_overloaded_name(name, ix, signatures):
    """
    Return a unique port name for overload *ix* of method *name*.

    VTK supports static overloading while VisTrails does not, so when a
    method has several signatures each overload gets a 1-based numeric
    suffix (e.g. "SetValue_2"); an un-overloaded name is kept unchanged.
    """
    if len(signatures) == 1:
        return name
    return '%s_%d' % (name, ix + 1)
# Mapping from C/C++ parameter-type strings (as produced by the VTK
# method parser) to VisTrails basic port types.  The bracketed forms
# ('[float', 'float]', ...) occur where flatten()/prune_signatures mark
# array boundaries on the type strings.
type_map_dict = {'int': "basic:Integer",
                 'long': "basic:Integer",
                 'float': "basic:Float",
                 'char*': "basic:String",
                 'char *': "basic:String",
                 'string': "basic:String",
                 'char': "basic:String",
                 'const char*': "basic:String",
                 'const char *': "basic:String",
                 '[float': "basic:Float",
                 'float]': "basic:Float",
                 '[int': "basic:Integer",
                 'int]': "basic:Integer",
                 'bool': "basic:Boolean",
                 'unicode': 'basic:String'}

# Set of the VisTrails basic types appearing as values above.
type_map_values = set(type_map_dict.itervalues())
# ["basic:Integer", "basic:Float", "basic:String", "basic:Boolean"]
def get_port_types(name):
    """ get_port_types(name: str) -> str
    Convert from C/C++ types into VisTrails port type
    (recursing into tuples/lists element-wise).  Unmapped non-empty
    names are returned unchanged; empty/None become None.
    """
    if isinstance(name, (tuple, list)):
        return [get_port_types(x) for x in name]
    try:
        return type_map_dict[name]
    except KeyError:
        pass
    if name is not None and name.strip():
        #if not name.startswith("vtk"):
        #    print "RETURNING RAW TYPE:", name
        return name
    return None
def is_type_allowed(t):
    """Return True if *t* (a type name, or a nested list of type names)
    may be exposed as a VisTrails port type."""
    if isinstance(t, list):
        # Every element of a compound type must itself be allowed.
        for sub in t:
            if not is_type_allowed(sub):
                return False
        return True
    if t is None or t in ("tuple", "function"):
        return False
    return t not in disallowed_classes
def get_algorithm_ports(cls):
    """ get_algorithm_ports(cls: class) -> (input_ports, output_ports)
    For subclasses of vtkAlgorithm, expose every
    SetInputConnection([id],[port]) and GetOutputPort([id]) as
    SetInputConnection{id}([port]) and GetOutputPort{id} port specs.
    Non-algorithms (and abstract algorithms) yield empty lists.
    """
    input_ports = []
    output_ports = []
    if not issubclass(cls, vtk.vtkAlgorithm):
        return input_ports, output_ports
    # Instantiating gives us the port counts and filters out abstract
    # classes, which raise TypeError.
    try:
        instance = cls()
    except TypeError:
        return input_ports, output_ports
    for i in xrange(instance.GetNumberOfInputPorts()):
        input_ports.append(InputPortSpec(
            name="SetInputConnection%d" % i,
            method_name="SetInputConnection",
            port_type="vtkAlgorithmOutput",
            docstring=get_doc(cls, "SetInputConnection"),
            show_port=True,
            prepend_params=[i]))
    for i in xrange(instance.GetNumberOfOutputPorts()):
        output_ports.append(OutputPortSpec(
            name="GetOutputPort%d" % i,
            method_name="GetOutputPort",
            port_type="vtkAlgorithmOutput",
            docstring=get_doc(cls, "GetOutputPort"),
            show_port=True))
    return input_ports, output_ports
# Get* methods that are bookkeeping/introspection rather than data
# outputs; these are never exposed as output ports.
disallowed_get_ports = set([
    'GetClassName',
    'GetErrorCode',
    'GetNumberOfInputPorts',
    'GetNumberOfOutputPorts',
    'GetOutputPortInformation',
    'GetTotalNumberOfInputConnections',
])
def get_get_ports(cls, get_list):
    """Build OutputPortSpecs for the plain Get* methods of *cls* named in
    *get_list*, skipping blacklisted bookkeeping getters, overloads that
    require inputs, and overloads returning more than a single value.
    Returns a ([], output_ports) pair so the shape matches the other
    (inputs, outputs) port collectors."""
    output_ports = []
    for name in get_list:
        if name in disallowed_get_ports:
            continue
        method = getattr(cls, name)
        signatures = parser.get_method_signature(method)
        if len(signatures) > 1:
            prune_signatures(cls, name, signatures, output=True)
        for ix, getter in enumerate(signatures):
            # getter is (return_types, parameter_types).
            if getter[1]:
                #print ("Can't handle getter %s (%s) of class %s: Needs input "
                #       "to get output" % (ix+1, name, cls.__name__))
                continue
            if len(getter[0]) != 1:
                #print ("Can't handle getter %s (%s) of class %s: More than a "
                #       "single output" % (ix+1, name, cls.__name__))
                continue
            port_type = get_port_types(getter[0][0])
            if is_type_allowed(port_type):
                # Strip the "Get" prefix; overloads get a _N suffix.
                n = resolve_overloaded_name(name[3:], ix, signatures)
                port_spec = OutputPortSpec(name=n,
                                           method_name=name,
                                           port_type=port_type,
                                           show_port=False,
                                           docstring=get_doc(cls, name))
                output_ports.append(port_spec)
    return [], output_ports
# Set/Get pairs that are wired up specially elsewhere (connections,
# progress reporting, array selection) and must not become plain ports.
disallowed_get_set_ports = set(['ReferenceCount',
                                'InputConnection',
                                'OutputPort',
                                'Progress',
                                'ProgressText',
                                'InputArrayToProcess',
                                ])

# Set/Get pairs exposed as VisTrails Color input ports instead of raw
# float triples.
color_ports = set(["DiffuseColor", "Color", "AmbientColor", "SpecularColor",
                   "EdgeColor", "Background", "Background2"])

# VTK5 -> VTK6 renames for the data-input setter family (see
# get_vtk6_name below for the vtkXYPlotActor special case).
to_vtk6_names = {'AddInput': 'AddInputData',
                 'SetInput': 'SetInputData',
                 'AddSource': 'AddSourceData',
                 'SetSource': 'SetSourceData'}
def get_vtk6_name(cls, name):
    """Translate a pre-VTK6 method name (e.g. SetInput) into its VTK6
    equivalent (SetInputData).  vtkXYPlotActor is special-cased because its
    AddInput became AddDataSetInput rather than AddInputData.
    Names with no translation are returned unchanged.
    """
    if cls == vtk.vtkXYPlotActor and name == 'AddInput':
        return 'AddDataSetInput'
    return to_vtk6_names.get(name, name)
# FIXME use defaults and ranges!
def get_get_set_ports(cls, get_set_dict):
    """get_get_set_ports(cls: class, get_set_dict: dict) -> (list, list)
    Convert all Setxxx methods of cls into input ports and all Getxxx
    methods of module into output ports

    Keyword arguments:
    cls --- class
    get_set_dict --- the Set/Get method signatures returned by vtk_parser
    """
    input_ports = []
    output_ports = []
    for name in get_set_dict:
        if name in disallowed_get_set_ports:
            continue
        getter_name = 'Get%s' % name
        setter_name = 'Set%s' % name
        getter_method = getattr(cls, getter_name)
        setter_method = getattr(cls, setter_name)
        getter_sig = parser.get_method_signature(getter_method)
        setter_sig = parser.get_method_signature(setter_method)
        if len(getter_sig) > 1:
            prune_signatures(cls, getter_name, getter_sig, output=True)
        # --- getter half: wrap as output ports -------------------------
        for order, getter in enumerate(getter_sig):
            # getter is (outputs, inputs); skip getters that need input.
            if getter[1]:
                #print ("Can't handle getter %s (%s) of class %s: Needs input "
                #       "to get output" % (order+1, name, cls.__name__))
                continue
            # Only single-valued getters can become one output port.
            if len(getter[0]) != 1:
                #print ("Can't handle getter %s (%s) of class %s: More than a "
                #       "single output" % (order+1, name, cls.__name__))
                continue
            port_type = get_port_types(getter[0][0])
            if is_type_allowed(port_type):
                if name in color_ports:
                    ps = OutputPortSpec(name=name,
                                        method_name=getter_name,
                                        port_type="basic:Color",
                                        show_port=False,
                                        docstring=get_doc(cls, getter_name))
                    # NOTE(review): an OutputPortSpec is appended to
                    # input_ports here -- looks suspicious; confirm
                    # upstream intent before changing.
                    input_ports.append(ps)
                else:
                    ps = OutputPortSpec(name=name,
                                        method_name=getter_name,
                                        port_type=port_type,
                                        show_port=False,
                                        docstring=get_doc(cls, getter_name))
                    output_ports.append(ps)
        # --- setter half: wrap as input ports --------------------------
        if len(setter_sig) > 1:
            prune_signatures(cls, setter_name, setter_sig)
        docstring = get_doc(cls, setter_name)
        v = vtk.vtkVersion()
        version = [v.GetVTKMajorVersion(),
                   v.GetVTKMinorVersion(),
                   v.GetVTKBuildVersion()]
        if version < [6, 0, 0]:
            # Always use VTK6-style names for InputData-style types
            setter_name = get_vtk6_name(cls, setter_name)
            name = setter_name[3:]
        for ix, setter in enumerate(setter_sig):
            # Setters with no declared parameters cannot be input ports.
            if setter[1] is None:
                continue
            # Wrap SetFileNames for VisTrails file access
            # FIXME add documentation
            if file_name_pattern.match(name):
                ps = InputPortSpec(name=name[:-4],
                                   method_name=setter_name,
                                   port_type="basic:File",
                                   show_port=True)
                input_ports.append(ps)
            # Wrap color methods for VisTrails GUI facilities
            # FIXME add documentation
            elif name in color_ports:
                ps = InputPortSpec(name=name,
                                   method_name=setter_name,
                                   port_type="basic:Color",
                                   show_port=False)
                input_ports.append(ps)
            # Wrap SetRenderWindow for exporters
            # FIXME Add documentation
            elif name == 'RenderWindow' and cls == vtk.vtkExporter:
                ps = InputPortSpec(name="vtkRenderer",
                                   port_type="vtkRenderer",
                                   show_port=True)
                input_ports.append(ps)
            else:
                n = resolve_overloaded_name(name, ix, setter_sig)
                port_types = get_port_types(setter[1])
                if is_type_allowed(port_types):
                    if len(setter[1]) == 1:
                        # Single-argument setters are shown unless the type
                        # already has a GUI widget in type_map_values.
                        show_port = True
                        try:
                            show_port = port_types[0] not in type_map_values
                        except TypeError: # hash error
                            pass
                        port_types = port_types[0]
                    else:
                        show_port = False
                    ps = InputPortSpec(name=n,
                                       method_name=setter_name,
                                       port_type=port_types,
                                       show_port=show_port,
                                       docstring=docstring,
                                       depth=1)
                    input_ports.append(ps)
    return input_ports, output_ports
# xxxOn/xxxOff method pairs that must not become boolean ports.
disallowed_toggle_ports = set(['GlobalWarningDisplay',
                               'Debug',
                               ])
def get_toggle_ports(cls, toggle_dict):
    """get_toggle_ports(cls: class, toggle_dict: dict) -> (list, list)

    Create one boolean input port per xxxOn/xxxOff method pair.

    Keyword arguments:
    cls --- the VTK class being wrapped
    toggle_dict --- maps each toggle name to its default value
    """
    ports = []
    for toggle_name, default in toggle_dict.iteritems():
        if toggle_name in disallowed_toggle_ports:
            continue
        spec = InputPortSpec(name=toggle_name,
                             method_name=toggle_name, # With On/Off appended
                             method_type='OnOff',
                             port_type="basic:Boolean",
                             show_port=False,
                             defaults=[bool(default)],
                             docstring=get_doc(cls, toggle_name + "On"))
        ports.append(spec)
    return ports, []
# (state name, mode) pairs to skip when building SetXToY enum ports.
disallowed_state_ports = set([('InputArray', 'Process')])
def get_state_ports(cls, state_dict):
    """ get_state_ports(cls: class, state_dict: dict) -> (list, list)
    Convert all SetxxxToyyy methods of module into input ports

    Keyword arguments:
    cls --- class
    state_dict --- the State method signatures returned by vtk_parser
    """
    input_ports = []
    for name in state_dict:
        enum_values = []
        translations = {}
        # Full name of the first mode's setter, used only to look up
        # documentation for the resulting port.
        method_name = "Set%sTo%s" % (name, state_dict[name][0][0])
        method_name_short = "Set%sTo" % name
        for mode in state_dict[name]:
            if (name, mode[0]) in disallowed_state_ports:
                continue
            # The same mode label must always map to the same value.
            if mode[0] in translations:
                if translations[mode[0]] != mode[1]:
                    raise Exception("Duplicate entry with different value")
                continue
            translations[mode[0]] = mode[1]
            enum_values.append(mode[0])
        # One enum-valued string port per SetXToY family.
        ps = InputPortSpec(name=name,
                           method_name=method_name_short,
                           method_type='SetXToY',
                           port_type="basic:String",
                           entry_types=['enum'],
                           values=[enum_values],
                           show_port=False,
                           docstring=get_doc(cls, method_name))
        input_ports.append(ps)
    return input_ports, []
# Miscellaneous methods that must never be wrapped as ports: object
# bookkeeping, observer management and pipeline-update entry points.
disallowed_other_ports = set(
    [
     'BreakOnError',
     'DeepCopy',
     'FastDelete',
     'HasObserver',
     'HasExecutive',
     'InvokeEvent',
     'IsA',
     'Modified',
     'NewInstance',
     'PrintRevisions',
     'RemoveAllInputs',
     'RemoveObserver',
     'RemoveObservers',
     'SafeDownCast',
     # 'SetInputArrayToProcess',
     'ShallowCopy',
     'Update',
     'UpdateInformation',
     'UpdateProgress',
     'UpdateWholeExtent',
     # DAK: These are taken care of by s.upper() == s test
     # 'GUI_HIDE',
     # 'INPUT_ARRAYS_TO_PROCESS',
     # 'INPUT_CONNECTION',
     # 'INPUT_IS_OPTIONAL',
     # 'INPUT_IS_REPEATABLE',
     # 'INPUT_PORT',
     # 'INPUT_REQUIRED_DATA_TYPE',
     # 'INPUT_REQUIRED_FIELDS',
     # 'IS_INTERNAL_VOLUME',
     # 'IS_EXTERNAL_SURFACE',
     # 'MANAGES_METAINFORMATION',
     # 'POINT_DATA',
     # 'POINTS',
     # 'PRESERVES_ATTRIBUTES',
     # 'PRESERVES_BOUNDS',
     # 'PRESERVES_DATASET',
     # 'PRESERVES_GEOMETRY',
     # 'PRESERVES_RANGES',
     # 'PRESERVES_TOPOLOGY',
     # for VTK 6
     'SetMaterialProperties',
    ])

# FIXME deal with this in diff...
force_not_optional_port = set(
    ['ApplyViewTheme',
     ])
def get_other_ports(cls, other_list):
    """ addOtherPorts(cls: Module, other_list: list) -> (list, list)
    Convert all other ports such as Insert/Add.... into input/output

    Keyword arguments:
    cls --- class
    other_list --- any other method signatures that is not
    Algorithm/SetGet/Toggle/State type
    """
    input_ports = []
    for name in other_list:
        # DAK: check for static methods as name.upper() == name
        if name in disallowed_other_ports or name.upper() == name:
            continue
        elif name=='CopyImportVoidPointer':
            # FIXME add documentation
            ps = InputPortSpec(name='CopyImportVoidString',
                               method_name='CopyImportVoidPointer',
                               port_type='basic:String',
                               show_port=True)
            # NOTE(review): ps is constructed but never appended to
            # input_ports in this branch -- looks like a dropped append;
            # confirm upstream intent.
        # elif name[:3] in ['Add','Set'] or name[:6]=='Insert':
        else:
            method = getattr(cls, name)
            # Static/class constants show up as ints; they have no signature.
            signatures = ""
            if not isinstance(method, int):
                signatures = parser.get_method_signature(method)
            if len(signatures) > 1:
                prune_signatures(cls, name, signatures)
            docstring = get_doc(cls, name)
            v = vtk.vtkVersion()
            version = [v.GetVTKMajorVersion(),
                       v.GetVTKMinorVersion(),
                       v.GetVTKBuildVersion()]
            if version < [6, 0, 0]:
                # Always use VTK6-style names for InputData-style types
                name = get_vtk6_name(cls, name)
            for (ix, sig) in enumerate(signatures):
                ([result], params) = sig
                port_types = get_port_types(params)
                # Only Add/Set/Insert methods, nullary methods, or
                # void-returning methods are wrapped; everything else is
                # skipped.
                if not (name[:3] in ['Add','Set'] or
                        name[:6]=='Insert' or
                        (port_types is not None and len(port_types) == 0) or
                        result is None):
                    continue
                if is_type_allowed(port_types):
                    n = resolve_overloaded_name(name, ix, signatures)
                    if n.startswith('Set'):
                        n = n[3:]
                    show_port = False
                    if len(port_types) < 1:
                        raise Exception("Shouldn't have empty input")
                    elif len(port_types) == 1:
                        if name[:3] in ['Add','Set'] or name[:6]=='Insert':
                            try:
                                show_port = port_types[0] not in type_map_values
                            except TypeError:
                                pass
                        port_types = port_types[0]
                    ps = InputPortSpec(name=n,
                                       method_name=name,
                                       port_type=port_types,
                                       show_port=show_port,
                                       docstring=docstring,
                                       depth=1)
                    input_ports.append(ps)
                elif result == None or port_types == []:
                    # Nullary method: expose it as a trigger-style
                    # boolean port.
                    n = resolve_overloaded_name(name, ix, signatures)
                    ps = InputPortSpec(name=n,
                                       method_name=name,
                                       port_type='basic:Boolean',
                                       method_type='nullary',
                                       docstring=get_doc(cls, name),
                                       depth=1)
                    input_ports.append(ps)
    return input_ports, []
def get_custom_ports(cls):
    """ get_custom_ports(cls: class) -> (list, list)
    Patch other ports needed to get a good wrapping

    Keyword arguments:
    cls --- class
    """
    in_specs = []
    out_specs = []
    if cls == vtk.vtkAlgorithm:
        in_specs.append(InputPortSpec(name='AddInputConnection',
                                      port_type='vtkAlgorithmOutput',
                                      show_port=True,
                                      docstring='Adds an input connection',
                                      depth=1))
    # vtkWriters have a custom File port
    if cls in [vtk.vtkWriter, vtk.vtkImageWriter]:
        out_specs.append(OutputPortSpec(name='file',
                                        port_type='basic:File',
                                        show_port=True,
                                        docstring='The written file'))
    elif cls == vtk.vtkVolumeProperty:
        in_specs.append(InputPortSpec(name='TransferFunction',
                                      method_name='SetTransferFunction',
                                      port_type='TransferFunction',
                                      docstring='Sets the transfer function to use'))
    elif cls == vtk.vtkDataSet:
        in_specs.append(InputPortSpec(name='SetPointData',
                                      method_name='PointData',
                                      port_type='vtkPointData',
                                      show_port=True,
                                      docstring='Sets the point data'))
        in_specs.append(InputPortSpec(name='SetCellData',
                                      method_name='CellData',
                                      port_type='vtkCellData',
                                      show_port=True,
                                      docstring='Sets the cell data'))
    elif cls == vtk.vtkCell:
        in_specs.append(InputPortSpec(name='SetPointIds',
                                      method_name='PointIds',
                                      port_type='vtkIdList',
                                      show_port=True,
                                      docstring='Sets the point id list'))
    elif cls == vtk.vtkMultiBlockPLOT3DReader:
        out_specs.append(OutputPortSpec(name='StructuredGrid',
                                        method_name='FirstBlock',
                                        port_type='vtkStructuredGrid',
                                        show_port=True,
                                        docstring='Returns .GetOutput().GetBlock(0)'))
    return in_specs, out_specs
def get_ports(cls):
    """get_ports(cls: vtk class) -> (iterable, iterable)

    Parse cls with vtk_parser and collect the input/output port specs from
    every per-category builder.  Returns (input_ports, output_ports) as
    chained iterables.
    """
    parser.parse(cls)
    ports_tuples = [get_algorithm_ports(cls),
                    get_get_ports(cls, parser.get_get_methods()),
                    get_get_set_ports(cls, parser.get_get_set_methods()),
                    get_toggle_ports(cls, parser.get_toggle_methods()),
                    get_state_ports(cls, parser.get_state_methods()),
                    get_other_ports(cls, parser.get_other_methods()),
                    get_custom_ports(cls)]
    # zip(*...) transposes [(ins, outs), ...] into two sequences; this
    # replaces the fragile izip(...).next()/.next() pattern (py2-only and
    # dependent on call order) with a single explicit unpack.
    input_lists, output_lists = zip(*ports_tuples)
    return chain(*input_lists), chain(*output_lists)
def parse(filename="vtk_raw.xml"):
    """Build the vtk class inheritance tree and write the generated module
    specs to *filename*."""
    inheritance_graph = ClassTree(vtk)
    inheritance_graph.create()
    v = vtk.vtkVersion()
    vtk_version = [v.GetVTKMajorVersion(),
                   v.GetVTKMinorVersion(),
                   v.GetVTKBuildVersion()]
    specs_list = []
    if vtk_version < [5, 7, 0]:
        # Pre-5.7 VTK has exactly one root class: vtkObjectBase.
        assert len(inheritance_graph.tree[0]) == 1
        base = inheritance_graph.tree[0][0]
        assert base.name == 'vtkObjectBase'
        roots = [base]
    else:
        roots = inheritance_graph.tree[0]
    for base in roots:
        for child in base.children:
            if child.name in disallowed_classes:
                continue
            specs_list.extend(create_module("vtkObjectBase", child))
    specs = SpecList(specs_list)
    specs.write_to_xml(filename)
# Script entry point: regenerate the VTK package spec XML.
if __name__ == '__main__':
    parse()
| VisTrails/VisTrails | vistrails/packages/vtk/vtk_wrapper/parse.py | Python | bsd-3-clause | 35,333 | [
"VTK"
] | 0c34147acc63788160c2d156092436a3f092f90105545640cd67c8e0e75c1109 |
#!/usr/bin/env python3
from setuptools import setup
from setuptools import Extension
from sys import version_info as vi

# heatsource9 is Python 3 only; fail early with a clear message on
# older interpreters.
installed_version = (vi[0], vi[1])
if installed_version < (3, 0):
    raise Exception("The default Python version must be 3.0 or higher, not {0}.{1}".format(vi[0], vi[1]))

# Build the Stream extension modules from the Cython sources; the
# pre-generated C sources are kept as a fallback path.
# NOTE(review): USE_CYTHON is hard-coded True, so the C fallback branch
# below is currently dead code.
USE_CYTHON = True

if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize('src/heatsource9/Stream/*.pyx', compiler_directives={'language_level': "3"})
else:
    extensions = [Extension('heatsource9.Stream.PyHeatsource', ['src/heatsource9/Stream/PyHeatsource.c']),
                  Extension('heatsource9.Stream.StreamNode', ['src/heatsource9/Stream/StreamNode.c'])]

setup(name='heatsource9',
      version='9.0.0b25',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
          'Natural Language :: English',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: POSIX :: Linux',
          'Operating System :: Microsoft :: Windows',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3 :: Only',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Topic :: Scientific/Engineering'
      ],
      long_description="""Heat Source is a computer model used by the
      Oregon Department of Environmental Quality to simulate stream
      thermodynamics and hydraulic routing. It was originally developed
      by Matt Boyd in 1996 as a Masters Thesis at Oregon State University
      in the Departments of Bioresource Engineering and Civil
      Engineering. Since then it has grown and changed significantly.
      Oregon DEQ currently maintains the Heat Source methodology and
      computer programming. Appropriate model use and application are
      the sole responsibility of the user.""",
      description='One-dimensional stream temperature modeling program',
      url='http://www.deq.state.or.us/wq/TMDLs/tools.htm',
      project_urls={
          'Documentation': 'https://www.oregon.gov/deq/FilterDocs/heatsourcemanual.pdf',
          'Source': 'https://github.com/rmichie/heatsource-9/'},
      author='Matt Boyd, Brian Kasper, John Metta, Ryan Michie, Dan Turner',
      maintainer='Ryan Michie, Oregon DEQ',
      maintainer_email='michie.ryan@deq.state.or.us',
      platforms=['darwin', 'linux', 'win32'],
      license=['GNU General Public License v3 (GPLv3)'],
      zip_safe=False,
      entry_points={'console_scripts': ['hs = heatsource9.BigRedButton:hs']},
      packages=['heatsource9',
                'heatsource9.ModelSetup',
                'heatsource9.Dieties',
                'heatsource9.Stream',
                'heatsource9.Utils'],
      package_dir={'': 'src'},
      install_requires=['Cython==0.29.16'],
      python_requires='>=3, <4'
      )
| rmichie/heatsource-9 | setup.py | Python | gpl-3.0 | 3,217 | [
"Brian"
] | 577054b9d895b1c4e14c6528b751af09de83df3ed273a3e17dbb93072decd292 |
#! /usr/bin/env python
# Benchmark: time 100 potential-energy evaluations for each Atomistica
# potential on the structure stored in rho_2.9.traj.
import timeit

import ase
import ase.io

from atomistica import *

###

pots = [ Brenner, BrennerScr, Tersoff, TersoffScr, Rebo2, Rebo2Scr, Rebo2SiCH, Rebo2SiCHScr ]

for pot in pots:
    # Re-read the structure for every potential so each run starts from a
    # fresh Atoms object (no cached results from the previous calculator).
    a = ase.io.read('rho_2.9.traj')
    a.calc = pot()
    t = timeit.timeit('a.get_potential_energy()',
                      setup='from __main__ import a',
                      number=100)
    # NOTE(review): Python 2 print statement -- this script will not run
    # under Python 3 as written.
    print '{0} {1}'.format(pot, t)
| Atomistica/atomistica | examples/ASE/time_potentials.py | Python | gpl-2.0 | 425 | [
"ASE"
] | 04571ebb109e5315d2d71bceeb2c10ca635adffcbbef406458ae120d3087b769 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# gpload - load file(s) into Greenplum Database
# Copyright Greenplum 2008
'''gpload [options] -f configuration file
Options:
-h hostname: host to connect to
-p port: port to connect to
-U username: user to connect as
-d database: database to connect to
-W: force password authentication
-q: quiet mode
-D: do not actually load data
-v: verbose
-V: very verbose
-l logfile: log output to logfile
--no_auto_trans: do not wrap gpload in transaction
--gpfdist_timeout timeout: gpfdist timeout value
--max_retries retry_times: max retry times on gpdb connection timed out. 0 means disabled, -1 means forever
--version: print version number and exit
-?: help
'''
import sys
import yaml
if sys.hexversion<0x2040400:
sys.stderr.write("gpload needs python 2.4.4 or higher\n")
sys.exit(2)
try:
import yaml
except ImportError:
sys.stderr.write("gpload needs pyyaml. You can get it from http://pyyaml.org.\n")
sys.exit(2)
import platform
try:
import pg
except ImportError:
try:
from pygresql import pg
except Exception as e:
pass
except Exception as e:
print(repr(e))
errorMsg = "gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e)
sys.stderr.write(str(errorMsg))
errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n"
sys.stderr.write(str(errorMsg))
sys.exit(2)
import hashlib
import datetime,getpass,os,signal,socket,threading,time,traceback,re
import subprocess
import uuid
try:
from gppylib.gpversion import GpVersion
except ImportError:
sys.stderr.write("gpload can't import gpversion, will run in GPDB5 compatibility mode.\n")
noGpVersion = True
else:
noGpVersion = False
thePlatform = platform.system()
if thePlatform in ['Windows', 'Microsoft']:
windowsPlatform = True
else:
windowsPlatform = False
if windowsPlatform == False:
import select
from sys import version_info
if version_info.major == 2 :
import __builtin__
long = __builtin__.long
else:
long = int
EXECNAME = 'gpload'

# Number of rows that produced formatting warnings during the last load;
# updated by notice_processor() from server NOTICE messages.
NUM_WARN_ROWS = 0

# Mapping for validating our configuration file. We're only concerned with
# keys -- stuff left of ':'. It gets complex in two cases: firstly when
# we handle blocks which have keys which are not keywords -- such as under
# COLUMNS:. Secondly, we want to detect when users put keywords in the wrong
# place. To that end, the mapping is structured such that:
#
#       key -> { 'parse_children' -> [ True | False ],
#                'parent' -> <parent name> }
#
# Each key is a keyword in the configuration file. parse_children tells us
# whether children are expected to be keywords. parent tells us the parent
# keyword or None
valid_tokens = {
    "version": {'parse_children': True, 'parent': None},
    "database": {'parse_children': True, 'parent': None},
    "user": {'parse_children': True, 'parent': None},
    "host": {'parse_children': True, 'parent': None},
    "port": {'parse_children': True, 'parent': [None, "source"]},
    "password": {'parse_children': True, 'parent': None},
    "gpload": {'parse_children': True, 'parent': None},
    "input": {'parse_children': True, 'parent': "gpload"},
    "source": {'parse_children': True, 'parent': "input"},
    "local_hostname": {'parse_children': False, 'parent': "source"},
    "port_range": {'parse_children': False, 'parent': "source"},
    "file": {'parse_children': False, 'parent': "source"},
    "ssl": {'parse_children': False, 'parent': "source"},
    "certificates_path": {'parse_children': False, 'parent': "source"},
    "columns": {'parse_children': False, 'parent': "input"},
    "transform": {'parse_children': True, 'parent': "input"},
    "transform_config": {'parse_children': True, 'parent': "input"},
    "max_line_length": {'parse_children': True, 'parent': "input"},
    "format": {'parse_children': True, 'parent': "input"},
    "delimiter": {'parse_children': True, 'parent': "input"},
    "escape": {'parse_children': True, 'parent': "input"},
    "null_as": {'parse_children': True, 'parent': "input"},
    "quote": {'parse_children': True, 'parent': "input"},
    "encoding": {'parse_children': True, 'parent': "input"},
    "force_not_null": {'parse_children': False, 'parent': "input"},
    "fill_missing_fields": {'parse_children': False, 'parent': "input"},
    "error_limit": {'parse_children': True, 'parent': "input"},
    "error_percent": {'parse_children': True, 'parent': "input"},
    "error_table": {'parse_children': True, 'parent': "input"},
    "log_errors": {'parse_children': False, 'parent': "input"},
    "header": {'parse_children': True, 'parent': "input"},
    "fully_qualified_domain_name": {'parse_children': False, 'parent': 'input'},
    "output": {'parse_children': True, 'parent': "gpload"},
    "table": {'parse_children': True, 'parent': "output"},
    "mode": {'parse_children': True, 'parent': "output"},
    "match_columns": {'parse_children': False, 'parent': "output"},
    "update_columns": {'parse_children': False, 'parent': "output"},
    "update_condition": {'parse_children': True, 'parent': "output"},
    "mapping": {'parse_children': False, 'parent': "output"},
    "preload": {'parse_children': True, 'parent': 'gpload'},
    "truncate": {'parse_children': False, 'parent': 'preload'},
    "reuse_tables": {'parse_children': False, 'parent': 'preload'},
    "fast_match": {'parse_children': False, 'parent': 'preload'},
    "staging_table": {'parse_children': False, 'parent': 'preload'},
    "sql": {'parse_children': True, 'parent': 'gpload'},
    "before": {'parse_children': False, 'parent': 'sql'},
    "after": {'parse_children': False, 'parent': 'sql'},
    "external": {'parse_children': True, 'parent': 'gpload'},
    "schema": {'parse_children': False, 'parent': 'external'}}

# (size factor, suffix) pairs used by bytestr(), largest factor first.
_abbrevs = [
    (long(1<<50), ' PB'),
    (long(1<<40), ' TB'),
    (long(1<<30), ' GB'),
    (long(1<<20), ' MB'),
    (long(1<<10), ' kB'),
    (1, ' bytes')
    ]

# Set to True by handle_kill() once a termination signal was received.
received_kill = False
# Reserved SQL keywords.  Kept as a dict mapping keyword -> True (not a
# set) so existing callers can both test membership and index into it.
keywords = dict.fromkeys("""
    abort absolute access action active add admin after
    aggregate all also alter analyse analyze and any
    array as asc assertion assignment asymmetric at authorization
    backward before begin between bigint binary bit boolean
    both by cache called cascade cascaded case cast
    chain char character characteristics check checkpoint class close
    cluster coalesce collate column comment commit committed concurrently
    connection constraint constraints conversion convert coordinator copy cost
    create createdb createrole createuser cross csv cube current
    current_date current_role current_time current_timestamp current_user cursor cycle database
    day deallocate dec decimal declare default defaults deferrable
    deferred definer delete delimiter delimiters desc disable distinct
    distributed do domain double drop each else enable
    encoding encrypted end errors escape every except exchange
    exclude excluding exclusive execute exists explain external extract
    false fetch fields fill filter first float following
    for force foreign format forward freeze from full
    function global grant granted greatest group group_id grouping
    handler hash having header hold host hour if
    ignore ilike immediate immutable implicit in including inclusive
    increment index indexes inherit inherits initially inner inout
    input insensitive insert instead int integer intersect interval
    into invoker is isnull isolation join keep key
    lancompiler language large last leading least left level
    like limit list listen load local localtime localtimestamp
    location lock log login match maxvalue merge minute
    minvalue mirror missing mode modify month move names
    national natural nchar new next no nocreatedb nocreaterole
    nocreateuser noinherit nologin none noovercommit nosuperuser not nothing
    notify notnull nowait null nullif numeric object of
    off offset oids old on only operator option
    or order others out outer over overcommit overlaps
    overlay owned owner partial partition partitions password percent
    placing position preceding precision prepare prepared preserve primary
    prior privileges procedural procedure queue quote randomly range
    read real reassign recheck references reindex reject relative
    release rename repeatable replace reset resource restart restrict
    returning returns revoke right role rollback rollup row
    rows rule savepoint schema scroll second security segment
    select sequence serializable session session_user set setof sets
    share show similar simple smallint some split stable
    start statement statistics stdin stdout storage strict subpartition
    subpartitions substring superuser symmetric sysid system table tablespace
    temp template temporary then threshold ties time timestamp
    to trailing transaction transform treat trigger trim true
    truncate trusted type unbounded uncommitted unencrypted union unique
    unknown unlisten until update user using vacuum valid
    validation validator values varchar varying verbose view volatile
    web when where window with without work write
    year zone
""".split(), True)
def is_keyword(tab):
    """Return True if *tab* is a reserved SQL keyword.

    Idiom fix: the membership test already yields a bool, so the
    if/else returning True/False is collapsed into a single return.
    """
    return tab in keywords
def caseInsensitiveDictLookup(key, dictionary):
    """
    Do a case insensitive dictionary lookup. Return the dictionary value if
    found, or None if not found.
    """
    wanted = key.lower()
    for candidate in dictionary:
        if candidate.lower() == wanted:
            return dictionary[candidate]
    return None
def sqlIdentifierCompare(x, y):
    """
    Compare x and y as SQL identifiers.  Delimited identifiers are compared
    case-sensitively after stripping their quotes; non-delimited identifiers
    are folded to lower case first (SQL rules).  Returns True when the two
    are equivalent, False otherwise (including when either is None).
    """
    if x is None or y is None:
        return False

    def normalize(ident):
        # Delimited identifiers keep their case; plain ones fold to lower.
        return quote_unident(ident) if isDelimited(ident) else ident.lower()

    return normalize(x) == normalize(y)
def isDelimited(value):
    """
    Return True when *value* is a delimited SQL identifier, i.e. it is at
    least two characters long and both starts and ends with a double quote.
    """
    return len(value) >= 2 and value[0] == '"' and value[-1] == '"'
def convertListToDelimited(identifiers):
    """
    Given identifiers that may mix delimited and non-delimited names,
    return a list where every element is delimited.  Non-delimited names
    are folded to lower case first (SQL case rules), then quoted.
    """
    delimited = []
    for ident in identifiers:
        if isDelimited(ident):
            delimited.append(ident)
        else:
            delimited.append(quote_ident(ident.lower()))
    return delimited
def splitUpMultipartIdentifier(id):
    """
    Given a sql identifier like sch.tab, return a list of its
    individual elements (e.g. sch.tab would return ['sch','tab']).
    Delimited parts are kept intact (with their quotes); plain parts are
    further split on '.'.
    """
    returnList = []
    elementList = splitIntoLiteralsAndNonLiterals(id, quoteValue='"')
    # If there is a leading empty string, remove it.
    # (splitIntoLiteralsAndNonLiterals prepends ' ' when the input starts
    # with a quote, so the spurious first element is a single space.)
    if elementList[0] == ' ':
        elementList.pop(0)
    # Remove the dots, and split up undelimited multipart names
    for e in elementList:
        if e != '.':
            if e[0] != '"':
                # Plain (non-delimited) chunk: may contain several parts.
                subElementList = e.split('.')
            else:
                # Delimited chunk: keep as a single element.
                subElementList = [e]
            for se in subElementList:
                # remove any empty elements
                if se != '':
                    returnList.append(se)
    return returnList
def splitIntoLiteralsAndNonLiterals(str1, quoteValue="'"):
    """
    Break the string (str1) into a list of literals and non-literals where every
    even number element is a non-literal and every odd number element is a literal.
    The delimiter between literals and non-literals is the quoteValue, so this
    function will not take into account any modifiers on a literal (e.g. E'adf').
    Doubled quote characters inside a literal are treated as escapes and do
    not terminate the literal.
    """
    returnList = []
    if len(str1) > 1 and str1[0] == quoteValue:
        # Always start with a non-literal (so odd indexes stay literals);
        # callers strip this padding element back out.
        str1 = ' ' + str1
    inLiteral = False
    i = 0
    tokenStart = 0
    while i < len(str1):
        if str1[i] == quoteValue:
            if inLiteral == False:
                # We are at start of literal
                inLiteral = True
                returnList.append(str1[tokenStart:i])
                tokenStart = i
            elif i + 1 < len(str1) and str1[i+1] == quoteValue:
                # We are in a literal and found quote quote, so skip over it
                i = i + 1
            else:
                # We are at the end of a literal or end of str1
                returnList.append(str1[tokenStart:i+1])
                tokenStart = i + 1
                inLiteral = False
        i = i + 1
    if tokenStart < len(str1):
        # Trailing non-literal (or unterminated literal) tail.
        returnList.append(str1[tokenStart:])
    return returnList
def quote_ident(val):
    """
    Delimit *val* as a SQL identifier: double any embedded double quotes
    and wrap the whole string in double quotes.
    """
    escaped = val.replace('"', '""')
    return '"{0}"'.format(escaped)
def quote_unident(val):
"""
This method returns a new string replacing "" with ",
and removing the " at the start and end of the string.
"""
if val != None and len(val) > 0:
val = val.replace('""', '"')
if val != None and len(val) > 1 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
return val
def notice_processor(notice):
    """
    Scan the server NOTICE messages in *notice* for the data-formatting
    error count and record it in the NUM_WARN_ROWS global.  Stops at the
    first matching message.
    """
    if windowsPlatform == True:
        # We don't have a pygresql with our notice fix, so skip for windows.
        # This means we will not get any warnings on windows (MPP10989).
        return
    # Raw string fixes the '\d' escape, which is an invalid escape
    # sequence in a plain literal (SyntaxWarning on modern Python).
    r = re.compile(r"^NOTICE: found (\d+) data formatting errors.*")
    for aNotice in notice:
        m = r.match(aNotice)
        if m:
            global NUM_WARN_ROWS
            NUM_WARN_ROWS = int(m.group(1))
            return
def handle_kill(signum, frame):
    """Signal handler: log the received signal, set exit status 2 and exit.
    Idempotent -- a second signal while already shutting down is ignored."""
    # already dying?
    global received_kill
    if received_kill:
        return
    received_kill = True
    # 'g' is the module-level gpload instance -- presumably installed by
    # main() before the handler is registered; TODO confirm.
    g.log(g.INFO, "received signal %d" % signum)
    g.exitValue = 2
    sys.exit(2)
def bytestr(size, precision=1):
    """Return a human-readable string for *size* bytes, e.g. '1.5 kB'.

    precision -- number of decimal digits to keep (trailing zeros dropped).

    Bug fix: the scaled value was previously written as the *string
    literal* "size/float(factor)", so .split('.') produced a single
    element and indexing [1] raised IndexError for any size > 1; the
    expression is now actually evaluated via str().
    """
    if size == 1:
        return '1 byte'
    # Find the largest factor that fits; _abbrevs ends with (1, ' bytes'),
    # so the loop always breaks and 'factor'/'suffix' are always bound.
    for factor, suffix in _abbrevs:
        if size >= factor:
            break
    float_string_split = str(size / float(factor)).split('.')
    integer_part = float_string_split[0]
    decimal_part = float_string_split[1]
    if int(decimal_part[0:precision]):
        float_string = '.'.join([integer_part, decimal_part[0:precision]])
    else:
        # Fractional part rounds to zero at this precision; drop it.
        float_string = integer_part
    return float_string + suffix
class CatThread(threading.Thread):
    """
    Simple threading wrapper to read a file descriptor and put the contents
    in the log file.

    The fd is assumed to be stdout and stderr from gpfdist. We must use
    select.select and locks to ensure both threads are not read at the same
    time. A dead lock situation could happen if they did. communicate() is
    not used since it blocks. We will wait 1 second between read attempts.
    """
    def __init__(self,gpload,fd, sharedLock = None):
        # gpload    -- owning gpload instance (used for logging)
        # fd        -- pipe from the gpfdist subprocess (stdout or stderr)
        # sharedLock -- lock shared between the stdout and stderr readers
        #               (unused on Windows, where select is unavailable)
        threading.Thread.__init__(self)
        self.gpload = gpload
        self.fd = fd
        self.theLock = sharedLock

    def run(self):
        try:
            if windowsPlatform == True:
                while 1:
                    # Windows select does not support select on non-file fd's, so we can use the lock fix. Deadlock is possible here.
                    # We need to look into the Python windows module to see if there is another way to do this in Windows.
                    line = self.fd.readline().decode()
                    if line=='':
                        # EOF: gpfdist closed its end of the pipe.
                        break
                    self.gpload.log(self.gpload.DEBUG, 'gpfdist: ' + line.strip('\n'))
            else:
                while 1:
                    # Poll with a 1 second timeout so we never block on a
                    # pipe the sibling thread might also be reading.
                    retList = select.select( [self.fd]
                                           , []
                                           , []
                                           , 1
                                           )
                    if retList[0] == [self.fd]:
                        # Serialize reads with the sibling CatThread.
                        self.theLock.acquire()
                        line = self.fd.readline().decode()
                        self.theLock.release()
                    else:
                        continue
                    if line=='':
                        # EOF: gpfdist closed its end of the pipe.
                        break
                    self.gpload.log(self.gpload.DEBUG, 'gpfdist: ' + line.strip('\n'))
        except Exception as e:
            # close fd so that not block the worker thread because of stdout/stderr pipe not finish/closed.
            self.fd.close()
            sys.stderr.write("\n\nWarning: gpfdist log halt because Log Thread '%s' got an exception: %s \n" % (self.getName(), str(e)))
            self.gpload.log(self.gpload.WARN, "gpfdist log halt because Log Thread '%s' got an exception: %s" % (self.getName(), str(e)))
            raise
class Progress(threading.Thread):
    """
    Determine our progress from the gpfdist daemon.

    Polls every local gpfdist instance once a second over plain HTTP and
    logs the aggregate bytes transferred. The main thread stops the poller
    by setting self.number non-zero and waiting on self.condition.
    """
    def __init__(self, gpload, ports):
        # gpload: owning gpload instance (used for logging).
        # ports: list of local gpfdist ports to poll.
        threading.Thread.__init__(self)
        self.gpload = gpload
        self.ports = ports
        self.number = 0          # set non-zero to request shutdown
        self.condition = threading.Condition()

    def get(self, port):
        """
        Connect to gpfdist and issue an HTTP query. No need to do this with
        httplib as the transaction is extremely simple.

        @returns: (read_bytes, total_bytes, total_sessions); -1 means the
                  field was absent from the status output.
        """
        addrinfo = socket.getaddrinfo('localhost', port)
        s = socket.socket(addrinfo[0][0], socket.SOCK_STREAM)
        s.connect(('localhost', port))
        # Bug fix: socket.sendall() requires bytes on Python 3; the request
        # was previously passed as str, raising a TypeError at runtime.
        s.sendall('GET gpfdist/status HTTP/1.0\r\n\r\n'.encode())
        f = s.makefile()
        read_bytes = -1
        total_bytes = -1
        total_sessions = -1
        for line in f:
            self.gpload.log(self.gpload.DEBUG, "gpfdist stat: %s" % \
                            line.strip('\n'))
            a = line.split(' ')
            if not a:
                continue
            if a[0] == 'read_bytes':
                read_bytes = int(a[1])
            elif a[0] == 'total_bytes':
                total_bytes = int(a[1])
            elif a[0] == 'total_sessions':
                total_sessions = int(a[1])
        s.close()
        f.close()
        return read_bytes, total_bytes, total_sessions

    def get1(self):
        """
        Parse gpfdist output: aggregate stats over all ports and log them.
        Logs nothing until at least one session has started on every port.
        """
        read_bytes = 0
        total_bytes = 0
        for port in self.ports:
            a = self.get(port)
            if a[2] < 1:
                return
            if a[0] != -1:
                read_bytes += a[0]
            if a[1] != -1:
                total_bytes += a[1]
        self.gpload.log(self.gpload.INFO, 'transferred %s of %s' % \
                        (bytestr(read_bytes), bytestr(total_bytes)))

    def run(self):
        """
        Thread worker: poll once per second until asked to stop or a
        socket error occurs.
        """
        while 1:
            try:
                self.condition.acquire()
                n = self.number
                self.condition.release()
                self.get1()
                if n:
                    self.gpload.log(self.gpload.DEBUG, "gpfdist status thread told to stop")
                    # Wake the waiter that requested the shutdown.
                    self.condition.acquire()
                    self.condition.notify()
                    self.condition.release()
                    break
            except socket.error as e:
                self.gpload.log(self.gpload.DEBUG, "got socket exception: %s" % e)
                break
            time.sleep(1)
def cli_help():
    """Return the contents of this program's CLI help file.

    Looks for <EXECNAME>_help under ../docs/cli_help relative to the
    executable directory; returns '' when the file is missing or
    unreadable.
    """
    help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help',
                             EXECNAME + '_help')
    try:
        # 'with' replaces the original try/finally close dance, and the
        # narrower Exception clause no longer swallows KeyboardInterrupt.
        with open(help_path) as f:
            return f.read(-1)
    except Exception:
        return ''
#============================================================
def usage(error=None):
    """Print the CLI help (or the module docstring) and exit with status 2.

    When *error* is given it is written to stderr after the help text.
    """
    help_text = cli_help() or __doc__
    print(help_text)
    sys.stdout.flush()
    if error:
        sys.stderr.write('ERROR: %s\n' % error)
    sys.stderr.write('\n')
    sys.stderr.flush()
    sys.exit(2)
def quote(a):
    """
    SQLify a string: double embedded single quotes, double backslashes,
    and wrap the result in single quotes.
    """
    escaped = a.replace("'", "''")
    escaped = escaped.replace('\\', '\\\\')
    return "'%s'" % escaped
def quote_no_slash(a):
    """
    SQLify a string: double embedded single quotes and wrap in single
    quotes, leaving backslashes untouched.
    """
    return "'{}'".format(a.replace("'", "''"))
def splitPgpassLine(a):
    """
    If the user has specified a .pgpass file, we'll have to parse it. We simply
    split the string into arrays at :. We could just use a native python
    function but we need to escape the ':' character.
    """
    fields = []
    current = ''
    pending_escape = False
    for ch in a:
        if pending_escape:
            # The escaped character is taken literally; the backslash itself
            # is dropped.
            current += ch
            pending_escape = False
        elif ch == '\\':
            pending_escape = True
        elif ch == ':':
            fields.append(current)
            current = ''
        else:
            current += ch
    if pending_escape:
        # A lone trailing backslash is kept verbatim.
        current += '\\'
    fields.append(current)
    return fields
def test_key(gp, key, crumb):
    """
    Make sure that a key is a valid keyword in the configuration grammar and
    that it appears in the configuration file where we expect -- that is, where
    it has the parent we expect
    """
    val = valid_tokens.get(key)
    if val is None:
        gp.log(gp.ERROR, 'unrecognized key: "%s"' % key)

    # A keyword may be legal under several parents; normalize to a list.
    parents = val['parent']
    if type(parents) != list:
        parents = [parents]

    # The immediate parent is the last breadcrumb, or None at top level.
    current = crumb[-1] if len(crumb) else None

    matched = False
    for parent in parents:
        if parent == current:
            matched = True
            break
    if not matched:
        gp.log(gp.ERROR, 'unexpected key: "%s"' % key)
    return val
def yaml_walk(gp, node, crumb):
    """Recursively validate every keyword in a composed YAML node tree.

    crumb is the stack of ancestor keys, used by test_key to verify that
    each keyword appears under an allowed parent.
    """
    if type(node) == list:
        for item in node:
            if type(item) == tuple:
                # A (key, value) pair coming from a MappingNode.
                keyword = item[0].value.lower()
                entry = test_key(gp, keyword, crumb)
                descend = (len(item) > 1 and entry['parse_children'] and
                           (isinstance(item[1], yaml.nodes.MappingNode) or
                            isinstance(item[1], yaml.nodes.SequenceNode)))
                if descend:
                    crumb.append(keyword)
                    yaml_walk(gp, item[1], crumb)
                    crumb.pop()
            elif isinstance(item, yaml.nodes.ScalarNode):
                test_key(gp, item.value, crumb)
            else:
                yaml_walk(gp, item, crumb)
    elif isinstance(node, yaml.nodes.MappingNode):
        yaml_walk(gp, node.value, crumb)
    elif isinstance(node, yaml.nodes.ScalarNode):
        pass
    elif isinstance(node, yaml.nodes.SequenceNode):
        yaml_walk(gp, node.value, crumb)
    elif isinstance(node, yaml.nodes.CollectionNode):
        pass
def changeToUnicode(a):
    """
    Change every entry in a list or dictionary to a unicode item
    """
    if type(a) == list:
        return [changeToUnicode(element) for element in a]
    if type(a) == dict:
        converted = {}
        for k, v in a.items():
            if type(k) == str:
                k = str(k)
            converted[k] = changeToUnicode(v)
        return converted
    if type(a) == str:
        a = str(a)
    return a
def dictKeyToLower(a):
    """
    down case all entries in a list or dict
    """
    if type(a) == list:
        return [dictKeyToLower(element) for element in a]
    if type(a) == dict:
        lowered = {}
        for k, v in a.items():
            if type(k) == str:
                # only string keys are folded; values are recursed into
                k = k.lower()
            lowered[k] = dictKeyToLower(v)
        return lowered
    if type(a) == str:
        a = str(a)
    return a
#
# MPP-13348
#
'''Jenkins hash - http://burtleburtle.net/bob/hash/doobs.html'''
def jenkinsmix(a, b, c):
    """One mixing round of Bob Jenkins' 32-bit hash (lookup2).

    Inputs and outputs are treated as unsigned 32-bit values.
    """
    MASK = 0xffffffff
    a &= MASK; b &= MASK; c &= MASK
    a = ((a - b - c) ^ (c >> 13)) & MASK
    b = ((b - c - a) ^ (a << 8)) & MASK
    c = ((c - a - b) ^ (b >> 13)) & MASK
    a = ((a - b - c) ^ (c >> 12)) & MASK
    b = ((b - c - a) ^ (a << 16)) & MASK
    c = ((c - a - b) ^ (b >> 5)) & MASK
    a = ((a - b - c) ^ (c >> 3)) & MASK
    b = ((b - c - a) ^ (a << 10)) & MASK
    c = ((c - a - b) ^ (b >> 15)) & MASK
    return a, b, c
def jenkins(data, initval=0):
    """Bob Jenkins' lookup2 hash of a character string.

    Returns an unsigned 32-bit integer; the empty string hashes to 0.
    """
    length = len(data)
    if length == 0:
        return 0
    a = b = 0x9e3779b9
    c = initval
    pos = 0
    remaining = length
    # Consume full 12-byte blocks, 4 little-endian bytes per register.
    while remaining >= 12:
        a += sum(ord(data[pos + i]) << (8 * i) for i in range(4))
        b += sum(ord(data[pos + 4 + i]) << (8 * i) for i in range(4))
        c += sum(ord(data[pos + 8 + i]) << (8 * i) for i in range(4))
        a, b, c = jenkinsmix(a, b, c)
        pos += 12
        remaining -= 12
    # Fold in the trailing bytes; c's lowest byte is reserved for length.
    c += length
    for i in range(min(remaining, 4)):
        a += ord(data[pos + i]) << (8 * i)
    for i in range(4, min(remaining, 8)):
        b += ord(data[pos + i]) << (8 * (i - 4))
    for i in range(8, min(remaining, 11)):
        c += ord(data[pos + i]) << (8 * (i - 7))
    a, b, c = jenkinsmix(a, b, c)
    return c
# MPP-20927: gpload external table name problem
# Not sure if it is used by other components, just leave it here.
def shortname(name):
    """
    Returns a 10 character string formed by concatenating the first two characters
    of the name with another 8 character string computed using the Jenkins hash
    function of the table name. When the original name has only a single non-space
    ascii character, we return '00' followed by 8 char hash.
    For example:
    >>> shortname('mytable')
    'my3cbb7ba8'
    >>> shortname('some_pretty_long_test_table_name')
    'so9068664a'
    >>> shortname('t')
    '006742be70'

    @param name: the input tablename
    @returns: a string 10 characters or less built from the table name
    """
    # Spaces never contribute to the prefix or the hash.
    name = name.replace(' ', '')
    digest = jenkins(name)
    # Drop non-ascii characters only after hashing, so jenkins never sees
    # an empty string for names made of one non-space ascii character.
    name = ''.join(ch for ch in name if ord(ch) < 128)
    if len(name) > 1:
        return '%2s%08x' % (name[:2], digest)
    return '00%08x' % digest  # name could be length 0 or 1
class options:
    """Bare attribute container for parsed command-line option values."""
    pass
class gpload:
"""
Main class wrapper
"""
def __init__(self,argv):
    """Parse command-line arguments, open the log file, and load/validate
    the YAML control file.

    @param argv: argument list WITHOUT the program name.
    Exits (via usage()/self.log(ERROR, ...)) on any invalid option,
    missing config file, or malformed YAML.
    """
    self.threads = [] # remember threads so that we can join() against them
    self.exitValue = 0
    # Option defaults; None means "not given, fall back to config/env".
    self.options = options()
    self.options.h = None
    self.options.gpfdist_timeout = None
    self.options.p = None
    self.options.U = None
    self.options.W = False
    self.options.D = False
    self.options.no_auto_trans = False
    self.options.password = None
    self.options.d = None
    # Log verbosity levels (higher value == more verbose output).
    self.DEBUG = 5
    self.LOG = 4
    self.INFO = 3
    self.WARN = 2
    self.ERROR = 1
    self.options.qv = self.INFO
    self.options.l = None
    self.formatOpts = ""
    self.startTimestamp = time.time()
    self.error_table = False
    self.gpdb_version = ""
    self.options.max_retries = 0
    seenv = False
    seenq = False
    # Create Temp and External table names. However external table name could
    # get overwritten with another name later on (see create_external_table_name).
    # MPP-20927: gpload external table name problem. We use uuid to avoid
    # external table name confliction.
    self.unique_suffix = str(uuid.uuid1()).replace('-', '_')
    self.staging_table_name = 'temp_staging_gpload_' + self.unique_suffix
    self.extTableName = 'ext_gpload_' + self.unique_suffix
    # SQL to run in order to undo our temporary work
    self.cleanupSql = []
    self.distkey = None
    configFilename = None
    # Hand-rolled option parsing: consume argv until an unknown token.
    while argv:
        try:
            try:
                if argv[0]=='-h':
                    self.options.h = argv[1]
                    argv = argv[2:]
                elif argv[0]=='--gpfdist_timeout':
                    self.options.gpfdist_timeout = argv[1]
                    argv = argv[2:]
                elif argv[0]=='-p':
                    self.options.p = int(argv[1])
                    argv = argv[2:]
                elif argv[0]=='-l':
                    self.options.l = argv[1]
                    argv = argv[2:]
                elif argv[0]=='-q':
                    self.options.qv -= 1
                    argv = argv[1:]
                    seenq = True
                elif argv[0]=='--version':
                    sys.stderr.write("gpload version $Revision$\n")
                    sys.exit(0)
                elif argv[0]=='-v':
                    self.options.qv = self.LOG
                    argv = argv[1:]
                    seenv = True
                elif argv[0]=='-V':
                    self.options.qv = self.DEBUG
                    argv = argv[1:]
                    seenv = True
                elif argv[0]=='-W':
                    self.options.W = True
                    argv = argv[1:]
                elif argv[0]=='-D':
                    self.options.D = True
                    argv = argv[1:]
                elif argv[0]=='-U':
                    self.options.U = argv[1]
                    argv = argv[2:]
                elif argv[0]=='-d':
                    self.options.d = argv[1]
                    argv = argv[2:]
                elif argv[0]=='-f':
                    configFilename = argv[1]
                    argv = argv[2:]
                elif argv[0]=='--max_retries':
                    self.options.max_retries = int(argv[1])
                    argv = argv[2:]
                elif argv[0]=='--no_auto_trans':
                    self.options.no_auto_trans = True
                    argv = argv[1:]
                elif argv[0]=='-?':
                    usage()
                else:
                    break
            except IndexError:
                # argv[1] was missing for an option that takes a value.
                sys.stderr.write("Option %s needs a parameter.\n"%argv[0])
                sys.exit(2)
        except ValueError:
            # int() conversion failed for -p / --max_retries.
            sys.stderr.write("Parameter for option %s must be an integer.\n"%argv[0])
            sys.exit(2)

    if configFilename==None:
        usage('configuration file required')
    elif argv:
        a = ""
        if len(argv) > 1:
            a = "s"
        usage('unrecognized argument%s: %s' % (a, ' '.join(argv)))

    # default to gpAdminLogs for a log file, may be overwritten
    if self.options.l is None:
        self.options.l = os.path.join(os.environ.get('HOME', '.'),'gpAdminLogs')
        if not os.path.isdir(self.options.l):
            os.mkdir(self.options.l)
        self.options.l = os.path.join(self.options.l, 'gpload_' + \
                datetime.date.today().strftime('%Y%m%d') + '.log')

    try:
        self.logfile = open(self.options.l,'a')
    except Exception as e:
        self.log(self.ERROR, "could not open logfile %s: %s" % \
                 (self.options.l, e))

    if seenv and seenq:
        self.log(self.ERROR, "-q conflicts with -v and -V")

    if self.options.D:
        self.log(self.INFO, 'gpload has the -D option, so it does not actually load any data')

    try:
        f = open(configFilename,'r')
    except IOError as e:
        self.log(self.ERROR, "could not open configuration file: %s" % e)

    # pull in the config file, which should be in valid YAML
    try:
        # do an initial parse, validating the config file
        doc = f.read()
        self.config = yaml.safe_load(doc)
        # Keep an original-case copy for identifiers, and a key-lowered
        # copy for case-insensitive keyword lookups.
        self.configOriginal = changeToUnicode(self.config)
        self.config = dictKeyToLower(self.config)
        ver = self.getconfig('version', str, extraStuff = ' tag')
        if ver != '1.0.0.1':
            self.control_file_error("gpload configuration schema version must be 1.0.0.1")
        # second parse, to check that the keywords are sensible
        y = yaml.compose(doc)
        # first should be MappingNode
        if not isinstance(y, yaml.MappingNode):
            self.control_file_error("configuration file must begin with a mapping")
        yaml_walk(self, y.value, [])
    except yaml.scanner.ScannerError as e:
        self.log(self.ERROR, "configuration file error: %s, line %s" % \
                 (e.problem, e.problem_mark.line))
    except yaml.reader.ReaderError as e:
        es = ""
        if isinstance(e.character, str):
            es = "'%s' codec can't decode byte #x%02x: %s position %d" % \
                 (e.encoding, ord(e.character), e.reason,
                  e.position)
        else:
            es = "unacceptable character #x%04x at byte %d: %s" \
                 % (ord(e.character), e.position, e.reason)
        self.log(self.ERROR, es)
    except yaml.error.MarkedYAMLError as e:
        self.log(self.ERROR, "configuration file error: %s, line %s" % \
                 (e.problem, e.problem_mark.line))
    f.close()
    self.subprocesses = []
    self.log(self.INFO,'gpload session started ' + \
             datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
def control_file_warning(self, msg):
    """Log *msg* as a control-file processing warning (does not exit)."""
    self.log(self.WARN, "A gpload control file processing warning occurred. %s" % msg)
def control_file_error(self, msg):
    """Log *msg* as a control-file processing error.

    Note: self.log(ERROR, ...) terminates the process, so callers should
    not expect this method to return.
    """
    self.log(self.ERROR, "A gpload control file processing error occurred. %s" % msg)
def elevel2str(self, level):
if level == self.DEBUG:
return "DEBUG"
elif level == self.LOG:
return "LOG"
elif level == self.INFO:
return "INFO"
elif level == self.ERROR:
return "ERROR"
elif level == self.WARN:
return "WARN"
else:
self.log(self.ERROR, "unknown log type %i" % level)
def log(self, level, a):
"""
Level is either DEBUG, LOG, INFO, ERROR. a is the message
"""
try:
log = '|'.join(
[datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
self.elevel2str(level), a]) + '\n'
#str = str.encode('utf-8')
except Exception as e:
# log even if contains non-utf8 data and pass this exception
self.logfile.write("\nWarning: Log() threw an exception: %s \n" % (e))
if level <= self.options.qv:
sys.stdout.write(log)
if level <= self.options.qv or level <= self.INFO:
try:
self.logfile.write(log)
self.logfile.flush()
except AttributeError as e:
pass
if level == self.ERROR:
self.exitValue = 2;
sys.exit(self.exitValue)
def getconfig(self, a, typ=None, default='error', extraStuff='', returnOriginal=False):
    """
    Look for a config entry, via a column delimited string. a:b:c points to

    a:
       b:
          c

    Make sure that end point is of type 'typ' when not set to None.
    If returnOriginal is False, the return value will be in lower case,
    else the return value will be in its original form (i.e. the case that
    the user specified in their yaml file).
    """
    self.log(self.DEBUG, "getting config for " + a)
    if returnOriginal == True:
        config = self.configOriginal
    else:
        config = self.config
    # Walk one path component at a time, descending into the YAML tree.
    for s in a.split(':'):
        self.log(self.DEBUG, "trying " + s)
        index = 1
        # A trailing "(n)" selects the n-th occurrence of the key, e.g.
        # source(2) for the second SOURCE block.
        if s[-1:]==')':
            j = s.index('(')
            index = int(s[j+1:-1])
            s = s[:j]
        # Normalize the current level to a list so mappings and sequences
        # can be scanned uniformly.
        if type(config)!=list:
            config = [config]
        for c in config:
            if type(c)==dict:
                temp = caseInsensitiveDictLookup(s, c)
                if temp != None:
                    index -= 1
                    if not index:
                        self.log(self.DEBUG, "found " + s)
                        config = temp
                        break
        else:
            # for/else: the key was never found at this level.
            if default=='error':
                self.control_file_error("The configuration must contain %s%s"%(a,extraStuff))
                sys.exit(2)
            return default
    # Enforce the expected YAML type of the final value, if requested.
    if typ != None and type(config) != typ:
        if typ == list:
            self.control_file_error("The %s entry must be a YAML sequence %s"% (a ,extraStuff))
        elif typ == dict:
            self.control_file_error("The %s entry must be a YAML mapping %s"% (a, extraStuff))
        # NOTE(review): the duplicated "typ == str" test is a Python 2
        # str/unicode leftover; both halves are now identical.
        elif typ == str or typ == str:
            self.control_file_error("%s must be a string %s" % (a, extraStuff))
        elif typ == int:
            self.control_file_error("The %s entry must be a YAML integer %s" % (a, extraStuff))
        else:
            assert 0
            self.control_file_error("Encountered unknown configuration type %s"% type(config))
        sys.exit(2)
    return config
def read_config(self):
    """
    Configure ourselves

    Resolves target table/schema, connection host, port, user and
    database. Precedence for each setting: command line > config file >
    environment variable > built-in default.
    """
    # ensure output is of type list
    self.getconfig('gpload:output', list)

    # The user supplied table name can be completely or partially delimited,
    # and it can be a one or two part name. Get the originally supplied name
    # and parse it into its delimited one or two part name.
    self.schemaTable = self.getconfig('gpload:output:table', str, returnOriginal=True)
    schemaTableList = splitUpMultipartIdentifier(self.schemaTable)
    schemaTableList = convertListToDelimited(schemaTableList)
    if len(schemaTableList) == 2:
        self.schema = schemaTableList[0]
        self.table = schemaTableList[1]
    else:
        # schema stays None; read_table_metadata resolves it later via
        # the search_path.
        self.schema = None
        self.table = schemaTableList[0]

    # Precedence for configuration: command line > config file > env
    # variable

    # host to connect to
    if not self.options.h:
        self.options.h = self.getconfig('host', str, None)
        if self.options.h:
            self.options.h = str(self.options.h)
    if not self.options.h:
        self.options.h = os.environ.get('PGHOST')
    if not self.options.h or len(self.options.h) == 0:
        self.log(self.INFO, "no host supplied, defaulting to localhost")
        self.options.h = "localhost"

    # Port to connect to
    if not self.options.p:
        self.options.p = self.getconfig('port',int,None)
    if not self.options.p:
        try:
            self.options.p = int(os.environ.get('PGPORT'))
        except (ValueError, TypeError):
            # PGPORT unset or not numeric; fall through to the default.
            pass
    if not self.options.p:
        self.options.p = 5432

    # User to connect as
    if not self.options.U:
        self.options.U = self.getconfig('user', str, None)
    if not self.options.U:
        self.options.U = os.environ.get('PGUSER')
    if not self.options.U:
        self.options.U = getpass.getuser()
        self.log(self.INFO, "no user supplied, defaulting to "+self.options.U)
    #self.options.U = os.environ.get('USER') or \
    #                os.environ.get('LOGNAME') or \
    #                os.environ.get('USERNAME')

    if not self.options.U or len(self.options.U) == 0:
        self.log(self.ERROR,
                   "You need to specify your username with the -U " +
                   "option or in your configuration or in your " +
                   "environment as PGUSER")

    # database to connect to
    if not self.options.d:
        self.options.d = self.getconfig('database', str, None)
    if not self.options.d:
        self.options.d = os.environ.get('PGDATABASE')
    if not self.options.d:
        # like libpq, just inherit USER
        self.options.d = self.options.U

    if self.getconfig('gpload:input:error_table', str, None):
        self.error_table = True
        self.log(self.WARN,
                 "ERROR_TABLE is not supported. " +
                 "We will set LOG_ERRORS and REUSE_TABLES to True for compatibility.")
def gpfdist_port_options(self, name, availablePorts, popenList):
    """
    Adds gpfdist -p / -P port options to popenList based on port and port_range in YAML file.
    Raises errors if options are invalid or ports are unavailable.
    @param name: input source name from YAML file.
    @param availablePorts: current set of available ports
    @param popenList: gpfdist options (updated)
    """
    port = self.getconfig(name + ':port', int, None)
    port_range = self.getconfig(name+':port_range', list, None)
    # PORT takes precedence over PORT_RANGE; with neither, default to the
    # 8000-9000 range.
    if port:
        startPort = endPort = port
        endPort += 1
    elif port_range:
        try:
            startPort = int(port_range[0])
            endPort = int(port_range[1])
        except (IndexError,ValueError):
            self.control_file_error(name + ":port_range must be a YAML sequence of two integers")
    else:
        startPort = self.getconfig(name+':port',int,8000)
        endPort = self.getconfig(name+':port',int,9000)

    if (startPort > 65535 or endPort > 65535):
        # Do not allow invalid ports
        self.control_file_error("Invalid port. Port values must be less than or equal to 65535.")
    elif not (set(range(startPort,endPort+1)) & availablePorts):
        # Every port in the requested range is already taken by an
        # earlier source definition.
        self.log(self.ERROR, "no more ports available for gpfdist")

    popenList.append('-p')
    popenList.append(str(startPort))

    popenList.append('-P')
    popenList.append(str(endPort))
def gpfdist_filenames(self, name, popenList):
"""
Adds gpfdist -f filenames to popenList.
Raises errors if YAML file option is invalid.
@param name: input source name from YAML file.
@param popenList: gpfdist options (updated)
@return: list of files names
"""
file = self.getconfig(name+':file',list)
for i in file:
if type(i)!= str and type(i) != str:
self.control_file_error(name + ":file must be a YAML sequence of strings")
popenList.append('-f')
popenList.append('"'+' '.join(file)+'"')
return file
def gpfdist_timeout_options(self, popenList):
"""
Adds gpfdist -t timeout option to popenList.
@param popenList: gpfdist options (updated)
"""
if self.options.gpfdist_timeout != None:
gpfdistTimeout = self.options.gpfdist_timeout
else:
gpfdistTimeout = 30
popenList.append('-t')
popenList.append(str(gpfdistTimeout))
def gpfdist_verbose_options(self, popenList):
"""
Adds gpfdist -v / -V options to popenList depending on logging level
@param popenList: gpfdist options (updated)
"""
if self.options.qv == self.LOG:
popenList.append('-v')
elif self.options.qv > self.LOG:
popenList.append('-V')
def gpfdist_max_line_length(self, popenList):
"""
Adds gpfdist -m option to popenList when max_line_length option specified in YAML file.
@param popenList: gpfdist options (updated)
"""
max_line_length = self.getconfig('gpload:input:max_line_length',int,None)
if max_line_length is not None:
popenList.append('-m')
popenList.append(str(max_line_length))
def gpfdist_transform(self, popenList):
"""
Compute and return url fragment if transform option specified in YAML file.
Checks for readable transform config file if transform_config option is specified.
Adds gpfdist -c option to popenList if transform_config is specified.
Validates that transform_config is present when transform option is specified.
@param popenList: gpfdist options (updated)
@returns: uri fragment for transform or "" if not appropriate.
"""
transform = self.getconfig('gpload:input:transform', str, None)
transform_config = self.getconfig('gpload:input:transform_config', str, None)
if transform_config:
try:
f = open(transform_config,'r')
except IOError as e:
self.log(self.ERROR, "could not open transform_config file: %s" % e)
f.close()
popenList.append('-c')
popenList.append(transform_config)
else:
if transform:
self.control_file_error("transform_config is required when transform is specified")
fragment = ""
if transform is not None:
fragment = "#transform=" + transform
return fragment
def gpfdist_ssl(self, popenList):
"""
Adds gpfdist --ssl option to popenList when ssl option specified as true in YAML file.
@param popenList: gpfdist options (updated)
"""
ssl = self.getconfig('gpload:input:source:ssl',bool, False)
certificates_path = self.getconfig('gpload:input:source:certificates_path', str, None)
if ssl and certificates_path:
dir_exists = os.path.isdir(certificates_path)
if dir_exists == False:
self.log(self.ERROR, "could not access CERTIFICATES_PATH directory: %s" % certificates_path)
popenList.append('--ssl')
popenList.append(certificates_path)
else:
if ssl:
self.control_file_error("CERTIFICATES_PATH is required when SSL is specified as true")
elif certificates_path: # ssl=false (or not specified) and certificates_path is specified
self.control_file_error("CERTIFICATES_PATH is specified while SSL is not specified as true")
def start_gpfdists(self):
    """
    Start gpfdist daemon(s)

    One gpfdist process is launched per SOURCE block in the control file.
    Populates self.locations with the gpfdist(s):// URIs, self.ports with
    the ports actually bound, and self.threads/self.subprocesses with the
    reader threads and Popen handles for later cleanup.
    """
    self.locations = []
    self.ports = []
    sourceIndex = 0
    availablePorts = set(range(1,65535))
    found_source = False

    # Iterate source(1), source(2), ... until getconfig finds no more.
    while 1:
        sourceIndex += 1
        name = 'gpload:input:source(%d)'%sourceIndex
        a = self.getconfig(name,None,None)
        if not a:
            break
        found_source = True
        local_hostname = self.getconfig(name+':local_hostname', list, False)

        # do default host, the current one
        if not local_hostname:
            # if fully_qualified_domain_name is defined and set to true we want to
            # resolve the fqdn rather than just grabbing the hostname.
            fqdn = self.getconfig('gpload:input:fully_qualified_domain_name', bool, False)
            if fqdn:
                local_hostname = [socket.getfqdn()]
            else:
                local_hostname = [socket.gethostname()]

        # build gpfdist parameters
        popenList = ['gpfdist']
        self.gpfdist_ssl(popenList)
        self.gpfdist_port_options(name, availablePorts, popenList)
        file = self.gpfdist_filenames(name, popenList)
        self.gpfdist_timeout_options(popenList)
        self.gpfdist_verbose_options(popenList)
        self.gpfdist_max_line_length(popenList)
        fragment = self.gpfdist_transform(popenList)

        try:
            self.log(self.LOG, 'trying to run %s' % ' '.join(popenList))
            cfds = True
            if platform.system() in ['Windows', 'Microsoft']: # not supported on win32
                cfds = False
                cmd = ' '.join(popenList)
                needshell = False
            else:
                # On Unix, source the Greenplum environment file first so
                # gpfdist is found on PATH, then exec it through a shell.
                srcfile = None
                if os.environ.get('GPHOME_LOADERS'):
                    srcfile = os.path.join(os.environ.get('GPHOME_LOADERS'),
                                       'greenplum_loaders_path.sh')
                elif os.environ.get('GPHOME'):
                    srcfile = os.path.join(os.environ.get('GPHOME'),
                                       'greenplum_path.sh')

                if (not (srcfile and os.path.exists(srcfile))):
                    self.log(self.ERROR, 'cannot find greenplum environment ' +
                                'file: environment misconfigured')

                cmd = 'source %s ; exec ' % srcfile
                cmd += ' '.join(popenList)
                needshell = True

            a = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 close_fds=cfds, shell=needshell)
            self.subprocesses.append(a)
        except Exception as e:
            self.log(self.ERROR, "could not run %s: %s" % \
                            (' '.join(popenList), str(e)))

        """
        Reading from stderr and stdout on a Popen object can result in a dead lock if done at the same time.
        Create a lock to share when reading stderr and stdout from gpfdist.
        """
        readLock = threading.Lock()

        # get all the output from the daemon(s)
        t = CatThread(self,a.stderr, readLock)
        t.start()
        self.threads.append(t)

        # Read stdout synchronously until gpfdist announces the port it
        # actually bound; an empty line means the process died at startup.
        while 1:
            readLock.acquire()
            line = a.stdout.readline().decode()
            readLock.release()
            if line=='':
                self.log(self.ERROR,'failed to start gpfdist: ' +
                         'gpfdist command line: ' + ' '.join(popenList))

            line = line.strip('\n')
            self.log(self.LOG,'gpfdist says: ' + line)

            if (line.startswith('Serving HTTP on port ') or line.startswith('Serving HTTPS on port ')):
                port = int(line[21:line.index(',')])
                break

        self.log(self.INFO, 'started %s' % ' '.join(popenList))
        self.log(self.LOG,'gpfdist is running on port %d'%port)
        if port in availablePorts:
            availablePorts.remove(port)
        self.ports.append(port)
        # Hand the rest of stdout off to a background reader thread.
        t = CatThread(self,a.stdout,readLock)
        t.start()
        self.threads.append(t)

        ssl = self.getconfig('gpload:input:source:ssl', bool, False)
        if ssl:
            protocol = 'gpfdists'
        else:
            protocol = 'gpfdist'

        # Build one external-table location URI per local hostname.
        for l in local_hostname:
            if type(l) != str and type(l) != str:
                self.control_file_error(name + ":local_hostname must be a YAML sequence of strings")
            l = str(l)
            sep = ''
            if file[0] != '/':
                sep = '/'
            # MPP-13617
            if ':' in l:
                # IPv6 literals must be bracketed inside a URI.
                l = '[' + l + ']'
            self.locations.append('%s://%s:%d%s%s%s' % (protocol, l, port, sep, '%20'.join(file), fragment))

    if not found_source:
        self.control_file_error("configuration file must contain source definition")
def readPgpass(self, pgpassname):
    """
    Get password form .pgpass file

    Scans the file for the first entry matching the current host, port,
    database and user ('*' wildcards allowed) and stores its password in
    self.options.password. Malformed lines are skipped silently.
    """
    try:
        f = open(pgpassname, 'r')
    except IOError:
        # No readable .pgpass file; leave the password unset.
        return
    for row in f:
        try:
            entry = splitPgpassLine(row.rstrip("\n"))
            host_ok = entry[0] == '*' or entry[0].lower() == self.options.h.lower()
            port_ok = entry[1] == '*' or int(entry[1]) == self.options.p
            db_ok = entry[2] == '*' or entry[2] == self.options.d
            user_ok = entry[3] == '*' or entry[3] == self.options.U
            if host_ok and port_ok and db_ok and user_ok:
                self.options.password = entry[4]
                break
        except (ValueError, IndexError):
            # Too few fields or a non-numeric port: skip this line.
            pass
    f.close()
def setup_connection(self, recurse = 0):
    """
    Connect to the backend

    Resolves the password (prompt > config > PGPASSWORD > .pgpass) and
    opens self.db. Retries recursively on password failure (up to 10
    attempts) and on connection timeout (governed by
    self.options.max_retries; negative means retry forever).
    @param recurse: internal retry counter; callers pass 0.
    """
    if self.db != None:
        self.db.close()
        self.db = None
    if self.options.W:
        # -W forces an interactive password prompt.
        if self.options.password==None:
            self.options.password = getpass.getpass()
    else:
        if self.options.password==None:
            self.options.password = self.getconfig('password', str,
                                                   None)
        if self.options.password==None:
            self.options.password = os.environ.get('PGPASSWORD')
        if self.options.password==None:
            self.readPgpass(os.environ.get('PGPASSFILE',
                            os.environ.get('HOME','.')+'/.pgpass'))
    try:
        self.log(self.DEBUG, "connection string:" +
                 " user=" + str(self.options.U) +
                 " host=" + str(self.options.h) +
                 " port=" + str(self.options.p) +
                 " database=" + str(self.options.d))
        self.db = pg.DB( dbname=self.options.d
                       , host=self.options.h
                       , port=self.options.p
                       , user=self.options.U
                       , passwd=self.options.password
                       )
        self.log(self.DEBUG, "Successfully connected to database")

        if noGpVersion == False:
            # Get GPDB version
            curs = self.db.query("SELECT version()")
            self.gpdb_version = GpVersion(curs.getresult()[0][0])
            self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version)

    except Exception as e:
        # pg raises generic exceptions; classify by message text.
        errorMessage = str(e)
        if errorMessage.find("no password supplied") != -1:
            self.options.password = getpass.getpass()
            recurse += 1
            if recurse > 10:
                self.log(self.ERROR, "too many login attempt failures")
            self.setup_connection(recurse)
        elif errorMessage.find("Connection timed out") != -1 and self.options.max_retries != 0:
            recurse += 1
            if self.options.max_retries > 0:
                if recurse > self.options.max_retries: # retry failed
                    self.log(self.ERROR, "could not connect to database after retry %d times, " \
                             "error message:\n %s" % (recurse-1, errorMessage))
                else:
                    self.log(self.INFO, "retry to connect to database, %d of %d times" % (recurse,
                             self.options.max_retries))
            else: # max_retries < 0, retry forever
                self.log(self.INFO, "retry to connect to database.")
            self.setup_connection(recurse)
        else:
            self.log(self.ERROR, "could not connect to database: %s. Is " \
                     "the Greenplum Database running on port %i?" % (errorMessage,
                     self.options.p))
def read_columns(self):
    '''
    get from columns

    Builds self.from_columns, the list of source (external table)
    columns, each as [name, type, mapping-target, has_sequence]. When
    the control file has no COLUMNS section, the target table's columns
    (self.into_columns) are used directly.
    '''
    columns = self.getconfig('gpload:input:columns',list,None, returnOriginal=True)
    if columns != None:
        self.from_cols_from_user = True # user specified from columns
        self.from_columns = []
        for d in columns:
            if type(d)!=dict:
                self.control_file_error("gpload:input:columns must be a sequence of YAML mappings")
            tempkey = list(d.keys())[0]
            value = d[tempkey]
            """ remove leading or trailing spaces """
            d = { tempkey.strip() : value }
            key = list(d.keys())[0]
            if d[key] is None or not d[key]:
                # No type given in the control file: inherit it from the
                # matching target-table column.
                self.log(self.DEBUG,
                         'getting source column data type from target')
                for name, typ, mapto, hasseq in self.into_columns:
                    if sqlIdentifierCompare(name, key):
                        d[key] = typ
                        break

            # perform the same kind of magic type change that postgres does
            if d[key] == 'bigserial':
                d[key] = 'bigint'
            elif d[key] == 'serial':
                d[key] = 'int4'

            # Mark this column as having no mapping, which is important
            # for do_insert()
            self.from_columns.append([key,d[key].lower(),None, False])
    else:
        self.from_columns = self.into_columns
        self.from_cols_from_user = False

    # make sure that all columns have a type
    for name, typ, map, hasseq in self.from_columns:
        if typ is None:
            self.log(self.ERROR, 'column "%s" has no type ' % name +
                     'and does not appear in target table "%s"' % self.schemaTable)
    self.log(self.DEBUG, 'from columns are:')
    for c in self.from_columns:
        name = c[0]
        typ = c[1]
        self.log(self.DEBUG, '%s: %s'%(name,typ))
def read_table_metadata(self):
    '''
    get into columns list like: [column name, column data type, mapping target, has_sequence(bool)]

    Resolves self.schema via the search_path when it was not given in the
    control file, then reads the target table's column catalog into
    self.into_columns / self.into_columns_dict. Errors out when the table
    is missing or not visible to this user.
    '''
    # KAS Note to self. If schema is specified, then probably should use PostgreSQL rules for defining it.
    # find the shema name for this table (according to search_path)
    # if it was not explicitly specified in the configuration file.
    if self.schema is None:
        queryString = """SELECT n.nspname
                         FROM pg_catalog.pg_class c
                         INNER JOIN pg_catalog.pg_namespace n
                         ON n.oid = c.relnamespace
                         WHERE c.relname = '%s'
                         AND pg_catalog.pg_table_is_visible(c.oid);""" % quote_unident(self.table)

        resultList = self.db.query(queryString).getresult()
        if len(resultList) > 0:
            self.schema = (resultList[0])[0]
            self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table)))
        else:
            self.log(self.ERROR, "table %s not found in any database schema" % self.table)

    # has_sequence detects int4/int8 columns whose default is a
    # nextval('...'::regclass) expression, i.e. serial/bigserial columns.
    queryString = """select nt.nspname as table_schema,
                     c.relname as table_name,
                     a.attname as column_name,
                     a.attnum as ordinal_position,
                     format_type(a.atttypid, a.atttypmod) as data_type,
                     c.relkind = 'r' AS is_updatable,
                     a.atttypid in (23, 20) and a.atthasdef and
                     (select position ( 'nextval(' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0 and
                      position ( '::regclass)' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0
                      FROM pg_catalog.pg_attrdef d
                      WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) as has_sequence
                     from pg_catalog.pg_class c join pg_catalog.pg_namespace nt on (c.relnamespace = nt.oid)
                     join pg_attribute a on (a.attrelid = c.oid)
                     where a.attnum > 0 and a.attisdropped = 'f'
                     and a.attrelid = (select c.oid from pg_catalog.pg_class c join pg_catalog.pg_namespace nt on (c.relnamespace = nt.oid) where c.relname = '%s' and nt.nspname = '%s')
                     order by a.attnum """ % (quote_unident(self.table), quote_unident(self.schema))

    count = 0
    self.into_columns = []
    self.into_columns_dict = dict()
    resultList = self.db.query(queryString).dictresult()
    while count < len(resultList):
        row = resultList[count]
        count += 1
        ct = str(row['data_type'])
        # perform the same kind of magic type change that postgres does
        if ct == 'bigserial':
            ct = 'bigint'
        elif ct == 'serial':
            ct = 'int4'
        name = row['column_name']
        name = quote_ident(name)
        has_seq = row['has_sequence']
        # Normalize the catalog's 't'/'f' (or bool) into a Python bool.
        # Bug fix: the true branch previously assigned a misspelled
        # variable ('has_sql_bool'), so sequence columns were recorded
        # with a stale value — or raised NameError on the first row.
        has_seq_bool = (has_seq == str('t') or has_seq == True)
        i = [name, ct, None, has_seq_bool]
        # i: [column name, column data type, mapping target, has_sequence]
        self.into_columns.append(i)
        self.into_columns_dict[name] = i
        self.log(self.DEBUG, "found input column: " + str(i))
    if count == 0:
        # see if it's a permissions issue or it actually doesn't exist
        tableName = quote_unident(self.table)
        tableSchema = quote_unident(self.schema)
        sql = """select 1 from pg_class c, pg_namespace n
                 where c.relname = '%s' and
                 n.nspname = '%s' and
                 n.oid = c.relnamespace""" % (tableName, tableSchema)
        resultList = self.db.query(sql).getresult()
        if len(resultList) > 0:
            self.log(self.ERROR, "permission denied for table %s.%s" % \
                     (tableSchema, tableName))
        else:
            self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d))
def read_mapping(self):
    '''
    Record the output mapping for each target column at into_columns[i][2].
    When the config file supplies no mapping, derive one by matching
    from_columns to into_columns by (SQL-identifier) name.
    '''
    mapping = self.getconfig('gpload:output:mapping', dict, None, returnOriginal=True)

    if mapping:
        # Explicit mapping: every key must name a column of the target table.
        for key, value in list(mapping.items()):
            if type(key) != str or type(value) != str:
                self.control_file_error("gpload:output:mapping must be a YAML type mapping from strings to strings")
            matched_col = None
            for col in self.into_columns:
                if sqlIdentifierCompare(col[0], key) == True:
                    matched_col = col
                    break
            if matched_col is not None:
                matched_col[2] = value
            else:
                self.log(self.ERROR, '%s in mapping is not in table %s' % \
                         (key, self.schemaTable))
    else:
        # Implicit mapping: map every not-yet-mapped input column onto the
        # identically named target column, erroring on input columns that
        # have no counterpart in the table.
        for src in self.from_columns:
            # Skip input columns that already serve as someone's mapping.
            if [col for col in self.into_columns if col[2] == src[0]]:
                continue
            target = None
            for col in self.into_columns:
                if sqlIdentifierCompare(col[0], src[0]) == True:
                    target = col
                    break
            if target:
                if target[2] is None:
                    target[2] = target[0]
            else:
                self.log(self.ERROR, 'no mapping for input column ' +
                         '"%s" to output table' % src[0])

    for name, typ, mapto, seq in self.into_columns:
        self.log(self.DEBUG, '%s: %s = %s' % (name, typ, mapto))
def get_reuse_exttable_query(self, formatType, formatOpts, limitStr, from_cols, schemaName, log_errors, encodingCode):
    '''
    In order to find out whether we have an existing external table in the
    catalog which could be reused for this operation we need to make sure
    that it has the same column names and types, the same data format, and
    location specification, and single row error handling specs.

    Return:
        SQL to run in order to find out whether such a table exists.
    '''
    # Base query: one row per attribute of each candidate
    # 'ext_gpload_reusable_*' relation joined to its pg_exttable entry.
    # %s slots: optional pg_namespace join, relkind, schema/visibility filter.
    sqlFormat = """select attrelid::regclass
             from (
                    select
                        attrelid,
                        row_number() over (partition by attrelid order by attnum) as attord,
                        attnum,
                        attname,
                        atttypid::regtype
                    from
                        pg_attribute
                        join
                        pg_class
                        on (pg_class.oid = attrelid)
                        %s
                    where
                        relkind = '%s' and
                        relname like 'ext_gpload_reusable_%%' and
                        attnum > 0 and
                        not attisdropped and %s
                ) pgattr
                join
                pg_exttable pgext
                on(pgattr.attrelid = pgext.reloid)
            """
    joinStr = ""
    relkind = ""
    conditionStr = ""

    # if schemaName is None, find the resuable ext table which is visible to
    # current search path. Else find the resuable ext table under the specific
    # schema, and this needs to join pg_namespace.
    if schemaName is None:
        joinStr = ""
        conditionStr = "pg_table_is_visible(pg_class.oid)"
    else:
        joinStr = """join
                     pg_namespace pgns
                     on(pg_class.relnamespace = pgns.oid)
                  """
        conditionStr = "pgns.nspname = '%s'" % schemaName

    # relkind of external tables: 'r' before GPDB 7, 'f' from 7.0.0 on
    if noGpVersion or self.gpdb_version < "7.0.0":
        relkind='r'
    else:
        relkind='f'

    sql = sqlFormat % (joinStr, relkind, conditionStr)

    # Error-handling spec must match: pre-6.0 records the error table in
    # fmterrtbl; 6.0+ uses the boolean logerrors flag.
    if noGpVersion or self.gpdb_version < "6.0.0":
        if log_errors:
            sql += " WHERE pgext.fmterrtbl = pgext.reloid "
        else:
            sql += " WHERE pgext.fmterrtbl IS NULL "
    else:
        if log_errors:
            sql += " WHERE pgext.logerrors='t' "
        else:
            sql += " WHERE pgext.logerrors='f' "

    # every configured location URI must match, in order
    for i, l in enumerate(self.locations):
        sql += " and pgext.urilocation[%s] = %s\n" % (i + 1, quote(l))

    sql += """and pgext.fmttype = %s
              and pgext.writable = false
              and pgext.fmtopts like %s """ % (quote(formatType[0]),quote("%" + quote_unident(formatOpts.rstrip()) +"%"))

    if limitStr:
        sql += "and pgext.rejectlimit = %s " % limitStr
    else:
        sql += "and pgext.rejectlimit IS NULL "
    if encodingCode:
        sql += "and pgext.encoding = %s " % encodingCode

    # group per relation and require an exact positional match of column
    # count, and of every column's name and type
    sql += "group by attrelid "
    sql += """having
              count(*) = %s and
              bool_and(case """ % len(from_cols)

    for i, c in enumerate(from_cols):
        name = c[0]
        typ = c[1]
        sql += "when attord = %s then atttypid = %s::regtype and attname = %s\n" % (i+1, quote(typ), quote(quote_unident(name)))

    sql += """else true
              end)
              limit 1;"""

    self.log(self.DEBUG, "query used to identify reusable external relations: %s" % sql)
    return sql
def get_fast_match_exttable_query(self, formatType, formatOpts, limitStr, schemaName, log_errors, encodingCode):
    '''
    Fast path to find out whether we have an existing external table in the
    catalog which could be reused for this operation. we only make sure the
    location, data format and error limit are same. we don't check column
    names and types

    Return: SQL to run in order to find out whether
    such a table exists. The results of this SQl are table names without schema
    '''
    # %s slots: optional pg_namespace join, relkind, schema/visibility filter
    sqlFormat = """select relname from pg_class
                   join
                   pg_exttable pgext
                   on(pg_class.oid = pgext.reloid)
                   %s
                   where
                   relkind = '%s' and
                   relname like 'ext_gpload_reusable_%%' and
                   %s
                """
    joinStr = ""
    relkind = ""
    conditionStr = ""

    # if schemaName is None, find the resuable ext table which is visible to
    # current search path. Else find the resuable ext table under the specific
    # schema, and this needs to join pg_namespace.
    if schemaName is None:
        joinStr = ""
        conditionStr = "pg_table_is_visible(pg_class.oid)"
    else:
        joinStr = """join
                     pg_namespace pgns
                     on(pg_class.relnamespace = pgns.oid)"""
        conditionStr = "pgns.nspname = '%s'" % schemaName

    # relkind of external tables: 'r' before GPDB 7, 'f' from 7.0.0 on
    if noGpVersion or self.gpdb_version < "7.0.0":
        relkind='r'
    else:
        relkind='f'

    sql = sqlFormat % (joinStr, relkind, conditionStr)

    # Error-handling spec must match: pre-6.0 records the error table in
    # fmterrtbl; 6.0+ uses the boolean logerrors flag.
    if noGpVersion or self.gpdb_version < "6.0.0":
        if log_errors:
            sql += " and pgext.fmterrtbl = pgext.reloid "
        else:
            sql += " and pgext.fmterrtbl IS NULL "
    else:
        if log_errors:
            sql += " and pgext.logerrors='t' "
        else:
            sql += " and pgext.logerrors='f' "

    # every configured location URI must match, in order
    for i, l in enumerate(self.locations):
        sql += " and pgext.urilocation[%s] = %s\n" % (i + 1, quote(l))

    sql += """and pgext.fmttype = %s
              and pgext.writable = false
              and pgext.fmtopts like %s """ % (quote(formatType[0]),quote("%" + quote_unident(formatOpts.rstrip()) +"%"))

    if limitStr:
        sql += "and pgext.rejectlimit = %s " % limitStr
    else:
        sql += "and pgext.rejectlimit IS NULL "
    if encodingCode:
        sql += "and pgext.encoding = %s " % encodingCode

    sql += "limit 1;"

    self.log(self.DEBUG, "query used to fast match external relations:\n %s" % sql)
    return sql
def get_staging_conditions_string(self, target_table_name, staging_cols, distribution_cols):
    '''
    Build the identity string that decides whether a staging table can be
    reused. A reusable table must have:
      1. same target table
      2. same number of columns
      3. same names and types, in the same order
      4. same distribution key (according to columns' names and their order)
    Return:
        string (target_table_name:columns_num:staging_cols_str:distribution_cols_str)
    '''
    col_parts = []
    for col in staging_cols:
        # col is [name, type]; both are quoted so the string is unambiguous
        col_parts.append('%s-%s' % (quote(quote_unident(col[0])), quote(col[1])))
    dist_parts = [quote(quote_unident(col)) for col in distribution_cols]
    return '%s:%s:%s:%s' % (target_table_name, len(staging_cols),
                            '-'.join(col_parts), '-'.join(dist_parts))
def get_reuse_staging_table_query(self, encoding_conditions):
    '''
    This function will return the SQL to run in order to find out whether
    we have an existing staging table in the catalog which could be reused for this
    operation, according to the method and the encoding conditions.

    return:
        sql(string)
    '''
    # The hash of the reuse conditions is baked into the table name
    # (see create_staging_table), so a plain name lookup suffices.
    sql = """SELECT oid::regclass
             FROM pg_class
             WHERE relname = 'staging_gpload_reusable_%s';""" % (encoding_conditions)
    self.log(self.DEBUG, "query used to identify reusable temporary relations: %s" % sql)
    return sql
def get_table_oid(self, tableName):
    '''
    Return the pg_class oid for tableName, or None when tableName is
    empty or the lookup fails (e.g. the table does not exist).
    '''
    if tableName:
        sql = "select %s::regclass::oid" % quote(quote_unident(tableName))
        try:
            resultList = self.db.query(sql).getresult()
            return resultList[0][0]
        except Exception:
            # deliberate best-effort: the ::regclass cast errors out when the
            # table does not exist, which we report as "no oid" (None)
            pass
    return None
def get_ext_schematable(self, schemaName, tableName):
    '''
    Return tableName qualified with schemaName ("schema.table"), or the
    bare table name when no schema was given.
    '''
    if schemaName is None:
        return tableName
    return "%s.%s" % (schemaName, tableName)
def get_external_table_formatOpts(self, option, specify=''):
    '''
    add option, specify to self.formatOpts for creating external table
    '''
    formatType = self.getconfig('gpload:input:format', str, 'text').lower()
    # which options may be forwarded depends on the data format
    if formatType == 'text':
        valid_token = ['delimiter','escape']
    elif formatType == 'csv':
        valid_token = ['delimiter', 'quote', 'escape']
    else:
        valid_token = []

    if not option in valid_token:
        self.control_file_error("The option you specified doesn't support now")
        return

    if option == 'delimiter':
        # default delimiter: comma for CSV, tab for TEXT
        defval = ',' if formatType == 'csv' else '\t'
        val = self.getconfig('gpload:input:delimiter', str, defval)
    elif option == 'escape':
        # escape defaults to the configured quote character
        defval = self.getconfig('gpload:input:quote', str, '"')
        val = self.getconfig('gpload:input:escape', str, defval)
    elif option == 'quote':
        val = self.getconfig('gpload:input:quote', str, '"')
    else:
        self.control_file_error("unexpected error -- backtrace " +
                                "written to log file")
        sys.exit(2)

    # emit the clause under its own keyword unless a different keyword
    # (e.g. 'escape' carrying the quote's value) was requested via *specify*
    specify_str = str(specify) if specify else option

    if len(val) != 1:
        # Multi-character values are accepted only when they decode to a
        # single character (escaped forms such as '\\x1c', E'\\x1c', '\\u001c').
        val_decoded = val.encode().decode('unicode-escape')
        # candidate payload of an E'...' literal (strip E' prefix and ' suffix)
        subval_decoded = val[2:-1].encode().decode('unicode-escape')
        if val.startswith("E'") and val.endswith("'") and len(subval_decoded) == 1:
            subval = val[2:-1]
            if subval == "\\'":
                # keep the E'...' form so the embedded quote stays escaped
                self.formatOpts += "%s %s " % (specify_str, val)
            else:
                val = subval_decoded
                self.formatOpts += "%s '%s' " % (specify_str, val)
        elif len(val_decoded) == 1:
            val = val_decoded
            self.formatOpts += "%s '%s' " % (specify_str, val)
        else:
            self.control_file_warning(option +''' must be single ASCII character, you can also use unprintable characters(for example: '\\x1c' / E'\\x1c' or '\\u001c' / E'\\u001c' ''')
            self.control_file_error("Invalid option, gpload quit immediately")
            sys.exit(2)
    else:
        self.formatOpts += "%s '%s' " % (specify_str, val)
def create_external_table(self):
    '''
    extract all control file information and transform it accordingly,
    create a new external table or find a reusable external table to use
    for this operation or later
    '''
    formatType = self.getconfig('gpload:input:format', str, 'text').lower()
    locationStr = ','.join(map(quote,self.locations))

    # accumulate the FORMAT options clause piece by piece in self.formatOpts
    self.get_external_table_formatOpts('delimiter')

    nullas = self.getconfig('gpload:input:null_as', str, False)
    self.log(self.DEBUG, "null " + str(nullas))
    if nullas != False: # could be empty string
        self.formatOpts += "null %s " % quote_no_slash(nullas)
    elif formatType=='csv':
        self.formatOpts += "null '' "
    else:
        self.formatOpts += "null %s " % quote_no_slash("\\N")

    esc = self.getconfig('gpload:input:escape', None, None)
    if esc:
        if type(esc) != str and type(esc) != str:
            self.control_file_error("gpload:input:escape must be a string")
        if esc.lower() == 'off':
            # ESCAPE 'OFF' is only legal in TEXT mode
            if formatType == 'csv':
                self.control_file_error("ESCAPE cannot be set to OFF in CSV mode")
            self.formatOpts += "escape 'off' "
        else:
            self.get_external_table_formatOpts('escape')
    else:
        # no explicit escape configured: CSV defaults it to the quote
        # character, TEXT defaults it to backslash
        if formatType=='csv':
            self.get_external_table_formatOpts('quote','escape')
        else:
            self.formatOpts += "escape '\\'"

    if formatType=='csv':
        self.get_external_table_formatOpts('quote')

    if self.getconfig('gpload:input:header',bool,False):
        self.formatOpts += "header "

    ### should be true or false
    force_not_null_columns = self.getconfig('gpload:input:force_not_null',list,[])
    if force_not_null_columns:
        for i in force_not_null_columns:
            if type(i) != str and type(i) != str:
                self.control_file_error("gpload:input:force_not_null must be a YAML sequence of strings")
        self.formatOpts += "force not null %s " % ','.join(force_not_null_columns)

    # resolve the encoding: fall back to the server encoding when not
    # configured, then look up its numeric code for catalog comparison
    encodingCode = None
    encodingStr = self.getconfig('gpload:input:encoding', str, None)
    if encodingStr is None:
        result = self.db.query("SHOW SERVER_ENCODING").getresult()
        if len(result) > 0:
            encodingStr = result[0][0]
    if encodingStr:
        sql = "SELECT pg_char_to_encoding('%s')" % encodingStr
        result = self.db.query(sql).getresult()
        if len(result) > 0:
            encodingCode = result[0][0]

    limitStr = self.getconfig('gpload:input:error_limit',int, None)
    if self.log_errors and not limitStr:
        self.control_file_error("gpload:input:log_errors requires " +
                                "gpload:input:error_limit to be specified")

    self.extSchemaName = self.getconfig('gpload:external:schema', str, None)
    if self.extSchemaName == '%':
        # '%' means "same schema as the target table"
        self.extSchemaName = self.schema

    # get the list of columns to use in the extnernal table
    if not self.from_cols_from_user:
        # don't put values serial columns
        from_cols = [a for a in self.from_columns if a[3] != True]
    else:
        from_cols = self.from_columns

    if formatType == 'csv' or formatType == 'text':
        if self.getconfig('gpload:input:fill_missing_fields', bool, False):
            self.formatOpts += 'fill missing fields'

    # If the 'reuse tables' option was specified we now try to find an
    # already existing external table in the catalog which will match
    # the one that we need to use. It must have identical attributes,
    # external location, format, and encoding specifications.
    if self.reuse_tables == True:
        if self.staging_table:
            if '.' in self.staging_table:
                self.log(self.ERROR, "Character '.' is not allowed in staging_table parameter. Please use EXTERNAL->SCHEMA to set the schema of external table")
            self.extTableName = quote_unident(self.staging_table)
            # check whether the user-named staging table already exists
            sql = """SELECT n.nspname as Schema, c.relname as Name
                     FROM pg_catalog.pg_class c
                     INNER JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                     WHERE c.relkind IN ('r','v','S','f','')
                     AND c.relname = '%s'
                  """ % self.extTableName
            if self.extSchemaName is not None:
                sql += "AND n.nspname = '%s'" % quote_unident(self.extSchemaName)
            else:
                sql += """AND pg_catalog.pg_table_is_visible(c.oid)
                          AND n.nspname <> 'pg_catalog'
                          AND n.nspname <> 'information_schema'
                          AND n.nspname !~ '^pg_toast'"""
            result = self.db.query(sql).getresult()
            if len(result) > 0:
                self.extSchemaTable = self.get_ext_schematable(quote_unident(self.extSchemaName), self.extTableName)
                self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable)
                return
        # staging table is not specified, we need to find it manually
        else:
            # process the single quotes in order to successfully find an existing external table to reuse.
            self.formatOpts = self.formatOpts.replace("E'\\''","'\''")
            if self.fast_match:
                sql = self.get_fast_match_exttable_query(formatType, self.formatOpts,
                        limitStr, self.extSchemaName, self.log_errors, encodingCode)
            else:
                sql = self.get_reuse_exttable_query(formatType, self.formatOpts,
                        limitStr, from_cols, self.extSchemaName, self.log_errors, encodingCode)

            resultList = self.db.query(sql).getresult()
            if len(resultList) > 0:
                # found an external table to reuse. no need to create one. we're done here.
                self.extTableName = (resultList[0])[0]
                # fast match result is only table name, so we need add schema info
                if self.fast_match:
                    self.extSchemaTable = self.get_ext_schematable(quote_unident(self.extSchemaName), self.extTableName)
                else:
                    self.extSchemaTable = self.extTableName
                self.log(self.INFO, "reusing external table %s" % self.extSchemaTable)
                return

            # didn't find an existing external table suitable for reuse. Format a reusable
            # name and issue a CREATE EXTERNAL TABLE on it. Hopefully we can use it next time
            # around
            self.extTableName = "ext_gpload_reusable_%s" % self.unique_suffix
            self.log(self.INFO, "did not find an external table to reuse. creating %s" % self.get_ext_schematable(self.extSchemaName, self.extTableName))

    # process the single quotes in order to successfully create an external table.
    self.formatOpts = self.formatOpts.replace("'\''","E'\\''")

    # construct a CREATE EXTERNAL TABLE statement and execute it
    self.extSchemaTable = self.get_ext_schematable(self.extSchemaName, self.extTableName)
    sql = "create external table %s" % self.extSchemaTable
    sql += "(%s)" % ','.join(['%s %s' % (a[0], a[1]) for a in from_cols])
    sql += "location(%s) "%locationStr
    sql += "format%s "% quote(formatType)
    if len(self.formatOpts) > 0:
        sql += "(%s) "% self.formatOpts
    if encodingStr:
        sql += "encoding%s "%quote(encodingStr)
    if self.log_errors:
        sql += "log errors "

    if limitStr:
        if limitStr < 2:
            self.control_file_error("error_limit must be 2 or higher")
        sql += "segment reject limit %s "%limitStr

    try:
        self.db.query(sql.encode('utf-8'))
    except Exception as e:
        # BUG FIX: the original used unicode(e), which is a NameError on
        # Python 3; str(e) is the py3 equivalent.
        self.log(self.ERROR, 'could not run SQL "%s": %s' % (sql, str(e)))

    # set up to drop the external table at the end of operation, unless user
    # specified the 'reuse_tables' option, in which case we don't drop
    if self.reuse_tables == False:
        self.cleanupSql.append('drop external table if exists %s'%self.extSchemaTable)
def create_staging_table(self):
    '''
    Create a new staging table or find a reusable staging table to use for this operation
    (only valid for update/merge operations).
    '''
    # make sure we set the correct distribution policy
    distcols = self.getconfig('gpload:output:match_columns', list)

    # warn about leftovers from older gpload versions that used the
    # 'temp_gpload_reusable_*' naming scheme
    sql = "SELECT * FROM pg_class WHERE relname LIKE 'temp_gpload_reusable_%%';"
    resultList = self.db.query(sql).getresult()
    if len(resultList) > 0:
        self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found.
                 Greenplum recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""")

    # If the 'reuse tables' option was specified we now try to find an
    # already existing staging table in the catalog which will match
    # the one that we need to use. It must meet the reuse conditions
    is_temp_table = 'TEMP '
    # staging columns: only target columns that actually have a mapping
    target_columns = []
    for column in self.into_columns:
        if column[2]:
            target_columns.append([quote_unident(column[0]), column[1]])

    if self.reuse_tables == True:
        is_temp_table = ''
        target_table_name = quote_unident(self.table)

        # create a string from all reuse conditions for staging tables and ancode it
        conditions_str = self.get_staging_conditions_string(target_table_name, target_columns, distcols).encode()
        encoding_conditions = hashlib.md5(conditions_str).hexdigest()

        sql = self.get_reuse_staging_table_query(encoding_conditions)
        resultList = self.db.query(sql).getresult()

        if len(resultList) > 0:
            # found a temp table to reuse. no need to create one. we're done here.
            self.staging_table_name = (resultList[0])[0]
            self.log(self.INFO, "reusing staging table %s" % self.staging_table_name)

            # truncate it so we don't use old data
            self.do_truncate(self.staging_table_name)
            return

        # didn't find an existing staging table suitable for reuse. Format a reusable
        # name and issue a CREATE TABLE on it (without TEMP!). Hopefully we can use it
        # next time around
        # we no longer need the timestamp, since we will never want to create few
        # tables with same encoding_conditions
        self.staging_table_name = "staging_gpload_reusable_%s" % (encoding_conditions)
        self.log(self.INFO, "did not find a staging table to reuse. creating %s" % self.staging_table_name)

    # MPP-14667 - self.reuse_tables should change one, and only one, aspect of how we build the following table,
    # and that is, whether it's a temp table or not. In other words, is_temp_table = '' iff self.reuse_tables == True.
    sql = 'CREATE %sTABLE %s ' % (is_temp_table, self.staging_table_name)
    cols = ['"%s" %s' % (a[0], a[1]) for a in target_columns]
    sql += "(%s)" % ','.join(cols)
    #sql += " DISTRIBUTED BY (%s)" % ', '.join(distcols)
    self.log(self.LOG, sql)
    if not self.options.D:
        self.db.query(sql)
        if not self.reuse_tables:
            self.cleanupSql.append('DROP TABLE IF EXISTS %s' % self.staging_table_name)
def count_errors(self):
    '''Return the number of rows rejected by this load; 0 when error
    logging is off or in -D (dry-run) mode.'''
    # route backend NOTICEs through our notice processor; the client API
    # for this differs between the GPDB 6 and 7 driver versions
    if self.gpdb_version < "7.0.0": # for gpdb6
        notice_processor(self.db.notices())
    else:
        self.db.set_notice_receiver(notice_processor)
    if self.log_errors and not self.options.D:
        # make sure we only get errors for our own instance
        if not self.reuse_tables:
            queryStr = "select count(*) from gp_read_error_log('%s')" % pg.escape_string(self.extSchemaTable)
            results = self.db.query(queryStr).getresult()
            return (results[0])[0]
        else: # reuse_tables
            # the reused table's error log persists across runs, so only
            # count entries newer than this run's start timestamp
            queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (pg.escape_string(self.extSchemaTable), self.startTimestamp)
            results = self.db.query(queryStr).getresult()
            global NUM_WARN_ROWS
            NUM_WARN_ROWS = (results[0])[0]
            return (results[0])[0];
    return 0
def report_errors(self):
    '''Log how many rows were rejected and set self.exitValue (1 when any
    row was rejected, else 0).'''
    error_count = self.count_errors()
    if error_count == 1:
        self.log(self.WARN, '1 bad row')
    elif error_count:
        self.log(self.WARN, '%d bad rows' % error_count)

    # error message is also deleted if external table is dropped.
    # if reuse_table is set, error message is not deleted.
    if error_count and self.log_errors and self.reuse_tables:
        self.log(self.WARN, "Please use following query to access the detailed error")
        self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(pg.escape_string(self.extSchemaTable), self.startTimestamp))

    self.exitValue = 0
    if error_count:
        self.exitValue = 1
def do_insert(self, dest):
    """
    Handle the INSERT case
    insert data into dest table from self external table
    """
    self.log(self.DEBUG, "into columns " + str(self.into_columns))
    # a[2] is mapping target
    #cols = filter(lambda a:a[2]!=None, self.into_columns)
    cols = [a for a in self.into_columns if a[2]!=None]

    # only insert non-serial columns, unless the user told us to
    # insert the serials explicitly
    # a[3] is has_sequence (bool)
    if not self.from_cols_from_user:
        cols = [a for a in cols if a[3] == False]

    # INSERT INTO dest (names...) SELECT mapped-expressions... FROM ext table
    sql = 'INSERT INTO %s' % dest
    sql += ' (%s)' % ','.join([a[0] for a in cols])
    sql += ' SELECT %s' % ','.join([a[2] for a in cols])
    sql += ' FROM %s' % self.extSchemaTable

    # cktan: progress thread is not reliable. revisit later.
    #progress = Progress(self,self.ports)
    #progress.start()
    #self.threads.append(progress)
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsInserted = self.db.query(sql.encode('utf-8'))
        except Exception as e:
            # We need to be a bit careful about the error since it may contain non-unicode characters
            strE = e.__str__().encode().decode('unicode-escape')
            strF = sql.encode().decode('unicode-escape')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
    #progress.condition.acquire()
    #progress.number = 1
    #progress.condition.wait()
    #progress.condition.release()
    self.report_errors()
def do_method_insert(self):
    '''INSERT mode: create (or reuse) the external table, then insert
    from it straight into the qualified target table.'''
    self.create_external_table()
    self.do_insert(self.get_qualified_tablename())
def map_stuff(self, config, configFormat, index):
    '''
    Look up every column named by the *config* setting in
    into_columns_dict and render it through configFormat; abort via
    self.log(ERROR) when a column is unknown or has no value at *index*.
    Return
        list: [ configFormat(into_clomuns[0], into_clomuns[index]) ]
    '''
    rendered = []
    column_names = convertListToDelimited(self.getconfig(config, list))
    for col_name in column_names:
        if type(col_name) != str and type(col_name) != str:
            self.control_file_error("%s must be a YAML sequence of strings" % config)
        entry = self.into_columns_dict.get(col_name)
        if not entry:
            self.log(self.ERROR, 'column %s in %s does not exist' % (col_name, config))
        if not entry[index]:
            self.log(self.ERROR, 'there is no mapping from the column %s in %s' % (col_name, config))
        # entry[0] is the target column name, entry[index] its mapping;
        # the caller's configFormat turns the pair into e.g.
        # "column_name = from_table.column_name"
        rendered.append(configFormat(entry[0], entry[index]))
    return rendered
def fix_update_cond(self, match):
    '''re.sub callback: qualify a matched column reference with the
    into_table alias.'''
    matched_text = match.group(0)
    self.log(self.DEBUG, matched_text)
    return 'into_table.%s' % matched_text
def do_update(self, fromname, index):
    """
    UPDATE case. Update into_table from the staging table *fromname*,
    building SET / FROM / WHERE clauses out of update_columns,
    match_columns and (optionally) update_condition.
    """
    sql = 'update %s into_table ' % self.get_qualified_tablename()
    sql += 'set %s '%','.join(self.map_stuff('gpload:output:update_columns',(lambda x,y:'%s=from_table.%s' % (x, y)),index))
    sql += 'from %s from_table' % fromname
    match = self.map_stuff('gpload:output:match_columns'
                           , lambda x,y:'into_table.%s=from_table.%s' % (x, y)
                           , index)
    update_condition = self.getconfig('gpload:output:update_condition',
                                      str, None)
    if update_condition:
        ### need to optimize
        #
        # Place the table alias infront of column references.
        #
        # The following logic is not bullet proof. It may not work
        # correctly if the user uses an identifier in both its
        # delimited and un-delimited format (e.g. where c1 < 7 and "c1" > 2)
        # Better lexing and parsing needs to be done here to fix all cases.
        #
        update_condition = ' ' + update_condition + ' '
        for name, colType, mapto, seq in self.into_columns:
            regexp = r'(?<=[^\w])%s(?=[^\w])' % name
            self.log(self.DEBUG, 'update_condition re: ' + regexp)
            temp_update_condition = update_condition
            updateConditionList = splitIntoLiteralsAndNonLiterals(update_condition)
            skip = False
            update_condition = ''
            for uc in updateConditionList:
                if skip == False:
                    # non-literal segment: qualify column references
                    uc = re.sub(regexp, self.fix_update_cond, uc)
                    skip = True
                else:
                    # BUG FIX: 'skip' was never reset, so only the first
                    # segment was ever rewritten and the literal/non-literal
                    # split was pointless. Alternate so string-literal
                    # segments pass through untouched.
                    skip = False
                update_condition = update_condition + uc
            if update_condition == temp_update_condition:
                # see if column can be undelimited, and try again.
                if len(name) > 2 and name[1:-1] == name[1:-1].lower():
                    regexp = r'(?<=[^\w])%s(?=[^\w])' % name[1:-1]
                    self.log(self.DEBUG, 'update_condition undelimited re: ' + regexp)
                    update_condition = re.sub(regexp
                                              , self.fix_update_cond
                                              , update_condition
                                              )
        self.log(self.DEBUG, "updated update_condition to %s" %
                 update_condition)
        match.append(update_condition)
    sql += ' where %s' % ' and '.join(match)
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsUpdated = self.db.query(sql.encode('utf-8'))
        except Exception as e:
            # BUG FIX: str(str(e), errors='ignore') raises
            # "TypeError: decoding str is not supported" on Python 3;
            # sanitize the message the same way do_insert() does.
            strE = e.__str__().encode().decode('unicode-escape')
            strF = sql.encode().decode('unicode-escape')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
def get_qualified_tablename(self):
    '''
    Return the target table name qualified with its schema
    ("schema.table"), built from self.schema and self.table.
    '''
    return "%s.%s" % (self.schema, self.table)
def get_table_dist_key(self):
    '''
    Return the list of distribution-key column names for the target table
    (empty when the catalog query matches nothing, e.g. random
    distribution).
    '''
    # NOTE: this query should be re-written better. the problem is that it is
    # not possible to perform a cast on a table name with spaces...
    # The catalog column holding the key changed in GPDB 6: attrnums before,
    # distkey from 6.0.0 on.
    if noGpVersion or self.gpdb_version < "6.0.0":
        sql = "select attname from pg_attribute a, gp_distribution_policy p , pg_class c, pg_namespace n "+\
            "where a.attrelid = c.oid and " + \
            "a.attrelid = p.localoid and " + \
            "a.attnum = any (p.attrnums) and " + \
            "c.relnamespace = n.oid and " + \
            "n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))
    else:
        sql = "select attname from pg_attribute a, gp_distribution_policy p , pg_class c, pg_namespace n "+\
            "where a.attrelid = c.oid and " + \
            "a.attrelid = p.localoid and " + \
            "a.attnum = any (p.distkey) and " + \
            "c.relnamespace = n.oid and " + \
            "n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))

    resultList = self.db.query(sql).getresult()
    # one attname per result row
    return [row[0] for row in resultList]
def table_supports_update(self):
    """ Check wether columns being updated are distribution key."""
    dist_columns = set()
    for dk in self.get_table_dist_key():
        dist_columns.add(quote_ident(dk))
    self.distkey = dist_columns

    if dist_columns:
        # not randomly distributed - check that UPDATE_COLUMNS isn't part of the distribution key
        updateColumnList = self.getconfig('gpload:output:update_columns',
                                          list,
                                          returnOriginal=True)
        update_columns = set(convertListToDelimited(updateColumnList))
        overlap = dist_columns.intersection(update_columns)
        if overlap:
            self.control_file_error('update_columns cannot reference column(s) in distribution key (%s)' % ', '.join(list(dist_columns)))
def do_method_update(self):
    """Load the data in and update an existing table based upon it"""
    # reject updates that would touch the distribution key
    self.table_supports_update()
    self.create_staging_table()
    self.create_external_table()
    # load into the staging table first, then update the target from it
    self.do_insert(self.staging_table_name)
    # These rows are inserted temporarily for processing, so set inserted rows back to zero.
    self.rowsInserted = 0
    self.do_update(self.staging_table_name, 0)
def do_method_merge(self):
    """insert data not already in the table, update remaining items"""
    self.table_supports_update()
    self.create_staging_table()
    self.create_external_table()
    self.do_insert(self.staging_table_name)
    self.rowsInserted = 0 # MPP-13024. No rows inserted yet (only to temp table).
    self.do_update(self.staging_table_name, 0)

    # delete the updated rows in staging table for merge
    # so we can directly insert new rows left in staging table
    # and avoid left outer join when insert new rows which is poor in performance
    match = self.map_stuff('gpload:output:match_columns'
                           , lambda x,y:'staging_table.%s=into_table.%s' % (x, y)
                           , 0)
    sql = 'DELETE FROM %s staging_table '% self.staging_table_name
    sql += 'USING %s into_table WHERE '% self.get_qualified_tablename()
    sql += ' %s' % ' AND '.join(match)
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.db.query(sql.encode('utf-8'))
        except Exception as e:
            # BUG FIX: str(str(e), errors='ignore') raises
            # "TypeError: decoding str is not supported" on Python 3;
            # sanitize the message the same way do_insert() does.
            strE = e.__str__().encode().decode('unicode-escape')
            strF = sql.encode().decode('unicode-escape')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)

    # insert new rows to the target table; row_number() over the match
    # columns de-duplicates the remaining staging rows so each key is
    # inserted only once
    match = self.map_stuff('gpload:output:match_columns',lambda x,y:'into_table.%s=from_table.%s'%(x,y),0)
    matchColumns = self.getconfig('gpload:output:match_columns',list)
    cols = [a for a in self.into_columns if a[2] != None]
    sql = 'INSERT INTO %s ' % self.get_qualified_tablename()
    sql += '(%s) ' % ','.join([a[0] for a in cols])
    sql += '(SELECT %s ' % ','.join(['from_table.%s' % a[0] for a in cols])
    sql += 'FROM (SELECT *, row_number() OVER (PARTITION BY %s) AS gpload_row_number ' % ','.join(matchColumns)
    sql += 'FROM %s) AS from_table ' % self.staging_table_name
    sql += 'WHERE gpload_row_number=1)'
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsInserted = self.db.query(sql.encode('utf-8'))
        except Exception as e:
            # BUG FIX: same broken str(str(e), errors='ignore') pattern as
            # above; use do_insert()'s sanitization instead.
            strE = e.__str__().encode().decode('unicode-escape')
            strF = sql.encode().decode('unicode-escape')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
def do_truncate(self, tblname):
    '''Issue TRUNCATE against tblname; skipped (log only) in -D dry-run
    mode, and any database error is logged via self.log(ERROR).'''
    self.log(self.LOG, "Truncate table %s" %(tblname))
    if self.options.D:
        return
    try:
        truncateSQLtext = "truncate %s" % tblname
        self.db.query(truncateSQLtext.encode('utf-8'))
    except Exception as e:
        self.log(self.ERROR, 'could not execute truncate target %s: %s' % (tblname, str(e)))
def do_method(self):
    '''
    setup gpload config,
    start a transaction,
    execute the 'before sql',
    do method (insert, update, merge) accordingly,
    execute the 'after sql'
    '''
    # Is the table to be truncated before the load?
    preload = self.getconfig('gpload:preload', list, default=None)
    method = self.getconfig('gpload:output:mode', str, 'insert').lower()
    self.log_errors = self.getconfig('gpload:input:log_errors', bool, False)
    truncate = False
    self.reuse_tables = False
    # update/merge need a staging table, so wrap them in a transaction unless
    # the user disabled automatic transactions.
    if not self.options.no_auto_trans and not method=='insert':
        self.db.query("BEGIN")
    if preload:
        truncate = self.getconfig('gpload:preload:truncate',bool,False)
        self.reuse_tables = self.getconfig('gpload:preload:reuse_tables',bool,False)
        self.fast_match = self.getconfig('gpload:preload:fast_match',bool,False)
        # fast_match only makes sense when external/staging tables are reused.
        if self.reuse_tables == False and self.fast_match == True:
            self.log(self.WARN, 'fast_match is ignored when reuse_tables is false!')
        self.staging_table = self.getconfig('gpload:preload:staging_table', str, default=None)
    # An explicit error table implies error logging and table reuse.
    if self.error_table:
        self.log_errors = True
        self.reuse_tables = True
        self.staging_table = self.getconfig('gpload:preload:staging_table', str, default=None)
        self.fast_match = self.getconfig('gpload:preload:fast_match',bool,False)
    if truncate == True:
        if method=='insert':
            self.do_truncate(self.schemaTable)
        else:
            self.log(self.ERROR, 'preload truncate operation should be used with insert ' +
                     'operation only. used with %s' % method)
    # sql pre or post processing?
    sql = self.getconfig('gpload:sql', list, default=None)
    before = None
    after = None
    if sql:
        before = self.getconfig('gpload:sql:before', str, default=None)
        after = self.getconfig('gpload:sql:after', str, default=None)
    if before:
        self.log(self.LOG, "Pre-SQL from user: %s" % before)
        if not self.options.D:
            try:
                self.db.query(before.encode('utf-8'))
            except Exception as e:
                self.log(self.ERROR, 'could not execute SQL in sql:before "%s": %s' %
                         (before, str(e)))
    # Dispatch to the configured load strategy.
    if method=='insert':
        self.do_method_insert()
    elif method=='update':
        self.do_method_update()
    elif method=='merge':
        self.do_method_merge()
    else:
        self.control_file_error('unsupported method %s' % method)
    # truncate the staging table to avoid dumping it's content - see MPP-15474
    if method=='merge' or method=='update':
        self.do_truncate(self.staging_table_name)
    if after:
        self.log(self.LOG, "Post-SQL from user: %s" % after)
        if not self.options.D:
            try:
                self.db.query(after.encode('utf-8'))
            except Exception as e:
                self.log(self.ERROR, 'could not execute SQL in sql:after "%s": %s' %
                         (after, str(e)))
    if not self.options.no_auto_trans and not method=='insert':
        self.db.query("COMMIT")
def stop_gpfdists(self):
    """Kill all spawned gpfdist subprocesses and join the worker threads."""
    if self.subprocesses:
        self.log(self.LOG, 'killing gpfdist')
        for a in self.subprocesses:
            try:
                if platform.system() in ['Windows', 'Microsoft']:
                    # win32 API is better but hard for us
                    # to install, so we use the crude method
                    subprocess.Popen("taskkill /F /T /PID %i" % a.pid,
                                     shell=True, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                else:
                    os.kill(a.pid, signal.SIGKILL)
            except OSError:
                # Process already exited; nothing to clean up.
                pass
    self.log(self.LOG, 'terminating all threads')
    for t in self.threads:
        t.join()
    self.log(self.LOG, 'all threads are terminated')
def run2(self):
    """Execute one full load: read config, connect, start gpfdists, load.

    Called by run(), which provides the error handling and cleanup wrapper.
    """
    self.log(self.DEBUG, 'config ' + str(self.config))
    start = time.time()
    self.read_config()
    self.setup_connection()
    self.read_table_metadata()
    self.read_columns()
    self.read_mapping()
    self.start_gpfdists()
    self.do_method()
    self.log(self.INFO, 'running time: %.2f seconds'%(time.time()-start))
def run(self):
    """Top-level entry point: install signal handlers, run the load, and
    always stop gpfdists / run cleanup SQL, then report final statistics."""
    self.db = None
    self.rowsInserted = 0
    self.rowsUpdated = 0
    signal.signal(signal.SIGINT, handle_kill)
    signal.signal(signal.SIGTERM, handle_kill)
    # win32 doesn't do SIGQUIT
    if not platform.system() in ['Windows', 'Microsoft']:
        signal.signal(signal.SIGQUIT, handle_kill)
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
    try:
        try:
            self.run2()
        except Exception:
            # Log the backtrace; only echo it to the console in verbose mode.
            traceback.print_exc(file=self.logfile)
            self.logfile.flush()
            self.exitValue = 2
            if (self.options.qv > self.INFO):
                traceback.print_exc()
            else:
                self.log(self.ERROR, "unexpected error -- backtrace " +
                         "written to log file")
    finally:
        # Cleanup runs regardless of success or failure.
        self.stop_gpfdists()
        if self.cleanupSql:
            self.log(self.LOG, 'removing temporary data')
            self.setup_connection()
            for a in self.cleanupSql:
                try:
                    self.log(self.DEBUG, a)
                    self.db.query(a)
                except (Exception, SystemExit):
                    traceback.print_exc(file=self.logfile)
                    self.logfile.flush()
                    traceback.print_exc()
    if self.db != None:
        self.db.close()
    self.log(self.INFO, 'rows Inserted = ' + str(self.rowsInserted))
    self.log(self.INFO, 'rows Updated = ' + str(self.rowsUpdated))
    self.log(self.INFO, 'data formatting errors = ' + str(NUM_WARN_ROWS))
    # exitValue: 0 = success, 1 = success with warnings, anything else = failure.
    if self.exitValue==0:
        self.log(self.INFO, 'gpload succeeded')
    elif self.exitValue==1:
        self.log(self.INFO, 'gpload succeeded with warnings')
    else:
        self.log(self.INFO, 'gpload failed')
if __name__ == '__main__':
    # Run the loader and exit with its status code. os._exit is used (instead
    # of sys.exit) so that lingering non-daemon threads cannot block shutdown.
    g = gpload(sys.argv[1:])
    g.run()
    sys.stdout.flush()
    sys.stderr.flush()
    os._exit(g.exitValue)
| 50wu/gpdb | gpMgmt/bin/gpload.py | Python | apache-2.0 | 112,056 | [
"ADF"
] | 96a61ecf23af382c4170fae3627a30ed757a126cf430662957170b7c8488ce22 |
from forte.core import flog
from forte.solvers.solver import Feature, Solver
from forte.model import MolecularModel
from forte import SCFInfo
class HF(Solver):
    """
    A class to run Hartree-Fock computations
    """
    def __init__(
        self,
        input_nodes,
        state,
        restricted=True,
        e_convergence=1.0e-10,
        d_convergence=1.0e-6,
        docc=None,
        socc=None,
        options=None,
        cbh=None
    ):
        """
        initialize a HF object

        Parameters
        ----------
        input_nodes: Solver
            the object that provides information about this computation
        state: StateInfo
            the state to optimize (defines the number of alpha/beta electrons and m_s)
        restricted: bool
            do restricted HF?
        e_convergence: float
            energy convergence criterion
        d_convergence: float
            density matrix convergence criterion
        docc: list(int)
            The number of doubly occupied orbitals per irrep
        socc: list(int)
            The number of singly occupied orbitals per irrep
        options: dict()
            Additional options passed to control psi4
        cbh: CallbackHandler
            A callback object used to inject code into the HF class
        """
        # initialize common objects
        super().__init__(
            input_nodes=input_nodes,
            needs=[Feature.MODEL],
            provides=[Feature.MODEL, Feature.ORBITALS],
            options=options,
            cbh=cbh
        )
        self._data = self.input_nodes[0].data
        self._state = state
        self._restricted = restricted
        self._e_convergence = e_convergence
        self._d_convergence = d_convergence
        self._docc = docc
        self._socc = socc

    def __repr__(self):
        """
        return a string representation of this object
        """
        return f'HF(restricted={self._restricted},e_convergence={self._e_convergence},d_convergence={self._d_convergence})'

    def __str__(self):
        """
        return a string representation of this object
        """
        return repr(self)

    @property
    def restricted(self):
        # True for RHF/ROHF, False for UHF
        return self._restricted

    @property
    def state(self):
        # the electronic state targeted by this computation
        return self._state

    @property
    def charge(self):
        """Molecular charge implied by the state's electron count."""
        # compute the number of electrons: total nuclear charge minus electrons
        molecule = self.data.model.molecule
        natom = molecule.natom()
        charge = round(sum([molecule.Z(i) for i in range(natom)])) - self.state.na() - self.state.nb()
        return charge

    @property
    def multiplicity(self):
        # spin multiplicity (2S + 1) taken from the target state
        return self.state.multiplicity()

    @property
    def e_convergence(self):
        return self._e_convergence

    @property
    def d_convergence(self):
        return self._d_convergence

    @property
    def docc(self):
        return self._docc

    @property
    def socc(self):
        return self._socc

    def check_symmetry_(self, psi_wfn):
        """Verify the converged SCF solution has the requested irrep.

        The irrep of an open-shell determinant is the XOR of the irreps of
        its singly occupied orbitals. Raises RuntimeError on a mismatch.
        """
        socc = psi_wfn.soccpi()
        sym = 0
        for h in range(socc.n()):
            if socc[h] % 2 == 1:
                sym = sym ^ h
        if self.state.irrep() != sym:
            model = self.data.model
            # NOTE(review): `target` is computed but unused in the message below.
            target = model.symmetry.irrep_label(self.state.irrep())
            actual = model.symmetry.irrep_label(sym)
            raise RuntimeError(
                f'(HF) The HF equations converged on a state with the wrong symmetry ({actual}).'
                '\nPass the docc and socc options to converge to a solution with the correct symmetry.'
            )

    def _run(self):
        """Run a Hartree-Fock computation"""
        import psi4

        # reset psi4's options to avoid pollution
        psi4.core.clean_options()

        # currently limited to molecules
        if not isinstance(self.data.model, MolecularModel):
            raise RuntimeError('HF.energy() is implemented only for MolecularModel objects')

        molecule = self.data.model.molecule
        molecule.set_molecular_charge(self.charge)
        molecule.set_multiplicity(self.multiplicity)

        # prepare options for psi4: map forte integral types to psi4 SCF_TYPE
        scf_type_dict = {
            'CONVENTIONAL': 'PK',
            'STD': 'PK',
        }
        # convert to psi4 terminology
        int_type = self.model.int_type.upper()
        if int_type in scf_type_dict:
            scf_type = scf_type_dict[int_type]
        else:
            scf_type = int_type

        # choose the SCF reference from restriction and multiplicity
        if self._restricted:
            ref = 'RHF' if self.multiplicity == 1 else 'ROHF'
        else:
            ref = 'UHF'

        options = {
            'BASIS': self.data.model.basis,
            'REFERENCE': ref,
            'SCF_TYPE': scf_type,
            'E_CONVERGENCE': self.e_convergence,
            'D_CONVERGENCE': self.d_convergence
        }

        # optionally specify docc/socc
        if self.docc is not None:
            options['DOCC'] = self.docc
        if self.socc is not None:
            options['SOCC'] = self.socc
        if self.data.model.scf_aux_basis is not None:
            options['DF_BASIS_SCF'] = self.data.model.scf_aux_basis

        # user-supplied options override the defaults computed above
        full_options = {**options, **self._options}

        # set the options
        psi4.set_options(full_options)

        # pipe output to the file self._output_file
        psi4.core.set_output_file(self._output_file, True)

        # pre hf callback
        self._cbh.call('pre hf', self)

        # run scf and return the energy and a wavefunction object
        flog('info', 'HF: calling psi4.energy().')
        energy, psi_wfn = psi4.energy('scf', molecule=molecule, return_wfn=True)
        flog('info', 'HF: psi4.energy() done')

        # check symmetry
        flog('info', 'HF: checking symmetry of the HF solution')
        self.check_symmetry_(psi_wfn)

        # add the energy to the results
        flog('info', f'HF: hf energy = {energy}')
        self._results.add('hf energy', energy, 'Hartree-Fock energy', 'Eh')

        # store calculation outputs in the Data object
        self.data.psi_wfn = psi_wfn
        self.data.scf_info = SCFInfo(psi_wfn)

        # post hf callback
        self._cbh.call('post hf', self)

        flog('info', 'HF: calling psi4.core.clean()')
        psi4.core.clean()

        return self
| evangelistalab/forte | forte/solvers/hf.py | Python | lgpl-3.0 | 6,269 | [
"Psi4"
] | 6d6e8ab094bb1f3a09105b5c436fda9888bf420d72cb6cbf2e5e574435e79215 |
# Sample Monty Python data (apparently Head-First-Python style exercise data).
cast = ['Cleese', 'Palin', 'Jones', 'Idle']
# A deliberately nested list: title, year, directors, running time, and a
# nested cast list -- used to exercise recursive list processing below.
movies = ['The Holy Grail', 1975, 'Terry Jones & Terry Gilliam', 91, ['Graham Chapman', ['Michael Palin', 'John Cleese', 'Terry Gilliam', 'Eric Idle', 'Terry Jones']]]
fav_movies = ['The Holy Grail', 'The Life of Brian']
def print_lol(the_list):
    """Print every item of *the_list*, one per line, descending depth-first
    into any nested lists so the output is a flat listing of leaf items."""
    # Explicit iterator stack instead of recursion: same depth-first,
    # left-to-right order as the recursive formulation.
    stack = [iter(the_list)]
    while stack:
        try:
            item = next(stack[-1])
        except StopIteration:
            stack.pop()
            continue
        if isinstance(item, list):
            stack.append(iter(item))
        else:
            print(item)
"Brian"
] | 9c2b841b2a4a1e91a07ace979b41f154b9ef60f2724c091e1f85b12f09b54049 |
class ASTVisitor():
    """Base class for read-only AST traversal; subclass and override visit()."""

    def visit(self, astnode):
        'A read-only function which looks at a single AST node.'
        pass

    def return_value(self):
        # Result of a walk(); the base visitor accumulates nothing.
        return None
class ASTModVisitor(ASTVisitor):
    '''A visitor class that can also construct a new, modified AST.

    Two methods are offered: the normal visit() method, which focuses on analyzing
    and/or modifying a single node; and the post_visit() method, which allows you
    to modify the child list of a node.
    The default implementation does nothing; it simply builds up itself, unmodified.'''

    def visit(self, astnode):
        # Note that this overrides the super's implementation, because we need a
        # non-None return value.
        return astnode

    # NOTE(review): ASTNode.mod_walk invokes this as
    # post_visit(node, visit_value, child_values) -- three arguments -- while
    # this base signature accepts only two. Subclasses presumably declare the
    # three-argument form; confirm and reconcile the base signature.
    def post_visit(self, visit_value, child_values):
        '''A function which constructs a return value out of its children.

        This can be used to modify an AST by returning a different or modified
        ASTNode than the original. The top-level return value will then be the
        new AST.'''
        return visit_value
class ASTNode(object):
    """Base node of the AST: holds a parent pointer and an ordered child list,
    and provides pretty-printing plus two traversal strategies (walk/mod_walk)."""

    def __init__(self):
        self.parent = None
        self._children = []

    @property
    def children(self):
        # NOTE(review): mutating the returned list directly (e.g. append, as the
        # subclasses do) bypasses the setter below, so those children never get
        # their .parent attribute assigned -- confirm this is intended.
        return self._children

    @children.setter
    def children(self, children):
        # Assigning a whole child list re-parents every child to this node.
        self._children = children
        for child in children:
            child.parent = self

    def pprint(self,indent=''):
        '''Recursively prints a formatted string representation of the AST.'''
        print(indent + self.__class__.__name__)
        indent = indent + '  '
        for child in self._children:
            child.pprint(indent)

    def walk(self, visitor):
        '''Traverses an AST, calling visitor.visit() on every node.

        This is a depth-first, pre-order traversal. Parents will be visited before
        any children, children will be visited in order, and (by extension) a node's
        children will all be visited before its siblings.
        The visitor may modify attributes, but may not add or delete nodes.'''
        visitor.visit(self)
        for child in self.children:
            child.walk(visitor)
        return visitor.return_value()

    def mod_walk(self, mod_visitor):
        '''Traverses an AST, building up a return value from visitor methods.

        Similar to walk(), but constructs a return value from the result of
        postvisit() calls. This can be used to modify an AST by building up the
        desired new AST with return values.'''
        selfval = mod_visitor.visit(self)
        child_values = [child.mod_walk(mod_visitor) for child in self.children]
        # NOTE(review): this passes three arguments (node, selfval, child_values),
        # but the base ASTModVisitor.post_visit accepts only (visit_value,
        # child_values). Works only with subclasses declaring the 3-arg form.
        retval = mod_visitor.post_visit(self, selfval, child_values)
        return retval
class ASTProgram(ASTNode):
    """Root AST node: its children are the program's top-level statements."""

    def __init__(self, statements):
        super().__init__()
        # Uses the property setter, so every statement is re-parented here.
        self.children = statements
class ASTImport(ASTNode):
    """AST node for an import statement; stores the module name (childless)."""

    def __init__(self, mod):
        super().__init__()
        self.mod = mod

    @property
    def module(self):
        # The imported module's name.
        return self.mod
class ASTComponent(ASTNode):
    """AST node for a named component.

    The first child is the component's name (an ASTID); any remaining
    children are the component's expressions.
    """

    def __init__(self, component_id, expression_list=None):
        """Build a component node.

        Parameters
        ----------
        component_id : ASTID
            The component's name node (becomes the first child).
        expression_list : list, optional
            Expression nodes appended as further children. Defaults to no
            expressions. (Replaces the original mutable-default `[]`, which
            is a shared-state hazard; behavior is otherwise unchanged.)
        """
        super().__init__()
        self.children.append(component_id)
        for expression in (expression_list if expression_list is not None else []):
            self.children.append(expression)

    @property
    def name(self):
        # The ASTID naming this component (first child).
        return self.children[0]

    @property
    def expressions(self):
        # All children after the name node.
        return self.children[1:]
class ASTInputExpr(ASTNode):
    """AST node for a component's input declaration; children are the inputs."""

    def __init__(self, input_list):
        super().__init__()
        for input_i in input_list:
            self.children.append(input_i)
class ASTOutputExpr(ASTNode):
    """AST node for a component's output declaration; children are the outputs."""

    def __init__(self, output_list):
        super().__init__()
        for output_i in output_list:
            self.children.append(output_i)
class ASTAssignmentExpr(ASTNode):
    """AST node for an assignment: first child is the bound name, second the value."""

    def __init__(self, expression_id, expression):
        super().__init__()
        self.children.append(expression_id)
        self.children.append(expression)

    @property
    def binding(self):
        # The name being assigned to (first child).
        return self.children[0]

    @property
    def value(self):
        # The expression assigned to the binding (second child).
        return self.children[1]
class ASTEvalExpr(ASTNode):
    """AST node for an operator application.

    The first child is the operator; any remaining children are its
    arguments.
    """

    def __init__(self, operator, parameter_list=None):
        """Build an evaluation node.

        Parameters
        ----------
        operator : ASTNode
            The operator node (becomes the first child).
        parameter_list : list, optional
            Argument nodes appended as further children. Defaults to no
            arguments. (Replaces the original mutable-default `[]`, which
            is a shared-state hazard; behavior is otherwise unchanged.)
        """
        super().__init__()
        self.children.append(operator)
        for parameter in (parameter_list if parameter_list is not None else []):
            self.children.append(parameter)

    @property
    def op(self):
        # The operator node (first child).
        return self.children[0]

    @property
    def args(self):
        # The argument nodes (all children after the operator).
        return self.children[1:]
# These are already complete.
class ASTID(ASTNode):
    """Leaf AST node for an identifier, with an optional declared type."""

    def __init__(self, name, typedecl=None):
        super().__init__()
        self.name = name
        self.type = typedecl
class ASTLiteral(ASTNode):
    """Leaf AST node for a literal value; its type is always 'Scalar'."""

    def __init__(self, value):
        super().__init__()
        self.value = value
        self.type = 'Scalar'
| 207leftovers/cs207project | pype/ast.py | Python | mit | 4,473 | [
"VisIt"
] | 9a7e874cc78052fa06af29d926288bb0860f8ed7220404ea3a391f955c84befa |
# Orca
#
# Copyright (C) 2011-2013 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Heuristic means to infer the functional/displayed label of a widget."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (C) 2011-2013 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
from . import debug
class LabelInference:

    def __init__(self, script):
        """Creates an instance of the LabelInference class.

        Arguments:
        - script: the script with which this instance is associated.
        """
        self._script = script
        # Per-inference caches, cleared at the end of each infer() call.
        self._lineCache = {}
        self._extentsCache = {}
        self._isWidgetCache = {}

    def infer(self, obj, focusedOnly=True):
        """Attempt to infer the functional/displayed label of obj.

        Arguments
        - obj: the unlabeled widget
        - focusedOnly: If True, only infer if the widget has focus.

        Returns the text which we think is the label, or None.
        """
        debug.println(debug.LEVEL_FINE, "INFER label for: %s" % obj)
        if not obj:
            return None, []
        if focusedOnly and not obj.getState().contains(pyatspi.STATE_FOCUSED):
            debug.println(debug.LEVEL_FINE, "INFER - object not focused")
            return None, []

        # Strategies are tried in decreasing order of reliability.
        result, objects = None, []
        if not result:
            result, objects = self.inferFromTextLeft(obj)
            debug.println(debug.LEVEL_FINE, "INFER - Text Left: %s" % result)
        if not result or self._preferRight(obj):
            # NOTE(review): inferFromTextRight always returns a 2-tuple, which
            # is truthy, so the `or result` fallback here can never trigger.
            result, objects = self.inferFromTextRight(obj) or result
            debug.println(debug.LEVEL_FINE, "INFER - Text Right: %s" % result)
        if not result:
            result, objects = self.inferFromTable(obj)
            debug.println(debug.LEVEL_FINE, "INFER - Table: %s" % result)
        if not result:
            result, objects = self.inferFromTextAbove(obj)
            debug.println(debug.LEVEL_FINE, "INFER - Text Above: %s" % result)
        if not result:
            result, objects = self.inferFromTextBelow(obj)
            debug.println(debug.LEVEL_FINE, "INFER - Text Below: %s" % result)

        # TODO - We probably do not wish to "infer" from these. Instead, we
        # should ensure that this content gets presented as part of the widget.
        # (i.e. the label is something on screen. Widget name and description
        # are each something other than a label.)
        if not result:
            result, objects = obj.name, []
            debug.println(debug.LEVEL_FINE, "INFER - Name: %s" % result)
        if not result:
            result, objects = obj.description, []
            debug.println(debug.LEVEL_FINE, "INFER - Description: %s" % result)
        if result:
            result = result.strip()
            result = result.replace("\n", " ")
        self.clearCache()
        return result, objects

    def clearCache(self):
        """Dumps whatever we've stored for performance purposes."""
        self._lineCache = {}
        self._extentsCache = {}
        self._isWidgetCache = {}

    def _preferRight(self, obj):
        """Returns True if we should prefer text on the right, rather than the
        left, for the object obj."""
        onRightRoles = [pyatspi.ROLE_CHECK_BOX, pyatspi.ROLE_RADIO_BUTTON]
        return obj.getRole() in onRightRoles

    def _preventRight(self, obj):
        """Returns True if we should not permit inference based on text to
        the right for the object obj."""
        roles = [pyatspi.ROLE_COMBO_BOX,
                 pyatspi.ROLE_LIST,
                 pyatspi.ROLE_LIST_BOX]
        return obj.getRole() in roles

    def _preferTop(self, obj):
        """Returns True if we should prefer text above, rather than below for
        the object obj."""
        roles = [pyatspi.ROLE_COMBO_BOX,
                 pyatspi.ROLE_LIST,
                 pyatspi.ROLE_LIST_BOX]
        return obj.getRole() in roles

    def _preventBelow(self, obj):
        """Returns True if we should not permit inference based on text below
        the object obj."""
        roles = [pyatspi.ROLE_ENTRY,
                 pyatspi.ROLE_PASSWORD_TEXT]
        return obj.getRole() not in roles

    def _isSimpleObject(self, obj):
        """Returns True if the given object has 'simple' contents, such as text
        without embedded objects or a single embedded object without text."""
        if not obj:
            return False
        try:
            children = [child for child in obj]
        except (LookupError, RuntimeError):
            debug.println(debug.LEVEL_FINE, 'Dead Accessible in %s' % obj)
            return False
        # Links do not count against "simplicity".
        children = [x for x in children if x.getRole() != pyatspi.ROLE_LINK]
        if len(children) > 1:
            return False
        try:
            text = obj.queryText()
        except NotImplementedError:
            return True
        string = text.getText(0, -1).strip()
        # More than one embedded object means mixed content: not simple.
        if string.count(self._script.EMBEDDED_OBJECT_CHARACTER) > 1:
            return False
        return True

    def _cannotLabel(self, obj):
        """Returns True if the given object should not be treated as a label."""
        if not obj:
            return True
        nonLabelTextRoles = [pyatspi.ROLE_HEADING]
        if obj.getRole() in nonLabelTextRoles:
            return True
        return self._isWidget(obj)

    def _isWidget(self, obj):
        """Returns True if the given object is a widget."""
        if not obj:
            return False
        rv = self._isWidgetCache.get(hash(obj))
        if rv != None:
            return rv
        widgetRoles = [pyatspi.ROLE_CHECK_BOX,
                       pyatspi.ROLE_RADIO_BUTTON,
                       pyatspi.ROLE_TOGGLE_BUTTON,
                       pyatspi.ROLE_COMBO_BOX,
                       pyatspi.ROLE_LIST,
                       pyatspi.ROLE_LIST_BOX,
                       pyatspi.ROLE_MENU,
                       pyatspi.ROLE_MENU_ITEM,
                       pyatspi.ROLE_ENTRY,
                       pyatspi.ROLE_PASSWORD_TEXT,
                       pyatspi.ROLE_PUSH_BUTTON]
        isWidget = obj.getRole() in widgetRoles
        self._isWidgetCache[hash(obj)] = isWidget
        return isWidget

    def _getExtents(self, obj, startOffset=0, endOffset=-1):
        """Returns (x, y, width, height) of the text at the given offsets
        if the object implements accessible text, or just the extents of
        the object if it doesn't implement accessible text."""
        if not obj:
            return 0, 0, 0, 0
        rv = self._extentsCache.get((hash(obj), startOffset, endOffset))
        if rv:
            return rv
        extents = 0, 0, 0, 0
        try:
            text = obj.queryText()
        except NotImplementedError:
            pass
        else:
            skipTextExtents = [pyatspi.ROLE_ENTRY, pyatspi.ROLE_PASSWORD_TEXT]
            if not obj.getRole() in skipTextExtents:
                if endOffset == -1:
                    endOffset = text.characterCount
                extents = text.getRangeExtents(startOffset, endOffset, 0)
        # Fall back to the component's extents if the text range was empty.
        if not (extents[2] and extents[3]):
            ext = obj.queryComponent().getExtents(0)
            extents = ext.x, ext.y, ext.width, ext.height
        self._extentsCache[(hash(obj), startOffset, endOffset)] = extents
        return extents

    def _createLabelFromContents(self, obj):
        """Gets the functional label text associated with the object obj."""
        if not self._isSimpleObject(obj):
            return ''
        if self._cannotLabel(obj):
            return ''
        contents = self._script.utilities.getObjectsFromEOCs(obj)
        objects = [content[0] for content in contents]
        # A candidate containing any widget cannot serve as a label.
        if list(filter(self._isWidget, objects)):
            return ''
        strings = [content[3] or content[0].name for content in contents]
        return ''.join(strings)

    def _getLineContents(self, obj):
        """Get the (obj, startOffset, endOffset, string) tuples for the line
        containing the object, obj."""
        rv = self._lineCache.get(hash(obj))
        if rv:
            return rv
        key = hash(obj)
        start = None
        if self._isWidget(obj):
            # For widgets, look at the line of the embedding parent instead.
            start, end = self._script.utilities.getHyperlinkRange(obj)
            obj = obj.parent
        try:
            text = obj.queryText()
        except:
            start = 0
        else:
            if start == None:
                start = max(0, text.caretOffset)
        rv = self._script.utilities.getLineContentsAtOffset(obj, start)
        self._lineCache[key] = rv
        return rv

    def _getPreviousObject(self, obj):
        """Gets the object prior to obj."""
        index = obj.getIndexInParent()
        if not index > 0:
            return obj.parent
        prevObj = obj.parent[index-1]
        # Descend to the deepest last child of the previous sibling.
        if prevObj and prevObj.childCount:
            prevObj = prevObj[prevObj.childCount - 1]
        return prevObj

    def inferFromTextLeft(self, obj, proximity=75):
        """Attempt to infer the functional/displayed label of obj by
        looking at the contents of the current line, which are to the
        left of this object

        Arguments
        - obj: the unlabeled widget
        - proximity: pixels expected for a match

        Returns the text which we think is the label, or None.
        """
        extents = self._getExtents(obj)
        contents = self._getLineContents(obj)
        content = [o for o in contents if o[0] == obj]
        try:
            index = contents.index(content[0])
        except IndexError:
            index = len(contents)
        onLeft = contents[0:index]
        # Trim away everything up to and including the last unlabelable item.
        start = 0
        for i in range(len(onLeft) - 1, -1, -1):
            if self._cannotLabel(onLeft[i][0]):
                start = i + 1
                break
        onLeft = onLeft[start:]
        if not (onLeft and onLeft[0]):
            return None, []
        lObj, start, end, string = onLeft[-1]
        lExtents = self._getExtents(lObj, start, end)
        # Horizontal gap between the candidate text and the widget.
        distance = extents[0] - (lExtents[0] + lExtents[2])
        if 0 <= distance <= proximity:
            strings = [content[3] or content[0].name for content in onLeft]
            result = ''.join(strings)
            if result.strip():
                return result, [content[0] for content in onLeft]
        return None, []

    def inferFromTextRight(self, obj, proximity=25):
        """Attempt to infer the functional/displayed label of obj by
        looking at the contents of the current line, which are to the
        right of this object

        Arguments
        - obj: the unlabeled widget
        - proximity: pixels expected for a match

        Returns the text which we think is the label, or None.
        """
        if self._preventRight(obj):
            return None, []
        extents = self._getExtents(obj)
        contents = self._getLineContents(obj)
        content = [o for o in contents if o[0] == obj]
        try:
            index = contents.index(content[0])
        except IndexError:
            index = len(contents)
        onRight = contents[min(len(contents), index+1):]
        # Stop at the first unlabelable item, unless this widget type prefers
        # labels on the right (e.g. checkboxes), in which case include it.
        end = len(onRight)
        for i, item in enumerate(onRight):
            if self._cannotLabel(item[0]):
                if not self._preferRight(obj):
                    return None, []
                end = i + 1
                break
        onRight = onRight[0:end]
        if not (onRight and onRight[0]):
            return None, []
        rObj, start, end, string = onRight[0]
        rExtents = self._getExtents(rObj, start, end)
        distance = rExtents[0] - (extents[0] + extents[2])
        if distance <= proximity or self._preferRight(obj):
            strings = [content[3] or content[0].name for content in onRight]
            result = ''.join(strings)
            if result.strip():
                return result, [content[0] for content in onRight]
        return None, []

    def inferFromTextAbove(self, obj, proximity=20):
        """Attempt to infer the functional/displayed label of obj by
        looking at the contents of the line above the line containing
        the object obj.

        Arguments
        - obj: the unlabeled widget
        - proximity: pixels expected for a match

        Returns the text which we think is the label, or None.
        """
        thisLine = self._getLineContents(obj)
        if not (thisLine and thisLine[0]):
            return None, []
        prevObj, start, end, string = thisLine[0]
        if obj == prevObj:
            start, end = self._script.utilities.getHyperlinkRange(prevObj)
            prevObj = prevObj.parent
        try:
            text = prevObj.queryText()
        except (AttributeError, NotImplementedError):
            return None, []
        objX, objY, objWidth, objHeight = self._getExtents(obj)
        if not (objWidth and objHeight):
            return None, []
        start = max(start - 1, 0)
        prevLine = self._script.utilities.getLineContentsAtOffset(prevObj, start)
        if not (prevLine and prevLine[0]):
            return None, []
        prevObj, start, end, string = prevLine[0]
        if string.strip() and not self._cannotLabel(prevObj):
            x, y, width, height = self._getExtents(prevObj, start, end)
            # Vertical gap between the candidate's bottom and the widget's top.
            distance = objY - (y + height)
            if 0 <= distance <= proximity:
                return string, [prevObj]
        # Walk backwards through preceding objects looking for nearby text.
        while prevObj:
            prevObj = self._getPreviousObject(prevObj)
            x, y, width, height = self._getExtents(prevObj)
            distance = objY - (y + height)
            if distance > proximity:
                return None, []
            if prevObj.getRole() == pyatspi.ROLE_TABLE_CELL \
               and not prevObj in [obj.parent, obj.parent.parent]:
                return None, []
            if distance < 0:
                continue
            # Candidates starting far to the left are unlikely to be labels.
            if x + 150 < objX:
                continue
            string = self._createLabelFromContents(prevObj)
            if string:
                return string, [prevObj]
        return None, []

    def inferFromTextBelow(self, obj, proximity=20):
        """Attempt to infer the functional/displayed label of obj by
        looking at the contents of the line above the line containing
        the object obj.

        Arguments
        - obj: the unlabeled widget
        - proximity: pixels expected for a match

        Returns the text which we think is the label, or None.
        """
        if self._preventBelow(obj):
            return None, []
        thisLine = self._getLineContents(obj)
        if not (thisLine and thisLine[0]):
            return None, []
        lastObj, start, end, string = thisLine[-1]
        if obj == lastObj:
            start, end = self._script.utilities.getHyperlinkRange(obj)
            lastObj = lastObj.parent
        objX, objY, objWidth, objHeight = self._getExtents(obj)
        if not (objWidth and objHeight):
            return None, []
        nextLine = self._script.utilities.getLineContentsAtOffset(lastObj, end)
        if not (nextLine and nextLine[0]):
            return None, []
        nextObj, start, end, string = nextLine[0]
        if string.strip() and not self._cannotLabel(nextObj):
            x, y, width, height = self._getExtents(nextObj, start, end)
            distance = y - (objY + objHeight)
            if 0 <= distance <= proximity:
                return string, [nextObj]
        return None, []

    def inferFromTable(self, obj, proximityForRight=50):
        """Attempt to infer the functional/displayed label of obj by looking
        at the contents of the surrounding table cells. Note that this approach
        assumes a simple table in which the widget is the sole occupant of its
        cell.

        Arguments
        - obj: the unlabeled widget

        Returns the text which we think is the label, or None.
        """
        pred = lambda x: x.getRole() == pyatspi.ROLE_TABLE_CELL
        cell = pyatspi.utils.findAncestor(obj, pred)
        if not self._isSimpleObject(cell):
            return None, []
        if not cell in [obj.parent, obj.parent.parent]:
            return None, []
        pred = lambda x: x.getRole() == pyatspi.ROLE_TABLE
        grid = pyatspi.utils.findAncestor(cell, pred)
        if not grid:
            return None, []
        try:
            table = grid.queryTable()
        except NotImplementedError:
            return None, []
        index = self._script.utilities.cellIndex(cell)
        row = table.getRowAtIndex(index)
        col = table.getColumnAtIndex(index)
        objX, objY, objWidth, objHeight = self._getExtents(obj)
        # Candidate: cell to the left.
        if col > 0 and not self._preferRight(obj):
            candidate = table.getAccessibleAt(row, col - 1)
            label = self._createLabelFromContents(candidate)
            if label.strip():
                return label, [candidate]
        # Candidate: cell to the right (within proximity).
        if col < table.nColumns and not self._preventRight(obj):
            candidate = table.getAccessibleAt(row, col + 1)
            x, y, width, height = self._getExtents(candidate)
            distance = x - (objX + objWidth)
            if distance <= proximityForRight or self._preferRight(obj):
                label = self._createLabelFromContents(candidate)
                if label.strip():
                    return label, [candidate]
        # Candidates: cells above and below; pick the vertically closer one.
        cellAbove = cellBelow = labelAbove = labelBelow = None
        if row > 0:
            cellAbove = table.getAccessibleAt(row - 1, col)
            labelAbove = self._createLabelFromContents(cellAbove)
            if labelAbove and self._preferTop(obj):
                return labelAbove, [cellAbove]
        if row < table.nRows and not self._preventBelow(obj):
            cellBelow = table.getAccessibleAt(row + 1, col)
            labelBelow = self._createLabelFromContents(cellBelow)
        if labelAbove and labelBelow:
            aboveX, aboveY, aboveWidth, aboveHeight = self._getExtents(cellAbove)
            belowX, belowY, belowWidth, belowHeight = self._getExtents(cellBelow)
            dAbove = objY - (aboveY + aboveHeight)
            dBelow = belowY - (objY + objHeight)
            if dAbove <= dBelow:
                return labelAbove, [cellAbove]
            return labelBelow, [cellBelow]
        if labelAbove:
            return labelAbove, [cellAbove]
        if labelBelow:
            return labelBelow, [cellBelow]

        # None of the cells immediately surrounding this cell seem to be serving
        # as a functional label. Therefore, see if this table looks like a grid
        # of widgets with the functional labels in the first row.
        firstRow = [table.getAccessibleAt(0, i) for i in range(table.nColumns)]
        if not firstRow or list(filter(self._isWidget, firstRow)):
            return None, []
        cells = [table.getAccessibleAt(i, col) for i in range(1, table.nRows)]
        cells = [x for x in cells if x != None]
        if [x for x in cells if x.childCount and x[0].getRole() != obj.getRole()]:
            return None, []
        label = self._createLabelFromContents(firstRow[col])
        if label:
            return label, [firstRow[col]]
        return None, []
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/label_inference.py | Python | gpl-3.0 | 20,024 | [
"ORCA"
] | 03894fabcdfcb8742c02522716c2c15c9749b43cfcc7dced1b9bfaa804437ace |
import socket
import webbrowser
import httplib2
import oauth2client.clientsecrets as clientsecrets
from apiclient.discovery import build
from functools import wraps
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import OOB_CALLBACK_URN
from oauth2client.file import Storage
from oauth2client.file import CredentialsFileSymbolicLinkError
from oauth2client.tools import ClientRedirectHandler
from oauth2client.tools import ClientRedirectServer
from oauth2client.util import scopes_to_string
from .apiattr import ApiAttribute
from .apiattr import ApiAttributeMixin
from .settings import LoadSettingsFile
from .settings import ValidateSettings
from .settings import SettingsError
from .settings import InvalidConfigError
class AuthError(Exception):
    """Base error for authentication/authorization errors."""
# NOTE(review): unlike its siblings, this derives from IOError rather than
# AuthError -- callers catching AuthError will not catch it; confirm intended.
class InvalidCredentialsError(IOError):
    """Error trying to read credentials file."""
class AuthenticationRejected(AuthError):
    """User rejected authentication."""
class AuthenticationError(AuthError):
    """General authentication error."""
class RefreshError(AuthError):
    """Access token refresh error."""
def LoadAuth(decoratee):
    """Decorator to check if the auth is valid and loads auth if not."""
    @wraps(decoratee)
    def _decorated(self, *args, **kwargs):
        if self.auth is None:  # Initialize auth if needed.
            self.auth = GoogleAuth()
        # Re-run the local webserver OAuth2 flow when the token has expired.
        if self.auth.access_token_expired:
            self.auth.LocalWebserverAuth()
        if self.auth.service is None:  # Check if drive api is built.
            self.auth.Authorize()
        return decoratee(self, *args, **kwargs)
    return _decorated
def CheckAuth(decoratee):
    """Decorator to check if it requires OAuth2 flow request.

    Loads saved credentials when configured, runs the decorated flow method
    only when no valid credentials exist, exchanges any returned code, and
    persists fresh credentials if ``save_credentials`` is enabled.
    """
    @wraps(decoratee)
    def _decorated(self, *args, **kwargs):
        needs_save = False          # did we acquire new credentials?
        auth_code = None
        save_credentials = self.settings.get('save_credentials')
        if self.credentials is None and save_credentials:
            self.LoadCredentials()
        if self.flow is None:
            self.GetFlow()
        if self.credentials is None:
            # No credentials at all: run the decorated auth flow.
            auth_code = decoratee(self, *args, **kwargs)
            needs_save = True
        elif self.access_token_expired:
            if self.credentials.refresh_token is not None:
                # Expired but refreshable: just refresh the token.
                self.Refresh()
            else:
                # Expired and not refreshable: re-run the auth flow.
                auth_code = decoratee(self, *args, **kwargs)
                needs_save = True
        if auth_code is not None:
            self.Auth(auth_code)
        if needs_save and save_credentials:
            self.SaveCredentials()
    return _decorated
class GoogleAuth(ApiAttributeMixin, object):
    """Wrapper class for oauth2client library in google-api-python-client.

    Loads all settings and credentials from one 'settings.yaml' file
    and performs common OAuth2.0 related functionality such as authentication
    and authorization.

    NOTE(review): this module uses Python 2 syntax (print statements,
    ``except X, e``, ``raw_input``) and is not Python 3 compatible.
    """
    # Settings used when no settings.yaml can be loaded.
    DEFAULT_SETTINGS = {
        'client_config_backend': 'file',
        'client_config_file': 'client_secrets.json',
        'save_credentials': False,
        'oauth_scope': ['https://www.googleapis.com/auth/drive']
    }
    # Keys that must be present in client_config before a flow can be built.
    CLIENT_CONFIGS_LIST = ['client_id', 'client_secret', 'auth_uri',
                           'token_uri', 'revoke_uri', 'redirect_uri']
    settings = ApiAttribute('settings')
    client_config = ApiAttribute('client_config')
    flow = ApiAttribute('flow')
    credentials = ApiAttribute('credentials')
    http = ApiAttribute('http')
    service = ApiAttribute('service')

    def __init__(self, settings_file='settings.yaml'):
        """Create an instance of GoogleAuth.

        This constructor just sets the path of settings file.
        It does not actually read the file.

        :param settings_file: path of settings file. 'settings.yaml' by default.
        :type settings_file: str.
        """
        ApiAttributeMixin.__init__(self)
        self.client_config = {}
        try:
            self.settings = LoadSettingsFile(settings_file)
        except SettingsError:
            # Unreadable/invalid settings file: fall back to defaults.
            self.settings = self.DEFAULT_SETTINGS
        else:
            if self.settings is None:
                # Empty settings file: also fall back to defaults.
                self.settings = self.DEFAULT_SETTINGS
            else:
                ValidateSettings(self.settings)

    @property
    def access_token_expired(self):
        """Checks if access token doesn't exist or is expired.

        :returns: bool -- True if access token doesn't exist or is expired.
        """
        if self.credentials is None:
            return True
        return self.credentials.access_token_expired

    @CheckAuth
    def LocalWebserverAuth(self, host_name='localhost',
                           port_numbers=[8080, 8090]):
        """Authenticate and authorize from user by creating local webserver and
        retrieving authentication code.

        This function is not for webserver application. It creates local webserver
        for user from standalone application.

        :param host_name: host name of the local webserver.
        :type host_name: str.
        :param port_numbers: list of port numbers to be tried to used.
        :type port_numbers: list.
        :returns: str -- code returned from local webserver
        :raises: AuthenticationRejected, AuthenticationError
        """
        success = False
        port_number = 0
        # Try each candidate port until one can be bound.
        for port in port_numbers:
            port_number = port
            try:
                httpd = ClientRedirectServer((host_name, port), ClientRedirectHandler)
            except socket.error, e:
                pass
            else:
                success = True
                break
        if success:
            oauth_callback = 'http://%s:%s/' % (host_name, port_number)
        else:
            print 'Failed to start a local webserver. Please check your firewall'
            print 'settings and locally running programs that may be blocking or'
            print 'using configured ports. Default ports are 8080 and 8090.'
            raise AuthenticationError()
        self.flow.redirect_uri = oauth_callback
        authorize_url = self.GetAuthUrl()
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print 'Your browser has been opened to visit:'
        print
        print '    ' + authorize_url
        print
        # Block until the browser redirect hits the local server once.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            print 'Authentication request was rejected'
            raise AuthenticationRejected('User rejected authentication')
        if 'code' in httpd.query_params:
            return httpd.query_params['code']
        else:
            print 'Failed to find "code" in the query parameters of the redirect.'
            print 'Try command-line authentication'
            raise AuthenticationError('No code found in redirect')

    @CheckAuth
    def CommandLineAuth(self):
        """Authenticate and authorize from user by printing authentication url
        retrieving authentication code from command-line.

        :returns: str -- code returned from commandline.
        """
        # Out-of-band redirect: Google displays the code for manual copy/paste.
        self.flow.redirect_uri = OOB_CALLBACK_URN
        authorize_url = self.GetAuthUrl()
        print 'Go to the following link in your browser:'
        print
        print '    ' + authorize_url
        print
        return raw_input('Enter verification code: ').strip()

    def LoadCredentials(self, backend=None):
        """Loads credentials or create empty credentials if it doesn't exist.

        :param backend: target backend to save credential to.
        :type backend: str.
        :raises: InvalidConfigError
        """
        if backend is None:
            backend = self.settings.get('save_credentials_backend')
            if backend is None:
                raise InvalidConfigError('Please specify credential backend')
        if backend == 'file':
            self.LoadCredentialsFile()
        else:
            raise InvalidConfigError('Unknown save_credentials_backend')

    def LoadCredentialsFile(self, credentials_file=None):
        """Loads credentials or create empty credentials if it doesn't exist.

        Loads credentials file from path in settings if not specified.

        :param credentials_file: path of credentials file to read.
        :type credentials_file: str.
        :raises: InvalidConfigError, InvalidCredentialsError
        """
        if credentials_file is None:
            credentials_file = self.settings.get('save_credentials_file')
            if credentials_file is None:
                raise InvalidConfigError('Please specify credentials file to read')
        try:
            storage = Storage(credentials_file)
            self.credentials = storage.get()
        except CredentialsFileSymbolicLinkError:
            raise InvalidCredentialsError('Credentials file cannot be symbolic link')

    def SaveCredentials(self, backend=None):
        """Saves credentials according to specified backend.

        If you have any specific credentials backend in mind, don't use this
        function and use the corresponding function you want.

        :param backend: backend to save credentials.
        :type backend: str.
        :raises: InvalidConfigError
        """
        if backend is None:
            backend = self.settings.get('save_credentials_backend')
            if backend is None:
                raise InvalidConfigError('Please specify credential backend')
        if backend == 'file':
            self.SaveCredentialsFile()
        else:
            raise InvalidConfigError('Unknown save_credentials_backend')

    def SaveCredentialsFile(self, credentials_file=None):
        """Saves credentials to the file in JSON format.

        :param credentials_file: destination to save file to.
        :type credentials_file: str.
        :raises: InvalidConfigError, InvalidCredentialsError
        """
        if self.credentials is None:
            raise InvalidCredentialsError('No credentials to save')
        if credentials_file is None:
            credentials_file = self.settings.get('save_credentials_file')
            if credentials_file is None:
                raise InvalidConfigError('Please specify credentials file to read')
        try:
            storage = Storage(credentials_file)
            storage.put(self.credentials)
            # Attach the store so future refreshes persist automatically.
            self.credentials.set_store(storage)
        except CredentialsFileSymbolicLinkError:
            raise InvalidCredentialsError('Credentials file cannot be symbolic link')

    def LoadClientConfig(self, backend=None):
        """Loads client configuration according to specified backend.

        If you have any specific backend to load client configuration from in mind,
        don't use this function and use the corresponding function you want.

        :param backend: backend to load client configuration from.
        :type backend: str.
        :raises: InvalidConfigError
        """
        if backend is None:
            backend = self.settings.get('client_config_backend')
            if backend is None:
                raise InvalidConfigError('Please specify client config backend')
        if backend == 'file':
            self.LoadClientConfigFile()
        elif backend == 'settings':
            self.LoadClientConfigSettings()
        else:
            raise InvalidConfigError('Unknown client_config_backend')

    def LoadClientConfigFile(self, client_config_file=None):
        """Loads client configuration file downloaded from APIs console.

        Loads client config file from path in settings if not specified.

        :param client_config_file: path of client config file to read.
        :type client_config_file: str.
        :raises: InvalidConfigError
        """
        if client_config_file is None:
            client_config_file = self.settings['client_config_file']
        try:
            client_type, client_info = clientsecrets.loadfile(client_config_file)
        except clientsecrets.InvalidClientSecretsError, error:
            raise InvalidConfigError('Invalid client secrets file %s' % error)
        if not client_type in (clientsecrets.TYPE_WEB,
                               clientsecrets.TYPE_INSTALLED):
            raise InvalidConfigError('Unknown client_type of client config file')
        try:
            config_index = ['client_id', 'client_secret', 'auth_uri', 'token_uri']
            for config in config_index:
                self.client_config[config] = client_info[config]
            # revoke_uri is optional; first redirect URI is used by default.
            self.client_config['revoke_uri'] = client_info.get('revoke_uri')
            self.client_config['redirect_uri'] = client_info['redirect_uris'][0]
        except KeyError:
            raise InvalidConfigError('Insufficient client config in file')

    def LoadClientConfigSettings(self):
        """Loads client configuration from settings file.

        :raises: InvalidConfigError
        """
        for config in self.CLIENT_CONFIGS_LIST:
            try:
                self.client_config[config] = self.settings['client_config'][config]
            except KeyError:
                print config
                raise InvalidConfigError('Insufficient client config in settings')

    def GetFlow(self):
        """Gets Flow object from client configuration.

        :raises: InvalidConfigError
        """
        if not all(config in self.client_config \
                   for config in self.CLIENT_CONFIGS_LIST):
            self.LoadClientConfig()
        constructor_kwargs = {
            'redirect_uri': self.client_config['redirect_uri'],
            'auth_uri': self.client_config['auth_uri'],
            'token_uri': self.client_config['token_uri'],
        }
        if self.client_config['revoke_uri'] is not None:
            constructor_kwargs['revoke_uri'] = self.client_config['revoke_uri']
        self.flow = OAuth2WebServerFlow(
            self.client_config['client_id'],
            self.client_config['client_secret'],
            scopes_to_string(self.settings['oauth_scope']),
            **constructor_kwargs)
        if self.settings.get('get_refresh_token'):
            # Offline access is required to receive a refresh token.
            self.flow.params.update({'access_type': 'offline'})

    def Refresh(self):
        """Refreshes the access_token.

        :raises: RefreshError
        """
        if self.credentials is None:
            raise RefreshError('No credential to refresh.')
        if self.credentials.refresh_token is None:
            raise RefreshError('No refresh_token found.'
                               'Please set access_type of OAuth to offline.')
        if self.http is None:
            self.http = httplib2.Http()
        try:
            self.credentials.refresh(self.http)
        except AccessTokenRefreshError, error:
            raise RefreshError('Access token refresh failed: %s' % error)

    def GetAuthUrl(self, keys = None):
        """Creates authentication url where user visits to grant access.

        :param keys: optional dict with 'client_id'/'client_secret' overrides.
        :returns: str -- Authentication url.
        """
        if(keys != None):
            # Update some of the settings in the client_config dict.
            self.client_config['client_id'] = keys['client_id']
            self.client_config['client_secret'] = keys['client_secret']
        if self.flow is None:
            self.GetFlow()
        return self.flow.step1_get_authorize_url()

    def Auth(self, code):
        """Authenticate, authorize, and build service.

        :param code: Code for authentication.
        :type code: str.
        :raises: AuthenticationError
        """
        self.Authenticate(code)
        self.Authorize()

    def Authenticate(self, code):
        """Authenticates given authentication code back from user.

        :param code: Code for authentication.
        :type code: str.
        :raises: AuthenticationError
        """
        if self.flow is None:
            self.GetFlow()
        try:
            # Exchange the one-time code for OAuth2 credentials.
            self.credentials = self.flow.step2_exchange(code)
        except FlowExchangeError, e:
            raise AuthenticationError('OAuth2 code exchange failed: %s' % e)
        print 'Authentication successful.'

    def Authorize(self):
        """Authorizes and builds service.

        :raises: AuthenticationError
        """
        if self.http is None:
            self.http = httplib2.Http()
        if self.access_token_expired:
            raise AuthenticationError('No valid credentials provided to authorize')
        self.http = self.credentials.authorize(self.http)
        self.service = build('drive', 'v2', http=self.http)
| azumimuo/family-xbmc-addon | script.xbmcbackup/resources/lib/pydrive/auth.py | Python | gpl-2.0 | 15,110 | [
"VisIt"
] | 0dae35d8e0f92b045386d5b51147ec19451c4951d6a2e3da5fb0e34efba300f4 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import pi, cos, sin, ravel, unravel_index
try:
from scipy.spatial import cKDTree
except ImportError:
print('The kdtree_fast method requires the scipy.spatial module.')
print('Ignore this warning when using the tunnel_fast method.')
__author__ = 'kmu'
'''
The tunnel_fast and kdtree_fast methods are taken from the Unidata Python Workshop.
See (https://github.com/kmunve/unidata-python-workshop/blob/master/netcdf-by-coordinates.ipynb) for more information.
'''
def tunnel_fast(latvar, lonvar, lat0, lon0):
    """Find the closest point in a set of (lat, lon) points to a query point.

    Minimizes the squared 3-D "tunnel" distance (straight line through the
    sphere) between the unit-sphere position of each grid point and the
    query point, which gives the same minimizer as great-circle distance.

    :param latvar: 2D latitude variable from an open netCDF dataset (degrees)
    :param lonvar: 2D longitude variable from an open netCDF dataset (degrees)
    :param lat0: query point latitude in degrees
    :param lon0: query point longitude in degrees
    :return: (iy_min, ix_min) such that (latvar[iy_min, ix_min],
             lonvar[iy_min, ix_min]) is closest to (lat0, lon0)
    """
    rad_factor = pi / 180.0  # trigonometry needs angles in radians
    # Read latitude and longitude from file into numpy arrays.
    latvals = latvar[:] * rad_factor
    lonvals = lonvar[:] * rad_factor
    lat0_rad = lat0 * rad_factor
    lon0_rad = lon0 * rad_factor
    # Cartesian coordinates on the unit sphere, vectorized over the grid.
    clat, clon = cos(latvals), cos(lonvals)
    slat, slon = sin(latvals), sin(lonvals)
    delX = cos(lat0_rad) * cos(lon0_rad) - clat * clon
    delY = cos(lat0_rad) * sin(lon0_rad) - clat * slon
    delZ = sin(lat0_rad) - slat
    dist_sq = delX ** 2 + delY ** 2 + delZ ** 2
    minindex_1d = dist_sq.argmin()  # 1D index of minimum element
    iy_min, ix_min = unravel_index(minindex_1d, latvals.shape)
    return iy_min, ix_min
def kdtree_fast(latvar, lonvar, lat0, lon0):
    """Find the (iy, ix) grid index nearest to the query point (lat0, lon0).

    Builds a k-d tree over the 3-D unit-sphere coordinates of every grid
    point, then queries it for the point nearest the query location.

    :param latvar: 2D latitude variable from an open netCDF dataset (degrees)
    :param lonvar: 2D longitude variable from an open netCDF dataset (degrees)
    :param lat0: query point latitude in degrees
    :param lon0: query point longitude in degrees
    :return: (iy_min, ix_min) indices of the nearest grid point
    """
    deg2rad = pi / 180.0  # trigonometry needs angles in radians
    lat_rad = latvar[:] * deg2rad
    lon_rad = lonvar[:] * deg2rad
    grid_shape = lat_rad.shape
    cos_lat, cos_lon = cos(lat_rad), cos(lon_rad)
    sin_lat, sin_lon = sin(lat_rad), sin(lon_rad)
    # One (x, y, z) triple per grid cell, flattened row-major.
    points = list(zip(ravel(cos_lat * cos_lon),
                      ravel(cos_lat * sin_lon),
                      ravel(sin_lat)))
    tree = cKDTree(points)
    qlat = lat0 * deg2rad
    qlon = lon0 * deg2rad
    query_xyz = [cos(qlat) * cos(qlon), cos(qlat) * sin(qlon), sin(qlat)]
    _dist_sq, flat_index = tree.query(query_xyz)
    iy_min, ix_min = unravel_index(flat_index, grid_shape)
    return iy_min, ix_min
if __name__ == "__main__":
    # Placeholder entry point: this module is meant to be imported, not run.
    print("...")
"NetCDF"
] | dd9aac4569d1eeb5d6cc23c8f2a88c68a7cfdaaab7115bc5c1c769d957643ada |
# Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
""" Classes and methods for finding consensus trees.
This module contains a ``_BitString`` class to assist the consensus tree
searching and some common consensus algorithms such as strict, majority rule and
adam consensus.
"""
from __future__ import division
import random
import itertools
from ast import literal_eval
from Bio.Phylo import BaseTree
__docformat__ = "restructuredtext en"
class _BitString(str):
    """Helper class for binary string data (PRIVATE).

    Assistant class of binary string data used for storing and
    counting compatible clades in consensus tree searching. It includes
    some binary manipulation(&|^~) methods.

    _BitString is a sub-class of ``str`` object that only accepts two
    characters('0' and '1'), with additional functions for binary-like
    manipulation(&|^~). It is used to count and store the clades in
    multiple trees in consensus tree searching. During counting, the
    clades will be considered the same if their terminals(in terms of
    ``name`` attribute) are the same.

    For example, let's say two trees are provided as below to search
    their strict consensus tree::

        tree1: (((A, B), C),(D, E))
        tree2: ((A, (B, C)),(D, E))

    For both trees, a _BitString object '11111' will represent their
    root clade. Each '1' stands for the terminal clade in the list
    [A, B, C, D, E](the order might not be the same, it's determined
    by the ``get_terminal`` method of the first tree provided). For
    the clade ((A, B), C) in tree1 and (A, (B, C)) in tree2, they both
    can be represented by '11100'. Similarly, '11000' represents clade
    (A, B) in tree1, '01100' represents clade (B, C) in tree2, and '00011'
    represents clade (D, E) in both trees.

    So, with the ``_count_clades`` function in this module, finally we
    can get the clade counts and their _BitString representation as follows
    (the root and terminals are omitted)::

        clade   _BitString   count
        ABC     '11100'      2
        DE      '00011'      2
        AB      '11000'      1
        BC      '01100'      1

    To get the _BitString representation of a clade, we can use the following
    code snippet::

        # suppose we are provided with a tree list, the first thing to do is
        # to get all the terminal names in the first tree
        term_names = [term.name for term in trees[0].get_terminals()]
        # for a specific clade in any of the tree, also get its terminal names
        clade_term_names = [term.name for term in clade.get_terminals()]
        # then create a boolean list
        boolvals = [name in clade_term_names for name in term_names]
        # create the string version and pass it to _BitString
        bitstr = _BitString(''.join(map(str, map(int, boolvals))))
        # or, equivalently:
        bitstr = _BitString.from_bool(boolvals)

    To convert back::

        # get all the terminal clades of the first tree
        terms = [term for term in trees[0].get_terminals()]
        # get the index of terminal clades in bitstr
        index_list = bitstr.index_one()
        # get all terminal clades by index
        clade_terms = [terms[i] for i in index_list]
        # create a new calde and append all the terminal clades
        new_clade = BaseTree.Clade()
        new_clade.clades.extend(clade_terms)

    Example
    -------

    >>> from Bio.Phylo.Consensus import _BitString
    >>> bitstr1 = _BitString('11111')
    >>> bitstr2 = _BitString('11100')
    >>> bitstr3 = _BitString('01101')
    >>> bitstr4 = _BitString('00010')
    >>> bitstr1
    _BitString('11111')
    >>> bitstr2 & bitstr3
    _BitString('01100')
    >>> bitstr2 | bitstr3
    _BitString('11101')
    >>> bitstr2 ^ bitstr3
    _BitString('10001')
    >>> bitstr2.index_one()
    [0, 1, 2]
    >>> bitstr3.index_one()
    [1, 2, 4]
    >>> bitstr3.index_zero()
    [0, 3]
    >>> bitstr1.contains(bitstr2)
    True
    >>> bitstr2.contains(bitstr3)
    False
    >>> bitstr2.independent(bitstr3)
    False
    >>> bitstr2.independent(bitstr4)
    True
    >>> bitstr1.iscompatible(bitstr2)
    True
    >>> bitstr2.iscompatible(bitstr3)
    False
    >>> bitstr2.iscompatible(bitstr4)
    True
    """

    def __new__(cls, strdata):
        """init from a binary string data"""
        # Accept only strings composed entirely of '0' and '1'.
        if (isinstance(strdata, str) and
                len(strdata) == strdata.count('0') + strdata.count('1')):
            return str.__new__(cls, strdata)
        else:
            raise TypeError(
                "The input should be a binary string composed of '0' and '1'")

    # Binary operators parse both operands as base-2 integers, combine them,
    # and zero-pad the result back to the original width.
    def __and__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = selfint & otherint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __or__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = selfint | otherint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __xor__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = selfint ^ otherint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __rand__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = otherint & selfint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __ror__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = otherint | selfint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __rxor__(self, other):
        selfint = literal_eval('0b' + self)
        otherint = literal_eval('0b' + other)
        resultint = otherint ^ selfint
        return _BitString(bin(resultint)[2:].zfill(len(self)))

    def __repr__(self):
        return '_BitString(' + str.__repr__(self) + ')'

    def index_one(self):
        """Return a list of positions where the element is '1'"""
        return [i for i, n in enumerate(self) if n == '1']

    def index_zero(self):
        """Return a list of positions where the element is '0'"""
        return [i for i, n in enumerate(self) if n == '0']

    def contains(self, other):
        """Check if current bitstr1 contains another one bitstr2.

        That is to say, the bitstr2.index_one() is a subset of
        bitstr1.index_one().

        Examples:
            "011011" contains "011000", "011001", "000011"

        Be careful, "011011" also contains "000000". Actually, all _BitString
        objects contain all-zero _BitString of the same length.
        """
        # XOR removes the shared ones; containment holds iff nothing of
        # `other` survived outside `self`.
        xorbit = self ^ other
        return (xorbit.count('1') == self.count('1') - other.count('1'))

    def independent(self, other):
        """Check if current bitstr1 is independent of another one bitstr2.

        That is to say the bitstr1.index_one() and bitstr2.index_one() have
        no intersection.

        Be careful, all _BitString objects are independent of all-zero _BitString
        of the same length.
        """
        # With no overlap, XOR keeps every '1' from both operands.
        xorbit = self ^ other
        return (xorbit.count('1') == self.count('1') + other.count('1'))

    def iscompatible(self, other):
        """Check if current bitstr1 is compatible with another bitstr2.

        Two conditions are considered as compatible:

        1. bitstr1.contain(bitstr2) or vise versa;
        2. bitstr1.independent(bitstr2).
        """
        return (self.contains(other) or other.contains(self) or
                self.independent(other))

    @classmethod
    def from_bool(cls, bools):
        # Build a bitstring from any iterable of booleans.
        return cls(''.join(map(str, map(int, bools))))
def strict_consensus(trees):
    """Search strict consensus tree from multiple trees.

    Only clades that appear in *every* input tree are kept.

    :Parameters:
        trees : iterable
            iterable of trees to produce consensus tree.

    :raises: ValueError if the trees do not share the same taxon set.
    """
    trees_iter = iter(trees)
    first_tree = next(trees_iter)
    terms = first_tree.get_terminals()
    bitstr_counts, tree_count = _count_clades(
        itertools.chain([first_tree], trees_iter))
    # Keep only bitstrs whose clade occurred in every tree.
    strict_bitstrs = [bitstr for bitstr, t in bitstr_counts.items()
                      if t[0] == tree_count]
    # Largest clades first, so parents are placed before their children.
    strict_bitstrs.sort(key=lambda bitstr: bitstr.count('1'), reverse=True)
    # Create root
    root = BaseTree.Clade()
    if strict_bitstrs[0].count('1') == len(terms):
        root.clades.extend(terms)
    else:
        raise ValueError('Taxons in provided trees should be consistent')
    # Map each placed clade's bitstring to its Clade object; seed with root.
    bitstr_clades = {strict_bitstrs[0]: root}
    # Create inner clades.
    for bitstr in strict_bitstrs[1:]:
        clade_terms = [terms[i] for i in bitstr.index_one()]
        clade = BaseTree.Clade()
        clade.clades.extend(clade_terms)
        # FIX: iterate over a snapshot of the items -- the dict is mutated
        # (del + insert) inside the loop, which is unsafe on a live view.
        for bs, c in list(bitstr_clades.items()):
            # Check if it should be the parent of the current clade.
            if bs.contains(bitstr):
                # Remove old bitstring.
                del bitstr_clades[bs]
                # Replace the absorbed terminals with the new inner clade.
                new_childs = [child for child in c.clades
                              if child not in clade_terms]
                c.clades = new_childs
                c.clades.append(clade)
                # Update bitstring: parent now lacks this clade's terminals.
                bs = bs ^ bitstr
                bitstr_clades[bs] = c
                break
        # Register the new clade.
        bitstr_clades[bitstr] = clade
    return BaseTree.Tree(root=root)
def majority_consensus(trees, cutoff=0):
    """Search majority rule consensus tree from multiple trees.

    This is a extend majority rule method, which means the you can set any
    cutoff between 0 ~ 1 instead of 0.5. The default value of cutoff is 0 to
    create a relaxed binary consensus tree in any condition (as long as one of
    the provided trees is a binary tree). The branch length of each consensus
    clade in the result consensus tree is the average length of all counts for
    that clade.

    :Parameters:
        trees : iterable
            iterable of trees to produce consensus tree.
    """
    tree_iter = iter(trees)
    first_tree = next(tree_iter)
    terms = first_tree.get_terminals()
    bitstr_counts, tree_count = _count_clades(
        itertools.chain([first_tree], tree_iter))
    # Sort bitstrs by descending #occurrences, then #tips, then tip order
    bitstrs = sorted(bitstr_counts.keys(),
                     key=lambda bitstr: (bitstr_counts[bitstr][0],
                                         bitstr.count('1'),
                                         str(bitstr)),
                     reverse=True)
    root = BaseTree.Clade()
    if bitstrs[0].count('1') == len(terms):
        root.clades.extend(terms)
    else:
        raise ValueError('Taxons in provided trees should be consistent')
    # Make a bitstr-to-clades dict and store root clade
    bitstr_clades = {bitstrs[0]: root}
    # create inner clades
    for bitstr in bitstrs[1:]:
        # apply majority rule: stop once support drops below the cutoff
        # (bitstrs are sorted by support, so all later ones fail too)
        count_in_trees, branch_length_sum = bitstr_counts[bitstr]
        confidence = 100.0 * count_in_trees / tree_count
        if confidence < cutoff * 100.0:
            break
        clade_terms = [terms[i] for i in bitstr.index_one()]
        clade = BaseTree.Clade()
        clade.clades.extend(clade_terms)
        clade.confidence = confidence
        # average branch length over all trees containing this clade
        clade.branch_length = branch_length_sum / count_in_trees
        # already-placed clades, largest (most tips) first
        bsckeys = sorted(bitstr_clades, key=lambda bs: bs.count('1'),
                         reverse=True)
        # check if current clade is compatible with previous clades and
        # record it's possible parent and child clades.
        compatible = True
        parent_bitstr = None
        child_bitstrs = []  # multiple independent childs
        for bs in bsckeys:
            if not bs.iscompatible(bitstr):
                compatible = False
                break
            # assign the closest ancestor as its parent
            # as bsckeys is sorted, it should be the last one
            if bs.contains(bitstr):
                parent_bitstr = bs
            # assign the closest descendant as its child
            # the largest and independent clades
            if (bitstr.contains(bs) and bs != bitstr and
                    all(c.independent(bs) for c in child_bitstrs)):
                child_bitstrs.append(bs)
        if not compatible:
            # conflicting clade: skip it (lower support loses)
            continue
        if parent_bitstr:
            # insert current clade; remove old bitstring
            parent_clade = bitstr_clades.pop(parent_bitstr)
            # update parent clade childs
            parent_clade.clades = [c for c in parent_clade.clades
                                   if c not in clade_terms]
            # set current clade as child of parent_clade
            parent_clade.clades.append(clade)
            # update bitstring
            # parent = parent ^ bitstr
            # update clade
            bitstr_clades[parent_bitstr] = parent_clade
        if child_bitstrs:
            # re-parent already-placed descendants under the new clade
            remove_list = []
            for c in child_bitstrs:
                remove_list.extend(c.index_one())
                child_clade = bitstr_clades[c]
                parent_clade.clades.remove(child_clade)
                clade.clades.append(child_clade)
            remove_terms = [terms[i] for i in remove_list]
            clade.clades = [c for c in clade.clades if c not in remove_terms]
        # put new clade
        bitstr_clades[bitstr] = clade
        # stop early once the tree is fully resolved (binary, or binary
        # with a trifurcating root)
        if ((len(bitstr_clades) == len(terms) - 1) or
                (len(bitstr_clades) == len(terms) - 2 and len(root.clades) == 3)):
            break
    return BaseTree.Tree(root=root)
def adam_consensus(trees):
    """Search Adam Consensus tree from multiple trees

    :Parameters:
        trees : list
            list of trees to produce consensus tree.
    """
    # Collect the root clade of every input tree, then recursively
    # partition them with _part to build the consensus.
    roots = []
    for tree in trees:
        roots.append(tree.root)
    consensus_root = _part(roots)
    return BaseTree.Tree(root=consensus_root, rooted=True)
def _part(clades):
    """recursive function of adam consensus algorithm

    Takes one clade per input tree (all over the same taxon set) and
    returns a single consensus Clade built from their common structure.
    """
    new_clade = None
    terms = clades[0].get_terminals()
    term_names = [term.name for term in terms]
    if len(terms) == 1 or len(terms) == 2:
        # One or two tips: structure is trivial, reuse the first clade.
        new_clade = clades[0]
    else:
        # Start from the full set, then repeatedly split it against every
        # child clade of every input tree, keeping compatible pieces.
        bitstrs = set([_BitString('1' * len(terms))])
        for clade in clades:
            for child in clade.clades:
                bitstr = _clade_to_bitstr(child, term_names)
                to_remove = set()
                to_add = set()
                for bs in bitstrs:
                    if bs == bitstr:
                        continue
                    elif bs.contains(bitstr):
                        # Split bs into the child and its complement.
                        to_add.add(bitstr)
                        to_add.add(bs ^ bitstr)
                        to_remove.add(bs)
                    elif bitstr.contains(bs):
                        to_add.add(bs ^ bitstr)
                    elif not bs.independent(bitstr):
                        # Partial overlap: split into intersection and the
                        # two set differences.
                        to_add.add(bs & bitstr)
                        to_add.add(bs & bitstr ^ bitstr)
                        to_add.add(bs & bitstr ^ bs)
                        to_remove.add(bs)
                # bitstrs = bitstrs | to_add
                bitstrs ^= to_remove
                if to_add:
                    # Add new pieces smallest-first, keeping only those
                    # independent of everything already kept.
                    for ta in sorted(to_add, key=lambda bs: bs.count('1')):
                        independent = True
                        for bs in bitstrs:
                            if not ta.independent(bs):
                                independent = False
                                break
                        if independent:
                            bitstrs.add(ta)
        new_clade = BaseTree.Clade()
        for bitstr in sorted(bitstrs):
            indices = bitstr.index_one()
            if len(indices) == 1:
                # Single tip: attach directly.
                new_clade.clades.append(terms[indices[0]])
            elif len(indices) == 2:
                # Pair of tips: attach as a simple bifurcation.
                bifur_clade = BaseTree.Clade()
                bifur_clade.clades.append(terms[indices[0]])
                bifur_clade.clades.append(terms[indices[1]])
                new_clade.clades.append(bifur_clade)
            elif len(indices) > 2:
                # Larger partition: recurse on the matching subclades of
                # each input tree.
                part_names = [term_names[i] for i in indices]
                next_clades = []
                for clade in clades:
                    next_clades.append(_sub_clade(clade, part_names))
                # next_clades = [clade.common_ancestor([clade.find_any(name=name) for name in part_names]) for clade in clades]
                new_clade.clades.append(_part(next_clades))
    return new_clade
def _sub_clade(clade, term_names):
    """extract a compatible subclade that only contains the given terminal names

    If the common ancestor of the named terminals contains extra tips, a
    new clade is synthesized that preserves the ancestor's groupings but
    is restricted to the requested terminals.
    """
    term_clades = [clade.find_any(name) for name in term_names]
    sub_clade = clade.common_ancestor(term_clades)
    if len(term_names) != sub_clade.count_terminals():
        # The ancestor spans extra tips: rebuild a restricted copy.
        temp_clade = BaseTree.Clade()
        temp_clade.clades.extend(term_clades)
        for c in sub_clade.find_clades(terminal=False, order="preorder"):
            if c == sub_clade.root:
                continue
            # Terminals of this inner clade that we care about.
            childs = set(c.find_clades(terminal=True)) & set(term_clades)
            if childs:
                for tc in temp_clade.find_clades(terminal=False,
                                                 order="preorder"):
                    tc_childs = set(tc.clades)
                    tc_new_clades = tc_childs - childs
                    # Group `childs` under a new inner clade when they all
                    # sit (with others) directly under tc.
                    if childs.issubset(tc_childs) and tc_new_clades:
                        tc.clades = list(tc_new_clades)
                        child_clade = BaseTree.Clade()
                        child_clade.clades.extend(list(childs))
                        tc.clades.append(child_clade)
        sub_clade = temp_clade
    return sub_clade
def _count_clades(trees):
    """Count distinct clades (different sets of terminal names) in the trees.

    Return a tuple: first a dict mapping each clade's _BitString to a tuple
    of (occurrence count, summed branch length), second the number of trees
    processed.

    :Parameters:
        trees : iterable
            An iterable that returns the trees to count
    """
    tally = {}
    n_trees = 0
    for tree in trees:
        n_trees += 1
        clade_to_bitstr = _tree_to_bitstrs(tree)
        for clade in tree.find_clades(terminal=False):
            key = clade_to_bitstr[clade]
            # Missing branch lengths count as 0.
            count, total_bl = tally.get(key, (0, 0))
            tally[key] = (count + 1, total_bl + (clade.branch_length or 0))
    return tally, n_trees
def get_support(target_tree, trees, len_trees=None):
    """Calculate branch support for a target tree given bootstrap replicate trees.

    Sets ``clade.confidence`` (percentage of replicates containing the
    clade) on every internal clade of ``target_tree`` and returns it.

    :Parameters:
        target_tree : Tree
            tree to calculate branch support for.
        trees : iterable
            iterable of trees used to calculate branch support.
        len_trees : int
            optional count of replicates in trees. len_trees must be provided
            when len(trees) is not a valid operation.
    """
    # Sorted so the bitstring encoding is stable across trees.
    term_names = sorted(term.name
                        for term in target_tree.find_clades(terminal=True))
    bitstrs = {}
    size = len_trees
    if size is None:
        try:
            size = len(trees)
        except TypeError:
            raise TypeError("Trees does not support len(trees), "
                            "you must provide the number of replicates in trees "
                            "as the optional parameter len_trees.")
    # Register each internal clade of the target with a zero count.
    for clade in target_tree.find_clades(terminal=False):
        bitstr = _clade_to_bitstr(clade, term_names)
        bitstrs[bitstr] = (clade, 0)
    # Tally matching clades across the replicates and update confidence.
    for tree in trees:
        for clade in tree.find_clades(terminal=False):
            bitstr = _clade_to_bitstr(clade, term_names)
            if bitstr in bitstrs:
                c, t = bitstrs[bitstr]
                c.confidence = (t + 1) * 100.0 / size
                bitstrs[bitstr] = (c, t + 1)
    return target_tree
def bootstrap(msa, times):
    """Generate bootstrap replicates from a multiple sequence alignment object

    Each replicate is built by sampling ``len(msa[0])`` columns from the
    alignment uniformly at random, with replacement.

    :Parameters:
        msa : MultipleSeqAlignment
            multiple sequence alignment to generate replicates.
        times : int
            number of bootstrap times.
    """
    length = len(msa[0])
    for _ in range(times):
        item = None
        for _ in range(length):
            col = random.randint(0, length - 1)
            # FIX: compare against None explicitly -- truth-testing the
            # alignment slice (`if not item`) is fragile/ambiguous for
            # array-like objects.
            if item is None:
                item = msa[:, col:col + 1]
            else:
                item += msa[:, col:col + 1]
        yield item
def bootstrap_trees(msa, times, tree_constructor):
    """Generate bootstrap replicate trees from a multiple sequence alignment.

    Lazily yields one tree per bootstrap replicate of ``msa``.

    :Parameters:
        msa : MultipleSeqAlignment
            multiple sequence alignment to generate replicates.
        times : int
            number of bootstrap times.
        tree_constructor : TreeConstructor
            tree constructor to be used to build trees.
    """
    for replicate in bootstrap(msa, times):
        yield tree_constructor.build_tree(replicate)
def bootstrap_consensus(msa, times, tree_constructor, consensus):
    """Build the consensus tree of ``times`` bootstrap trees for an alignment.

    :Parameters:
        msa : MultipleSeqAlignment
            Multiple sequence alignment to generate replicates.
        times : int
            Number of bootstrap times.
        tree_constructor : TreeConstructor
            Tree constructor to be used to build trees.
        consensus : function
            Consensus method in this module: `strict_consensus`,
            `majority_consensus`, `adam_consensus`.
    """
    replicate_trees = bootstrap_trees(msa, times, tree_constructor)
    # The consensus functions need a sequence, so materialize the generator.
    return consensus(list(replicate_trees))
def _clade_to_bitstr(clade, tree_term_names):
    """Create a BitString representing a clade, given ordered tree taxon names."""
    members = frozenset(term.name for term in clade.find_clades(terminal=True))
    # One bit per taxon, in the fixed order given by tree_term_names.
    return _BitString.from_bool(name in members for name in tree_term_names)
def _tree_to_bitstrs(tree):
    """Map each internal clade of ``tree`` to its bipartition BitString."""
    term_names = [term.name for term in tree.find_clades(terminal=True)]
    return dict((clade, _clade_to_bitstr(clade, term_names))
                for clade in tree.find_clades(terminal=False))
def _bitstring_topology(tree):
    """Map each clade BitString of ``tree`` to its branch length.

    Branch lengths are rounded to 5 decimal places; a missing branch
    length counts as 0.
    """
    return dict((bitstr, round(clade.branch_length or 0.0, 5))
                for clade, bitstr in _tree_to_bitstrs(tree).items())
def _equal_topology(tree1, tree2):
    """True if two trees share taxa, topology and branch lengths.

    (Branch lengths checked to 5 decimal places.)
    """
    terms1 = set(term.name for term in tree1.find_clades(terminal=True))
    terms2 = set(term.name for term in tree2.find_clades(terminal=True))
    if terms1 != terms2:
        return False
    return _bitstring_topology(tree1) == _bitstring_topology(tree2)
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Phylo/Consensus.py | Python | gpl-2.0 | 23,804 | [
"Biopython"
] | 1a4562934f07d59c76ddf9112f80f204587b38fe93fe4c231d10e00c92283fb2 |
# Blender add-on registration metadata (shown in Blender's add-on list).
bl_info = {
    "name": "Caffe Nodes",
    "category": "Object",
}
import subprocess
import bpy
from bpy.types import NodeTree, Node, NodeSocket
def calcsize(self, context, axis='x'):
    '''Compute the spatial extent of the blob produced by a node.

    Although written with a ``self`` parameter, this is a module-level helper
    called from the nodes' ``draw_buttons`` methods.  It walks backwards from
    ``self`` to the data layer, recording kernel/stride/padding (or the FC
    output count) of every layer passed, then replays the chain forwards from
    the data layer's image size to obtain the final extent along ``axis``
    ('x' or 'y').

    Returns the size as a string rounded to 2 decimals, or the int 0 when
    this node has no input link yet.  May raise IndexError if an upstream
    node is unwired (callers catch this).
    '''
    node = self
    try:
        node.inputs[0].links[0].from_node
    except IndexError:
        # This node is not wired up yet - nothing to compute.
        return 0
    # Per-layer properties gathered on the backward walk; index 0 is the
    # layer closest to `self`, the last index is closest to the data layer.
    kernelsizes = []
    strides = []
    paddings = []
    offsets = []
    fcsizes = []
    reversals = []
    passes = []
    x = 0.0
    counter = 0
    while True:
        if node.bl_idname in ("ConvNodeType", "PoolNodeType", "DeConvNodeType"):
            # Record the per-axis geometry of a spatial layer.
            if node.square_kernel:
                kernelsizes.append(node.kernel_size)
            elif axis == 'x':
                kernelsizes.append(node.kernel_w)
            elif axis == 'y':
                kernelsizes.append(node.kernel_h)
            else:
                raise RuntimeError
            if node.square_stride:
                strides.append(node.stride)
            elif axis == 'x':
                strides.append(node.stride_w)
            elif axis == 'y':
                strides.append(node.stride_h)
            else:
                raise RuntimeError
            if node.square_padding:
                paddings.append(node.pad)
            elif axis == 'x':
                paddings.append(node.pad_w)
            elif axis == 'y':
                paddings.append(node.pad_h)
            else:
                raise RuntimeError
            offsets.append(0)
            fcsizes.append(0)
            passes.append(0)
            # Deconvolution layers enlarge instead of shrink the map.
            reversals.append(1 if node.bl_idname == "DeConvNodeType" else 0)
            node = node.inputs[0].links[0].from_node
        elif node.bl_idname == "FCNodeType":
            # A fully connected layer fixes the size to its output count.
            kernelsizes.append(0)
            strides.append(0)
            paddings.append(0)
            offsets.append(0)
            passes.append(0)
            reversals.append(0)
            fcsizes.append(node.num_output)
            node = node.inputs[0].links[0].from_node
        elif node.bl_idname == "DataNodeType":
            # Reached the far end of the chain: take the raw image size...
            if node.db_type == 'ImageData':
                h = node.new_height
                w = node.new_width
            else:
                h = node.height
                w = node.width
            if h == w or axis == 'x':
                x = float(w)
            else:
                x = float(h)
            # ...then replay the recorded layers forwards, starting with the
            # layer nearest the data node (the end of the lists).
            for i in range(len(passes) - 1, -1, -1):
                if passes[i]:
                    continue  # size-preserving layer
                if fcsizes[i]:
                    # FC output is a flat neuron count; stop here.
                    x = fcsizes[i]
                    break
                if reversals[i]:
                    # Deconvolution: inverse of the shrink formula below.
                    x = (x * strides[i] - strides[i]) + kernelsizes[i] - 2 * paddings[i]
                else:
                    # Convolution/pooling shrink formula.
                    x = ((x + (2 * paddings[i]) - kernelsizes[i]) / strides[i] + 1 - offsets[i])
            # Fix: terminate the traversal once the forward replay is done.
            # Without this break the loop re-entered this branch until the
            # cycle guard fired and clobbered the result with 0.
            break
        else:
            # Any other layer type is treated as size-preserving.
            kernelsizes.append(0)
            strides.append(0)
            paddings.append(0)
            offsets.append(0)
            reversals.append(0)
            fcsizes.append(0)
            passes.append(1)
            node = node.inputs[0].links[0].from_node
        counter += 1
        if counter > 1000:
            # Safety net against cyclic node graphs.
            x = 0
            break
    return str(round(x, 2))
############################## Function for determining number of gpus
def getgpus():
    '''Return the number of GPUs listed by ``nvidia-smi -L``.

    Returns the string 'Error' when nvidia-smi cannot be launched (no
    NVIDIA driver installed); callers must handle both return types.
    '''
    command = ['nvidia-smi', '-L']
    try:
        proc = subprocess.Popen(command, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                universal_newlines=True)
    except OSError:
        return 'Error'
    # communicate() waits for exit and drains the pipe completely.  The
    # previous poll()/readline() loop could drop lines that were emitted
    # just before the process exited (poll() may report termination while
    # output is still buffered).
    output = proc.communicate()[0]
    # nvidia-smi -L prints one line per GPU.
    return len(output.splitlines())
##################################
# Derived from the NodeTree base type, similar to Menu, Operator, Panel, etc.
class CaffeTree(NodeTree):
    '''Node tree type that hosts the Caffe network graph in the node editor.'''
    # Unique identifier Blender uses to register this tree type.
    bl_idname = 'CaffeNodeTree'
    # Human-readable name shown in the node editor header.
    bl_label = 'Caffe Node Tree'
    bl_icon = 'NODETREE'
# Custom socket type
class ImageSocket(NodeSocket):
    '''Socket carrying an image/feature-map blob between layer nodes.'''
    # Unique identifier Blender uses to register this socket type.
    bl_idname = 'ImageSocketType'
    # Label for nice name display.
    bl_label = 'Image Socket'

    def draw(self, context, layout, node, text):
        # This socket holds no editable value; just show its label.
        layout.label(text)

    def draw_color(self, context, node):
        # RGBA colour of the socket dot in the editor (cyan, semi-opaque).
        return (0.0, 1.0, 1.0, 0.5)
class OutputSocket(NodeSocket):
    '''Output socket with an optional user-editable name for the produced blob.'''
    bl_idname = 'OutputSocketType'
    bl_label = 'Output Socket'
    # Optional name the user may give this output (empty string = default).
    output_name = bpy.props.StringProperty(name='')

    def draw(self, context, layout, node, text):
        # Show a caption plus an editable text field for the optional name.
        layout.label("Optional name")
        layout.prop(self, "output_name")

    def draw_color(self, context, node):
        # Same cyan as ImageSocket.
        return (0.0, 1.0, 1.0, 0.5)
class InPlaceOutputSocket(NodeSocket):
    '''Output socket variant for in-place outputs; its name property exists
    but is not exposed in the UI.'''
    bl_idname = 'InPlaceOutputSocketType'
    bl_label = 'In Place Output Socket'
    # Optional blob name; unlike OutputSocket, draw() does not expose it.
    output_name = bpy.props.StringProperty(name='', default='')

    def draw(self, context, layout, node, text):
        layout.label(text)

    def draw_color(self, context, node):
        # Cyan, matching the other output sockets.
        return (0.0, 1.0, 1.0, 0.5)
class LabelSocket(NodeSocket):
    '''Socket carrying label data.'''
    bl_idname = 'LabelSocketType'
    bl_label = 'Label Socket'

    def draw(self, context, layout, node, text):
        # No editable value; show only the label.
        layout.label(text)

    def draw_color(self, context, node):
        # Green, to distinguish labels from image blobs.
        return (0.5, 1.0, 0.2, 0.5)
class LossSocket(NodeSocket):
    '''Socket carrying a loss value.'''
    bl_idname = 'LossSocketType'
    bl_label = 'Loss Socket'

    def draw(self, context, layout, node, text):
        # No editable value; show only the label.
        layout.label(text)

    def draw_color(self, context, node):
        # Magenta, to distinguish losses from data blobs.
        return (1.0, 0.3, 1.0, 0.5)
class NAFlatSocket(NodeSocket):
    '''Socket for a flat blob that is still linear (per its bl_label, no
    activation applied yet).'''
    bl_idname = 'NAFlatSocketType'
    bl_label = 'Linear Flat Socket'

    def draw(self, context, layout, node, text):
        layout.label(text)

    def draw_color(self, context, node):
        # Red.
        return (1.0, 0.2, 0.2, 0.5)
class AFlatSocket(NodeSocket):
    '''Socket for a flat blob after a non-linearity (per its bl_label).'''
    bl_idname = 'AFlatSocketType'
    bl_label = 'Non linear Flat Socket'

    def draw(self, context, layout, node, text):
        layout.label(text)

    def draw_color(self, context, node):
        # Teal.
        return (0.0, 0.8, 0.8, 0.5)
class params_p_gw(bpy.types.PropertyGroup):
    '''Weight-parameter multipliers for a layer (learning rate and decay).'''
    name = bpy.props.StringProperty(name='Shared name')
    lr_mult = bpy.props.FloatProperty(default=1.0)
    decay_mult = bpy.props.FloatProperty(default=1.0)

    def draw(self, context, layout):
        # The shared-name field is currently disabled in the UI.
        # layout.prop(self, "name")
        layout.prop(self, "lr_mult")
        layout.prop(self, "decay_mult")
class params_p_gb(bpy.types.PropertyGroup):
    '''Bias-parameter multipliers; defaults are lr_mult=2, decay_mult=0.'''
    name = bpy.props.StringProperty(name='Shared name')
    lr_mult = bpy.props.FloatProperty(default=2.0)
    decay_mult = bpy.props.FloatProperty(default=0.0)

    def draw(self, context, layout):
        # The shared-name field is currently disabled in the UI.
        # layout.prop(self, "name")
        layout.prop(self, "lr_mult")
        layout.prop(self, "decay_mult")
class CaffeTreeNode:
    '''Mixin shared by all Caffe layer nodes: restricts them to the Caffe
    node tree and provides the common extra/weight/bias parameter UI.'''
    @classmethod
    def poll(cls, ntree):
        # Only allow these nodes inside a Caffe node tree.
        return ntree.bl_idname == 'CaffeNodeTree'

    # Toggle that reveals the weight/bias multiplier sub-panels.
    extra_params = bpy.props.BoolProperty(name='Extra Parameters', default=False)
    weight_params = bpy.props.PointerProperty(type=params_p_gw)
    bias_params = bpy.props.PointerProperty(type=params_p_gb)
    # Which network phase(s) this layer belongs to.
    phases = [("TRAIN", "TRAIN", "Train only"),
              ("TEST", "TEST", "Test only"),
              ("BOTH", "BOTH", "Both")]
    include_in = bpy.props.EnumProperty(items=phases, default="BOTH")

    def draw_include_in(self, layout):
        layout.prop(self, "include_in")

    def draw_extra_params(self, context, layout):
        # Draw the toggle; when enabled, expand the multiplier groups.
        layout.prop(self, "extra_params")
        if self.extra_params:
            layout.label("Weight Params")
            self.weight_params.draw(context, layout)
            layout.label("Bias Params")
            self.bias_params.draw(context, layout)
class DataNode(Node, CaffeTreeNode):
    '''Node that feeds training/testing data into the network.

    Supports LMDB / LevelDB databases, image-list files and HDF5 files,
    with separate train/test sources and batch sizes, plus the usual
    transformation parameters (scale, mirror, mean file) for the
    non-HDF5 backends.
    '''
    bl_idname = 'DataNodeType'
    bl_label = 'Data input'
    bl_icon = 'SOUND'

    # Supported data backends.
    DBs = [
        ("LMDB", "LMDB", "Lmdb database"),
        ("LEVELDB", "LEVELDB", "LevelDB database"),
        ("ImageData", "ImageData", "Image files"),
        ("HDF5Data", "HDF5Data", "HDF5 Data")
    ]
    # NOTE(review): this enum appears unused - the only property that used
    # it is commented out below; phase handling goes through the mixin's
    # include_in (TRAIN/TEST/BOTH) instead.
    Phases = [
        ("TRAINANDTEST", "TRAINANDTEST", "Train and Test"),
        ("TRAIN", "TRAIN", "Train"),
        ("TEST", "TEST", "Test")
    ]
    # === Custom Properties ===
    db_type = bpy.props.EnumProperty(name="Database type", description="Type of Data", items=DBs, default='HDF5Data')
    train_batch_size = bpy.props.IntProperty(min=1, default=100)
    test_batch_size = bpy.props.IntProperty(min=1, default=100)
    # Directory paths are used by the database backends (LMDB/LevelDB).
    train_path = bpy.props.StringProperty(
        name="Train Data Path",
        default="",
        description="Get the path to the data",
        subtype='DIR_PATH'
    )
    test_path = bpy.props.StringProperty(
        name="Test Data Path",
        default="",
        description="Get the path to the data",
        subtype='DIR_PATH'
    )
    # File paths are used by the file backends (ImageData/HDF5Data).
    train_data = bpy.props.StringProperty(
        name="Train Data File",
        default="",
        description="Get the path to the data",
        subtype='FILE_PATH'
    )
    test_data = bpy.props.StringProperty(
        name="Test Data File",
        default="",
        description="Get the path to the data",
        subtype='FILE_PATH'
    )
    # Transformation params
    # include_in = bpy.props.EnumProperty(name="Include in", description="Phases to include in",items=Phases,default='TRAINANDTEST')
    scale = bpy.props.FloatProperty(default=1.0, min=0)
    mirror = bpy.props.BoolProperty(name='Random Mirror', default=False)
    use_mean_file = bpy.props.BoolProperty(name='Use mean file', default=False)
    mean_file = bpy.props.StringProperty(
        name="Mean File Path",
        default="",
        description="Mean file location",
        subtype='FILE_PATH'
    )
    # TODO: Add Mean Value and random crop
    # Image data params (new_* are the resize targets for ImageData).
    new_height = bpy.props.IntProperty(name="New image height", min=0, default=0, soft_max=1000)
    new_width = bpy.props.IntProperty(name="New image width", min=0, default=0, soft_max=1000)
    height = bpy.props.IntProperty(name="Image height", min=0, default=0, soft_max=1000)
    width = bpy.props.IntProperty(name="Image width", min=0, default=0, soft_max=1000)
    channels = bpy.props.IntProperty(name="Image Channels", min=1, default=1, soft_max=5)
    is_color = bpy.props.BoolProperty(name="Is color image", default=True)
    # For Image data + HDF5 data
    shuffle = bpy.props.BoolProperty(name='Shuffle', default=False)
    # For Data + Image data
    rand_skip = bpy.props.IntProperty(name="Random skip", min=0, default=0, soft_max=1000)
    # TODO: Add non supervised property

    def init(self, context):
        # Data nodes only produce outputs: the image stack and the labels.
        self.outputs.new('OutputSocketType', "Image Stack")
        self.outputs.new('OutputSocketType', "Label")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        '''Draw the node UI; which fields appear depends on db_type and
        the selected phase.'''
        layout.prop(self, "db_type")
        layout.prop(self, "include_in")
        # File backends take a file per phase; database backends a directory.
        if self.db_type in ('ImageData', 'HDF5Data'):
            if self.include_in != 'TEST':
                layout.prop(self, "train_data")
            if self.include_in != 'TRAIN':
                layout.prop(self, "test_data")
        else:
            if self.include_in != 'TEST':
                layout.prop(self, "train_path")
            if self.include_in != 'TRAIN':
                layout.prop(self, "test_path")
        if self.include_in != 'TEST':
            layout.prop(self, "train_batch_size")
        if self.include_in != 'TRAIN':
            layout.prop(self, "test_batch_size")
        # Transformation parameters are not offered for HDF5 data.
        if self.db_type in ('ImageData', 'LMDB', 'LEVELDB'):
            layout.label("Transformation Parameters")
            layout.prop(self, "scale")
            layout.prop(self, "mirror")
            layout.prop(self, "use_mean_file")
            if self.use_mean_file:
                layout.prop(self, "mean_file")
        layout.label("Special Parameters")
        if self.db_type == 'ImageData':
            layout.prop(self, "shuffle")
            layout.prop(self, "new_height")
            layout.prop(self, "new_width")
            layout.prop(self, "channels")
            layout.prop(self, "is_color")
            layout.prop(self, "rand_skip")
        elif self.db_type == 'HDF5Data':
            layout.prop(self, "shuffle")
            layout.prop(self, "height")
            layout.prop(self, "width")
            layout.prop(self, "channels")
        else:
            layout.prop(self, "rand_skip")
            layout.prop(self, "height")
            layout.prop(self, "width")
            layout.prop(self, "channels")

    def draw_label(self):
        # Fixed label shown on the node header.
        return "Data Node"
class filler_p_g(bpy.types.PropertyGroup):
    '''Filler settings (distribution type plus its parameters) for a layer's
    weights or biases.'''
    types = [("constant", "constant", "Constant val"),
             ("uniform", "uniform", "Uniform dist"),
             ("gaussian", "gaussian", "Gaussian dist"),
             ("positive_unitball", "positive_unitball", "Positive unit ball dist"),
             ("xavier", "xavier", "Xavier dist"),
             ("msra", "msra", "MSRA dist"),
             ("bilinear", "bilinear", "Bi-linear upsample weights")]
    # NOTE(review): the tooltip strings below look copy-pasted from `types`
    # ("Constant val" etc. do not describe FAN_IN/FAN_OUT/AVERAGE).
    vnormtypes = [("FAN_IN", "FAN_IN", "Constant val"),
                  ("FAN_OUT", "FAN_OUT", "Uniform dist"),
                  ("AVERAGE", "AVERAGE", "Gaussian dist")]
    type = bpy.props.EnumProperty(name='Type', items=types, default='xavier')
    # Each parameter below applies to only some filler types; draw()
    # exposes just the relevant ones.
    value = bpy.props.FloatProperty(default=0.0, soft_max=1000.0, soft_min=-1000.0)
    min = bpy.props.FloatProperty(default=0.0, soft_max=1000.0, soft_min=-1000.0)
    max = bpy.props.FloatProperty(default=1.0, soft_max=1000.0, soft_min=-1000.0)
    mean = bpy.props.FloatProperty(default=0.0, soft_max=1000.0, soft_min=-1000.0)
    std = bpy.props.FloatProperty(default=1.0, soft_max=1000.0, soft_min=-1000.0)
    variance_norm = bpy.props.EnumProperty(name='Weight variance norm', default='FAN_IN', items=vnormtypes)
    is_sparse = bpy.props.BoolProperty(name="Use Sparsity", default=False)
    sparse = bpy.props.IntProperty(default=100, min=1)

    def draw(self, context, layout):
        # Show only the options that apply to the chosen filler type.
        layout.prop(self, "type")
        if self.type == 'constant':
            layout.prop(self, "value")
        elif self.type in ('xavier', 'msra'):
            layout.prop(self, "variance_norm")
        elif self.type == 'gaussian':
            layout.prop(self, "mean")
            layout.prop(self, "std")
            layout.prop(self, "is_sparse")
            if self.is_sparse:
                layout.prop(self, "sparse")
        elif self.type == 'uniform':
            layout.prop(self, "min")
            layout.prop(self, "max")
class PoolNode(Node, CaffeTreeNode):
    '''Pooling layer node (max / average / stochastic).'''
    bl_idname = 'PoolNodeType'
    bl_label = 'Pooling Node'
    bl_icon = 'SOUND'
    # Layer type string for this node.
    n_type = 'Pooling'

    modes = [
        ("MAX", "MAX", "Max pooling"),
        ("AVE", "AVE", "Average pooling"),
        ("STOCHASTIC", "SGD", "Stochastic pooling"),
    ]
    # Padding / kernel / stride can each be square (one value) or per-axis.
    square_padding = bpy.props.BoolProperty(name="Equal x,y padding", default=True)
    pad = bpy.props.IntProperty(name="Padding", default=0, min=0, soft_max=5)
    pad_h = bpy.props.IntProperty(name="Padding height", default=0, min=0, soft_max=5)
    pad_w = bpy.props.IntProperty(name="Padding width", default=0, min=0, soft_max=5)
    square_kernel = bpy.props.BoolProperty(name="Equal x,y kernel", default=True)
    kernel_size = bpy.props.IntProperty(name="Kernel size", default=5, min=1, soft_max=25)
    kernel_h = bpy.props.IntProperty(name="Kernel height", default=5, min=1, soft_max=25)
    kernel_w = bpy.props.IntProperty(name="Kernel width", default=5, min=1, soft_max=25)
    # TODO: Maybe add group
    square_stride = bpy.props.BoolProperty(name="Equal x,y stride", default=True)
    stride = bpy.props.IntProperty(name="Stride", default=1, min=1, soft_max=5)
    stride_h = bpy.props.IntProperty(name="Stride height", default=1, min=1, soft_max=5)
    stride_w = bpy.props.IntProperty(name="Stride width", default=1, min=1, soft_max=5)
    mode = bpy.props.EnumProperty(name='Mode', default='MAX', items=modes)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Output image")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # Show the resulting output size.  calcsize may raise IndexError
        # while an upstream node is unwired; the readout is then omitted.
        try:
            if calcsize(self, context, axis='x') != calcsize(self, context, axis='y'):
                layout.label("image x,y output is %s,%s pixels" %
                             (calcsize(self, context, axis='x'), calcsize(self, context, axis='y')))
            else:
                layout.label("image output is %s pixels" % calcsize(self, context, axis='x'))
        except IndexError:
            pass
        if self.square_padding:
            layout.prop(self, "pad")
        else:
            layout.prop(self, "pad_h")
            layout.prop(self, "pad_w")
        if self.square_kernel:
            layout.prop(self, "kernel_size")
        else:
            layout.prop(self, "kernel_h")
            layout.prop(self, "kernel_w")
        if self.square_stride:
            layout.prop(self, "stride")
        else:
            layout.prop(self, "stride_h")
            layout.prop(self, "stride_w")
        layout.prop(self, "square_padding")
        layout.prop(self, "square_kernel")
        layout.prop(self, "square_stride")
        layout.prop(self, "mode")
class EltwiseNode(Node, CaffeTreeNode):
    '''Element-wise combination of two blobs (product, sum or max).'''
    bl_idname = 'EltwiseNodeType'
    bl_label = 'Element-wise Node'
    bl_icon = 'SOUND'
    n_type = 'Eltwise'

    eltwiseOps = [
        ("PROD", "PROD", "Eltwise prod: c(i) -> a(i)*b(i)"),
        ("SUM", "SUM", "Eltwise sum: c(i) -> a(i)+b(i)"),
        ("MAX", "MAX", "Eltwise max: c(i) -> max [a(i),b(i)]"),
    ]
    coeff = bpy.props.FloatProperty(default=2.0, soft_max=10.0, min=0)
    # NOTE(review): default=1 relies on int->bool coercion; True would be
    # clearer.
    stable_prod_grad = bpy.props.BoolProperty(name='Stable(slower) gradient', default=1)
    operation = bpy.props.EnumProperty(name='Operation', default='SUM', items=eltwiseOps)

    def init(self, context):
        # Two input blobs, one combined output.
        self.inputs.new('ImageSocketType', "Input blob A")
        self.inputs.new('ImageSocketType', "Input blob B")
        self.outputs.new('OutputSocketType', "Output blob C")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        layout.prop(self, "operation")
        # Each operation exposes only its own extra option.
        if self.operation == 'PROD':
            layout.prop(self, "stable_prod_grad")
        elif self.operation == 'SUM':
            layout.prop(self, "coeff")
class ExpNode(Node, CaffeTreeNode):
    '''Exponential layer node with base/scale/shift parameters.'''
    bl_idname = 'ExpNodeType'
    bl_label = 'Exponential Node'
    bl_icon = 'SOUND'
    n_type = 'Exp'

    # NOTE(review): default=-1.0 lies outside min=0, so the declared default
    # cannot actually be taken - confirm intent (in Caffe a base of -1
    # conventionally selects e; min=0 forbids that value here).
    base = bpy.props.FloatProperty(default=-1.0, soft_max=10.0, min=0)
    scale = bpy.props.FloatProperty(default=1.0, soft_max=10.0, min=0)
    shift = bpy.props.FloatProperty(default=0.0, soft_max=10.0, min=-10)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input blob")
        self.outputs.new('OutputSocketType', "Output blob")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        layout.prop(self, "base")
        layout.prop(self, "scale")
        layout.prop(self, "shift")
        self.draw_extra_params(context, layout)
class MVNNode(Node, CaffeTreeNode):
    '''Mean-variance normalization layer node.'''
    bl_idname = 'MVNNodeType'
    bl_label = 'MVN Node'
    bl_icon = 'SOUND'
    n_type = 'MVN'

    normalize_variance = bpy.props.BoolProperty(default=True)
    across_channels = bpy.props.BoolProperty(default=False)
    # Small epsilon; its tight minimum keeps it strictly positive.
    eps = bpy.props.FloatProperty(default=1e-9, soft_max=1.0, min=1e-20)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input blob")
        self.outputs.new('OutputSocketType', "Output blob")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        layout.prop(self, "normalize_variance")
        layout.prop(self, "across_channels")
        layout.prop(self, "eps")
        self.draw_extra_params(context, layout)
class BatchNormNode(Node, CaffeTreeNode):
    '''Batch normalization layer node.'''
    bl_idname = 'BatchNormNodeType'
    bl_label = 'Batch Norm Node'
    bl_icon = 'SOUND'
    n_type = 'BatchNorm'

    # Declared but deliberately not editable yet - see draw_buttons below.
    use_global_stats = bpy.props.BoolProperty(default=True)
    eps = bpy.props.FloatProperty(default=1e-5, soft_max=1.0, min=1e-20)
    moving_average_fraction = bpy.props.FloatProperty(default=.999, soft_max=1.0, min=.5)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input blob")
        self.outputs.new('OutputSocketType', "Output blob")

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # TODO: Find a prototxt which shows how eps and mav are set
        layout.label('''eps, mav, and use global average are all default.''')
        layout.label('They will have sliders implemented when I can find')
        layout.label('an example prototxt with them not default.')
        # layout.prop(self, "use_global_stats")
        # layout.prop(self, "moving_average_fraction")
        # layout.prop(self, "eps")
class ConvNode(Node, CaffeTreeNode):
    '''Convolution layer node.'''
    bl_idname = 'ConvNodeType'
    bl_label = 'Convolution Node'
    bl_icon = 'SOUND'
    n_type = "Convolution"

    num_output = bpy.props.IntProperty(name="Number of outputs", default=20, min=1, soft_max=300)
    bias_term = bpy.props.BoolProperty(name='Include Bias term', default=True)
    # Padding / kernel / stride can each be square (one value) or per-axis.
    square_padding = bpy.props.BoolProperty(name="Equal x,y padding", default=True)
    pad = bpy.props.IntProperty(name="Padding", default=0, min=0, soft_max=5)
    pad_h = bpy.props.IntProperty(name="Padding height", default=0, min=0, soft_max=5)
    pad_w = bpy.props.IntProperty(name="Padding width", default=0, min=0, soft_max=5)
    square_kernel = bpy.props.BoolProperty(name="Equal x,y kernel", default=True)
    kernel_size = bpy.props.IntProperty(name="Kernel size", default=5, min=1, soft_max=25)
    kernel_h = bpy.props.IntProperty(name="Kernel height", default=5, min=1, soft_max=25)
    kernel_w = bpy.props.IntProperty(name="Kernel width", default=5, min=1, soft_max=25)
    # TODO: Maybe add group
    square_stride = bpy.props.BoolProperty(name="Equal x,y stride", default=True)
    stride = bpy.props.IntProperty(name="Stride", default=1, min=1, soft_max=5)
    stride_h = bpy.props.IntProperty(name="Stride height", default=1, min=1, soft_max=5)
    stride_w = bpy.props.IntProperty(name="Stride width", default=1, min=1, soft_max=5)
    weight_filler = bpy.props.PointerProperty(type=filler_p_g)
    bias_filler = bpy.props.PointerProperty(type=filler_p_g)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Output image")
        # Magenta tint distinguishes convolution nodes in the editor.
        self.color = [1, 0, 1]
        self.use_custom_color = True

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # TODO: Finish calcsize
        # calcsize may raise IndexError while an upstream node is unwired;
        # the size readout is then omitted.
        try:
            if calcsize(self, context, axis='x') != calcsize(self, context, axis='y'):
                layout.label("image x,y output is %s,%s pixels" %
                             (calcsize(self, context, axis='x'), calcsize(self, context, axis='y')))
            else:
                layout.label("image output is %s pixels" % calcsize(self, context, axis='x'))
        except IndexError:
            pass
        layout.prop(self, "num_output")
        layout.prop(self, "bias_term")
        if self.square_padding:
            layout.prop(self, "pad")
        else:
            layout.prop(self, "pad_h")
            layout.prop(self, "pad_w")
        if self.square_kernel:
            layout.prop(self, "kernel_size")
        else:
            layout.prop(self, "kernel_h")
            layout.prop(self, "kernel_w")
        if self.square_stride:
            layout.prop(self, "stride")
        else:
            layout.prop(self, "stride_h")
            layout.prop(self, "stride_w")
        layout.prop(self, "square_padding")
        layout.prop(self, "square_kernel")
        layout.prop(self, "square_stride")
        layout.label("Weight Filler")
        self.weight_filler.draw(context, layout)
        layout.label("bias Filler")
        self.bias_filler.draw(context, layout)
        self.draw_extra_params(context, layout)
class DeConvNode(Node, CaffeTreeNode):
    '''Deconvolution (transposed convolution) layer node.'''
    bl_idname = 'DeConvNodeType'
    bl_label = 'DeConvolution Node'
    bl_icon = 'SOUND'
    n_type = "Deconvolution"

    num_output = bpy.props.IntProperty(name="Number of outputs", default=20, min=1, soft_max=300)
    bias_term = bpy.props.BoolProperty(name='Include Bias term', default=True)
    # Padding / kernel / stride can each be square (one value) or per-axis.
    square_padding = bpy.props.BoolProperty(name="Equal x,y padding", default=True)
    pad = bpy.props.IntProperty(name="Padding", default=0, min=0, soft_max=5)
    pad_h = bpy.props.IntProperty(name="Padding height", default=0, min=0, soft_max=5)
    pad_w = bpy.props.IntProperty(name="Padding width", default=0, min=0, soft_max=5)
    square_kernel = bpy.props.BoolProperty(name="Equal x,y kernel", default=True)
    kernel_size = bpy.props.IntProperty(name="Kernel size", default=5, min=1, soft_max=25)
    kernel_h = bpy.props.IntProperty(name="Kernel height", default=5, min=1, soft_max=25)
    kernel_w = bpy.props.IntProperty(name="Kernel width", default=5, min=1, soft_max=25)
    # TODO: Maybe add group
    square_stride = bpy.props.BoolProperty(name="Equal x,y stride", default=True)
    stride = bpy.props.IntProperty(name="Stride", default=1, min=1, soft_max=5)
    stride_h = bpy.props.IntProperty(name="Stride height", default=1, min=1, soft_max=5)
    stride_w = bpy.props.IntProperty(name="Stride width", default=1, min=1, soft_max=5)
    weight_filler = bpy.props.PointerProperty(type=filler_p_g)
    bias_filler = bpy.props.PointerProperty(type=filler_p_g)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Output image")
        # Same magenta tint as ConvNode.
        self.color = [1, 0, 1]
        self.use_custom_color = True

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # TODO: Finish calcsize
        # calcsize may raise IndexError while an upstream node is unwired;
        # the size readout is then omitted.
        try:
            if calcsize(self, context, axis='x') != calcsize(self, context, axis='y'):
                layout.label("image x,y output is %s,%s pixels" %
                             (calcsize(self, context, axis='x'), calcsize(self, context, axis='y')))
            else:
                layout.label("image output is %s pixels" % calcsize(self, context, axis='x'))
        except IndexError:
            pass
        layout.prop(self, "num_output")
        layout.prop(self, "bias_term")
        # Unlike ConvNode, each square_* toggle is drawn before its values.
        layout.prop(self, "square_padding")
        if self.square_padding:
            layout.prop(self, "pad")
        else:
            layout.prop(self, "pad_h")
            layout.prop(self, "pad_w")
        layout.prop(self, "square_kernel")
        if self.square_kernel:
            layout.prop(self, "kernel_size")
        else:
            layout.prop(self, "kernel_h")
            layout.prop(self, "kernel_w")
        layout.prop(self, "square_stride")
        if self.square_stride:
            layout.prop(self, "stride")
        else:
            layout.prop(self, "stride_h")
            layout.prop(self, "stride_w")
        layout.label("Weight Filler")
        self.weight_filler.draw(context, layout)
        layout.label("bias Filler")
        self.bias_filler.draw(context, layout)
        self.draw_extra_params(context, layout)
class FCNode(Node, CaffeTreeNode):
    '''Fully connected (InnerProduct) layer node.'''
    bl_idname = 'FCNodeType'
    bl_label = 'Fully connected Node'
    bl_icon = 'SOUND'
    n_type = 'InnerProduct'

    num_output = bpy.props.IntProperty(name="Number of outputs", default=10, min=1)
    bias_term = bpy.props.BoolProperty(name='Include Bias term', default=True)
    weight_filler = bpy.props.PointerProperty(type=filler_p_g)
    bias_filler = bpy.props.PointerProperty(type=filler_p_g)
    # Optional explicit starting axis (only shown when specax is enabled).
    specax = bpy.props.BoolProperty(name="Specify Axis", default=0)
    axis = bpy.props.IntProperty(name="Starting axis", default=1)

    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Output Activations")
        # Red tint distinguishes fully connected nodes in the editor.
        self.color = [1, 0, 0]
        self.use_custom_color = True

    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)

    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")

    def draw_buttons(self, context, layout):
        # calcsize defaults to axis='x'; an IndexError from an unwired
        # upstream node just hides the neuron-count readout.
        try:
            layout.label("Network is now %s neurons" % calcsize(self, context))
        except IndexError:
            pass
        layout.prop(self, "num_output")
        layout.prop(self, "bias_term")
        layout.prop(self, "specax")
        if self.specax:
            layout.prop(self, "axis")
        layout.label("Weight Filler")
        self.weight_filler.draw(context, layout)
        layout.label("bias Filler")
        self.bias_filler.draw(context, layout)
        self.draw_extra_params(context, layout)
class FlattenNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Flatten layer node: reshapes its input into a flat blob.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'FlattenNodeType'
    # Label for nice name display
    bl_label = 'Flatten Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Flatten'
    # === Custom Properties ===
    # === Optional Functions ===
    def init(self, context):
        # One image input, one flattened output.
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Flat output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Flatten")
        self.draw_extra_params(context, layout)
class SilenceNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Silence node: consumes its input without producing output.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'SilenceNodeType'
    # Label for nice name display
    bl_label = 'Silence Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Silence'
    # === Optional Functions ===
    def init(self, context):
        # Input only; a Silence layer deliberately has no output socket.
        self.inputs.new('ImageSocketType', "Input")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        # NOTE(review): unlike most nodes this does not call
        # draw_extra_params — confirm that is intentional.
        layout.label("Silence")
class LRNNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Local Response Normalization (LRN) node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'LRNNodeType'
    # Label for nice name display
    bl_label = 'LRN Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'LRN'
    # Normalization region choices (Caffe norm_region).
    modes = [
        ("ACROSS_CHANNELS", "ACROSS_CHANNELS", "Go across Channels"),
        ("WITHIN_CHANNEL", "WITHIN_CHANNEL", "Go by location"),
    ]
    # === Custom Properties ===
    alpha = bpy.props.FloatProperty(default=1, min=0, soft_max=50)
    beta = bpy.props.FloatProperty(default=5, min=0, soft_max=50)
    size = bpy.props.IntProperty(default=5, min=1, soft_max=50)
    mode = bpy.props.EnumProperty(name="Mode", default='ACROSS_CHANNELS', items=modes)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Normalized output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "alpha")
        layout.prop(self, "beta")
        layout.prop(self, "size")
        layout.prop(self, "mode")
        self.draw_extra_params(context, layout)
class ActivationNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''An activation (non-linearity) node: Sigmoid or TanH.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'AcNodeType'
    # Label for nice name display
    bl_label = 'Activation Node'
    # Icon identifier
    bl_icon = 'SOUND'
    modes = [
        ("Sigmoid", "Sigmoid", "Sigmoid"),
        ("TanH", "TanH", "TanH"),
    ]
    # === Custom Properties ===
    # Unlike sibling nodes, n_type is user-selectable here: the chosen enum
    # value doubles as the Caffe layer type.
    n_type = bpy.props.EnumProperty(name="Mode", default='Sigmoid', items=modes)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('NAFlatSocketType', "Linear input")
        self.outputs.new('OutputSocketType', "Non Linear output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "n_type")
        self.draw_extra_params(context, layout)
class ReLuNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A ReLU node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ReluNodeType'
    # Label for nice name display
    bl_label = 'ReLu Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'ReLU'
    # NOTE: currently unused — the engine prop is commented out in
    # draw_buttons below; kept for when that UI is re-enabled.
    engines = [("DEFAULT", "DEFAULT", "Default"),
               ("CAFFE", "CAFFE", "Caffe"),
               ("CUDNN", "CUDNN", "CUDNN")]
    # === Custom Properties ===
    negative_slope = bpy.props.FloatProperty(default=0)
    # Fixed: BoolProperty default should be a bool, not the int 0.
    negslope = bpy.props.BoolProperty(default=False, name='Negative Slope')
    # === Optional Functions ===
    def init(self, context):
        # In-place output socket: ReLU can run in place in Caffe.
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('InPlaceOutputSocketType', "Rectified output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "negslope")
        if self.negslope:
            layout.prop(self, "negative_slope")
        # layout.prop(self, "engine")
        self.draw_extra_params(context, layout)
class PReLuNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A PReLU node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'PReluNodeType'
    # Label for nice name display
    bl_label = 'PReLu Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'PReLU'
    # === Custom Properties ===
    channel_shared = bpy.props.BoolProperty(default=False)
    # Filler for the learned negative-slope parameters.
    filler = bpy.props.PointerProperty(type=filler_p_g)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input image")
        self.outputs.new('OutputSocketType', "Rectified output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "channel_shared")
        self.filler.draw(context, layout)
        self.draw_extra_params(context, layout)
class SMLossNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Softmax-with-loss node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'SMLossNodeType'
    # Label for nice name display
    bl_label = 'Softmax Loss Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'SoftmaxWithLoss'
    # Loss weight applied to this loss term.
    w = bpy.props.FloatProperty(default=1)
    def init(self, context):
        self.inputs.new('NAFlatSocketType', "Input Probabilities")
        self.inputs.new('LabelSocketType', "Input Label")
        self.outputs.new('OutputSocketType', "Loss output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Softmax Loss")
        layout.prop(self, "w")
        self.draw_extra_params(context, layout)
class SCELossNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Sigmoid Cross-Entropy loss node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'SCELossNodeType'
    # Label for nice name display
    bl_label = 'SCE Loss Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'SigmoidCrossEntropyLoss'
    # Loss weight applied to this loss term.
    w = bpy.props.FloatProperty(default=1)
    # === Custom Properties ===
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('NAFlatSocketType', "Input values")
        self.inputs.new('AFlatSocketType', "Input values 2")
        self.outputs.new('OutputSocketType', "Loss output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("SCE Loss")
        layout.prop(self, "w")
        self.draw_extra_params(context, layout)
class EULossNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Euclidean loss node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'EULossNodeType'
    # Label for nice name display
    bl_label = 'EU Loss Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'EuclideanLoss'
    # Loss weight applied to this loss term.
    w = bpy.props.FloatProperty(default=1)
    # === Custom Properties ===
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('AFlatSocketType', "Input values")
        self.inputs.new('AFlatSocketType', "Input values 2")
        self.outputs.new('OutputSocketType', "Loss output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("EU Loss")
        layout.prop(self, "w")
        self.draw_extra_params(context, layout)
class DropoutNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Dropout node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'DropoutNodeType'
    # Label for nice name display
    bl_label = 'Dropout Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Dropout'
    # === Custom Properties ===
    # Fraction of activations to drop during training.
    dropout_ratio = bpy.props.FloatProperty(default=0.5, min=0, max=1)
    # === Optional Functions ===
    def init(self, context):
        # In-place output socket: Dropout can run in place in Caffe.
        self.inputs.new('NAFlatSocketType', "Input image")
        self.outputs.new('InPlaceOutputSocketType', "Output image")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Dropout factor")
        layout.prop(self, "dropout_ratio")
        self.draw_extra_params(context, layout)
class ConcatNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A concatenation node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ConcatNodeType'
    # Label for nice name display
    bl_label = 'Concatanation Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Concat'
    def update_bottoms(self, context):
        # Grow or shrink the input-socket list to match input_amount.
        while len(self.inputs) < self.input_amount:
            self.inputs.new('ImageSocketType', "Input%i" % (len(self.inputs) + 1))
        while len(self.inputs) > self.input_amount:
            self.inputs.remove(self.inputs[len(self.inputs) - 1])
    # Number of inputs; changing it triggers update_bottoms above.
    input_amount = bpy.props.IntProperty(min=1, default=2, update=update_bottoms)
    # === Custom Properties ===
    axis = bpy.props.IntProperty(default=1)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input1")
        self.inputs.new('ImageSocketType', "Input2")
        self.outputs.new('OutputSocketType', "Output image")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "input_amount")
        layout.prop(self, "axis")
        self.draw_extra_params(context, layout)
class AccuracyNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''An Accuracy node.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'AccuracyNodeType'
    # Label for nice name display
    bl_label = 'Accuracy Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Accuracy'
    # === Custom Properties ===
    # When True, the layer is only included in the TEST phase.
    Testonly = bpy.props.BoolProperty(default=True)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('NAFlatSocketType', "Input class")
        self.inputs.new('LabelSocketType', "Input label")
        self.outputs.new('OutputSocketType', "Output Accuracy")
        # Default this node to the TEST phase.
        self.include_in = "TEST"
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.label("Tick for only testing")
        layout.prop(self, "Testonly")
        self.draw_include_in(layout)
        self.draw_extra_params(context, layout)
class ArgMaxNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Arg Max Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ArgMaxNodeType'
    # Label for nice name display
    bl_label = 'Arg Max Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'ArgMax'
    # === Custom Properties ===
    # Whether to also output the max value alongside the index.
    OutMaxVal = bpy.props.BoolProperty(name='Output max value', default=False)
    # Emit the indices of the top k maxima.
    TopK = bpy.props.IntProperty(name='Top k', default=1, min=1, soft_max=200)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('LossSocketType', "Input loss")
        self.outputs.new('OutputSocketType', "Output Arg Max")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "OutMaxVal")
        layout.prop(self, "TopK")
        self.draw_extra_params(context, layout)
class HDF5OutputNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''HDF5 Output Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'HDF5OutputNodeType'
    # Label for nice name display
    bl_label = 'HDF 5 Output Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'HDF5Output'
    # === Custom Properties ===
    # Destination file for the HDF5 dump; FILE_PATH gives a file picker.
    filename = bpy.props.StringProperty \
        (
            name="HDF5 output File",
            default="",
            description="The path to the data file",
            subtype='FILE_PATH'
        )
    # === Optional Functions ===
    def init(self, context):
        # Sink node: input only, no output socket.
        self.inputs.new('ImageSocketType', "Input Image")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "filename")
class LogNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Log Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'LogNodeType'
    # Label for nice name display
    bl_label = 'Log Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Log'
    # === Custom Properties ===
    scale = bpy.props.FloatProperty(name='Scale', default=1, min=0, soft_max=200)
    shift = bpy.props.FloatProperty(name='Shift', default=0, soft_min=-200, soft_max=200)
    # base default is -1 — presumably the Caffe sentinel for natural log;
    # TODO confirm against the exporter.
    base = bpy.props.FloatProperty(name='base', default=-1, min=-1, soft_max=200)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input data")
        self.outputs.new('OutputSocketType', "Output data")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "scale")
        layout.prop(self, "shift")
        layout.prop(self, "base")
        self.draw_extra_params(context, layout)
class PowerNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Power Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'PowerNodeType'
    # Label for nice name display
    bl_label = 'Power Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Power'
    # === Custom Properties ===
    power = bpy.props.FloatProperty(name='Power', default=1)
    scale = bpy.props.FloatProperty(name='Scale', default=1)
    shift = bpy.props.FloatProperty(name='Shift', default=0)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input data")
        self.outputs.new('OutputSocketType', "Output data")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "power")
        layout.prop(self, "scale")
        layout.prop(self, "shift")
        self.draw_extra_params(context, layout)
class ReductionNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Reduction Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'ReductionNodeType'
    # Label for nice name display
    bl_label = 'Reduction Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Reduction'
    # Available reduction operations.
    ops = [("SUM", "SUM", "Sum"),
           ("ASUM", "ASUM", "Absolute Sum"),
           ("SUMSQ", "SUMSQ", "Sum of squares"),
           ("MEAN", "MEAN", "Mean")
           ]
    # === Custom Properties ===
    operation = bpy.props.EnumProperty(name='Operation', default='SUM', items=ops)
    axis = bpy.props.IntProperty(name='Axis', default=0)
    coeff = bpy.props.FloatProperty(name='Coeff', default=1)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('ImageSocketType', "Input data")
        self.outputs.new('OutputSocketType', "Output data")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "operation")
        layout.prop(self, "axis")
        layout.prop(self, "coeff")
        self.draw_extra_params(context, layout)
class PythonLossNode(Node,CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Python Loss Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'PythonLossNodeType'
    # Label for nice name display
    bl_label = 'Python Loss Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Python'
    # === Custom Properties ===
    # Directory containing the user's Python layer module.
    modulepath = bpy.props.StringProperty(
        name="Module Dir",
        default="",
        description="Python Module Directory",
        subtype='DIR_PATH'
    )
    module = bpy.props.StringProperty(name="Module")
    layer = bpy.props.StringProperty(name='Python Layer Class')
    # Loss weight applied to this loss term.
    w = bpy.props.FloatProperty(default=1)
    # === Optional Functions ===
    def init(self, context):
        self.inputs.new('AFlatSocketType', "Input values")
        self.inputs.new('AFlatSocketType', "Input values 2")
        self.outputs.new('OutputSocketType', "Loss output")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        # NOTE(review): this is the only loss node that does not call
        # draw_extra_params — confirm whether that omission is intentional.
        layout.prop(self, "modulepath")
        layout.prop(self, "module")
        layout.prop(self, "layer")
        layout.prop(self, "w")
class slice_point_p_g(bpy.types.PropertyGroup):
    # Property group holding a single slice point index for SliceNode.
    slice_point = bpy.props.IntProperty(min=0)
    def draw(self, context, layout):
        layout.prop(self, "slice_point")
class SliceNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''Slice Node'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'SliceNodeType'
    # Label for nice name display
    bl_label = 'Slice Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Caffe layer type emitted for this node.
    n_type = 'Slice'
    # === Custom Properties ===
    axis = bpy.props.IntProperty(name='Axis', default=0)
    slice_points = bpy.props.CollectionProperty(type=slice_point_p_g)
    def update_slices(self, context):
        # Keep sockets in sync with num_of_slices. Invariant: N slice points
        # always expose N + 1 output sockets (N cuts make N + 1 pieces).
        while len(self.slice_points) < self.num_of_slices:
            self.slice_points.add()
            # Fixed off-by-one: before this add there were already sockets
            # Out1..Out(len) plus the extra last socket, so the new socket is
            # Out(len + 1); the old code reused the previous last name.
            self.outputs.new('OutputSocketType', "Out%i" % (len(self.slice_points) + 1))
        while len(self.slice_points) > self.num_of_slices:
            self.slice_points.remove(len(self.slice_points) - 1)
            # After the removal len(slice_points) + 1 indexes the last socket.
            self.outputs.remove(self.outputs[len(self.slice_points) + 1])
    # Changing this property resizes both collections via update_slices.
    num_of_slices = bpy.props.IntProperty(default=1, min=1, update=update_slices)
    # === Optional Functions ===
    def init(self, context):
        # Start with one slice point -> two output pieces.
        self.inputs.new('ImageSocketType', "Input data")
        self.outputs.new('OutputSocketType', "Out1")
        self.outputs.new('OutputSocketType', "Out2")
        self.slice_points.add()
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "axis")
        layout.prop(self, "num_of_slices")
        for slice_point in self.slice_points:
            slice_point.draw(context, layout)
        self.draw_extra_params(context, layout)
# // Return the current learning rate. The currently implemented learning rate
# // policies are as follows:
# // - fixed: always return base_lr.
# // - step: return base_lr * gamma ^ (floor(iter / step))
# // - exp: return base_lr * gamma ^ iter
# // - inv: return base_lr * (1 + gamma * iter) ^ (- power)
# // - multistep: similar to step but it allows non uniform steps defined by
# // stepvalue
# // - poly: the effective learning rate follows a polynomial decay, to be
# // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power)
# //    - sigmoid: the effective learning rate follows a sigmoid decay
# //      return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize))))
# //
# // where base_lr, max_iter, gamma, step, stepvalue and power are defined
# // in the solver parameter protocol buffer, and iter is the current iteration.
class SolverNode(Node, CaffeTreeNode):
    # === Basics ===
    # Description string
    '''A Solver node: holds every Caffe SolverParameter setting.'''
    # Optional identifier string. If not explicitly defined, the python class name is used.
    bl_idname = 'SolverNodeType'
    # Label for nice name display
    bl_label = 'Solver Node'
    # Icon identifier
    bl_icon = 'SOUND'
    # Learning-rate decay policies (see the comment block above the class).
    lr_policies = [("fixed", "fixed", "Fixed"),
                   ("step", "step", "Step"),
                   ("exp", "exp", "Exponential"),
                   ("inv", "inv", "Inverse"),
                   ("multistep", "multistep", "Multi Step"),
                   ("poly", "poly", "Polinomial"),
                   ("sigmoid", "sigmoid", "Sigmoid")]
    regularization_types = [("NONE", "NONE", "NONE"), ("L1", "L1", "L1"), ("L2", "L2", "L2")]
    # GPU enumeration happens at class-definition (import/registration) time;
    # getgpus() is defined elsewhere in this file and returns 'Error' when
    # the GPU count cannot be queried (e.g. nvidia-smi unavailable).
    gputoggles = []
    gpunum = getgpus()
    gpufailed = 0
    if gpunum == 'Error':
        gpufailed = 1
    else:
        for gpu in range(gpunum):
            gpu_name = 'GPU %i' % gpu
            gputoggles.append((gpu_name, gpu_name, gpu_name))
    # === Custom Properties ===
    solvername = bpy.props.StringProperty()
    test_iter = bpy.props.IntProperty(name='Test Iterations', default=100, min=1,
                                      description="How many forward passes the test should carry out")
    test_interval = bpy.props.IntProperty(name='Test Interval', default=500, min=1,
                                          description="Carry out testing every test interval iterations")
    test_compute_loss = bpy.props.BoolProperty(name='Test Compute Loss', default=False,
                                               description="Compute loss in testing")
    test_initialization = bpy.props.BoolProperty(name='Test Initialization', default=True,
                                                 description="run an initial test pass before the first iteration, ensuring memory availability and printing the starting value of the loss.")
    base_lr = bpy.props.FloatProperty(name='Base Learning rate', default=0.01, min=0)
    display = bpy.props.IntProperty(name='Display', default=100, min=0,
                                    description="The number of iterations between displaying info. If display = 0, no info will be displayed")
    average_loss = bpy.props.IntProperty(name='Average Loss', default=1, min=1,
                                         description="Display the loss averaged over the last average_loss iterations")
    max_iter = bpy.props.IntProperty(name='Maximum Iterations', default=50000, min=1)
    iter_size = bpy.props.IntProperty(name='Iteration Size', default=1, min=1,
                                      description="Accumulate gradients over iter_size x batch_size instances")
    lr_policy = bpy.props.EnumProperty(name='Learning rate Policy', items=lr_policies, default='step')
    gamma = bpy.props.FloatProperty(name='Gamma', default=0.0001, min=0)
    power = bpy.props.FloatProperty(name='Power', default=0.75)
    momentum = bpy.props.FloatProperty(name='Momentum', default=0.9, min=0)
    # Second-moment decay (beta2) for the Adam solver.
    momentum2 = bpy.props.FloatProperty(name='Momentum2', default=0.999)
    weight_decay = bpy.props.FloatProperty(name='Weight Decay', default=0.0005, min=0)
    regularization_type = bpy.props.EnumProperty(name='Regularization type', items=regularization_types, default='L2')
    stepsize = bpy.props.IntProperty(name='Step size', default=5000, min=1)
    snapshot = bpy.props.IntProperty(name='Snapshot Interval', default=0, min=0,
                                     description="The snapshot interval. 0 for no snapshot")
    snapshot_prefix = bpy.props.StringProperty \
        (
            name="Snapshot Path",
            default="",
            description="Give the path to the snapshot data",
            subtype='DIR_PATH'
        )
    snapshot_diff = bpy.props.BoolProperty(name='Snapshot diff', default=False,
                                           description="Whether to snapshot diff in the results or not")
    # snapshot_formats = [("HDF5", "HDF5", "HDF5"), ("BINARYPROTO", "BINARYPROTO", "BINARYPROTO")]
    # snapshot_format = bpy.props.EnumProperty(name='Snapshot format', items=snapshot_formats, default='BINARYPROTO')
    solver_modes = [("GPU", "GPU", "GPU"), ("CPU", "CPU", "CPU")]
    solver_mode = bpy.props.EnumProperty(name='Solver mode', items=solver_modes, default='GPU')
    gpus = bpy.props.EnumProperty(name="GPU", description="GPU to use", items=gputoggles)
    use_random_seed = bpy.props.BoolProperty(name='Use Random seed', default=False)
    random_seed = bpy.props.IntProperty(name='Random seed', default=10,
                                        description="The seed with which the Solver will initialize the Caffe random number generator")
    solver_types = [("Nesterov", "Nesterov", "Nesterovs Accelerated Gradient"),
                    ("AdaGrad", "AdaGrad", "Adaptive gradient descent"),
                    ("SGD", "SGD", "Stochastic Gradient Descent"),
                    ("RMSProp", "RMSProp", "RMSProp"),
                    ("Adam", "Adam", "Adam"),
                    ("AdaDelta", "AdaDelta", "AdaDelta")]
    solver_type = bpy.props.EnumProperty(name='Solver type', items=solver_types, default='SGD')
    delta = bpy.props.FloatProperty(name='Delta', default=1e-8, min=0, description="Numerical stability for AdaGrad")
    RMSdecay = bpy.props.FloatProperty(name='RMS decay', default=.97, min=0, description="Numerical stability for RMSprop")
    debug_info = bpy.props.BoolProperty(name='Debug info', default=False)
    snapshot_after_train = bpy.props.BoolProperty(name='Snapshot after train', default=True,
                                                  description="If false, don't save a snapshot after training finishes")
    config_path = bpy.props.StringProperty \
        (
            name="Configuration Data Path",
            default="",
            description="Give the path to the config data",
            subtype='DIR_PATH'
        )
    caffe_exec = bpy.props.StringProperty \
        (
            name="Caffe Tools Folder",
            default="",
            description="Give the path to the caffe executable",
            subtype='DIR_PATH'
        )
    # def init(self, context):
    #    self.inputs.new('LossSocketType', "Input Loss")
    # Copy function to initialize a copied node from an existing one.
    def copy(self, node):
        print("Copying from node ", node)
    # Free function to clean up on removal.
    def free(self):
        print("Removing node ", self, ", Goodbye!")
    # def update(self):
    #    x = 0
    #    for input in self.inputs:
    #        if input.is_linked == False:
    #            x += 1
    #            if x > 1:
    #                self.inputs.remove(input)
    #    if x == 0:
    #        self.inputs.new('LossSocketType', "Input Loss")
    # Additional buttons displayed on the node.
    def draw_buttons(self, context, layout):
        layout.prop(self, "solvername")
        layout.prop(self, "test_iter")
        layout.prop(self, "test_interval")
        layout.prop(self, "test_compute_loss")
        layout.prop(self, "test_initialization")
        layout.prop(self, "display")
        layout.prop(self, "average_loss")
        layout.prop(self, "max_iter")
        layout.prop(self, "iter_size")
        layout.prop(self, "lr_policy")
        layout.prop(self, "base_lr")
        # Each lr policy only exposes the parameters it actually uses.
        if self.lr_policy == 'step':
            layout.prop(self, "gamma")
            layout.prop(self, "stepsize")
        elif self.lr_policy == 'exp':
            layout.prop(self, "gamma")
        elif self.lr_policy == 'inv':
            layout.prop(self, "gamma")
            layout.prop(self, "power")
        elif self.lr_policy == 'multistep':
            layout.label("NOT IMPLEMENTED", icon='ERROR')
        elif self.lr_policy == 'poly':
            layout.prop(self, "power")
        elif self.lr_policy == 'sigmoid':
            layout.prop(self, "gamma")
            layout.prop(self, "stepsize")
        layout.prop(self, "weight_decay")
        layout.prop(self, "regularization_type")
        layout.prop(self, "snapshot")
        layout.prop(self, "snapshot_prefix")
        layout.prop(self, "snapshot_diff")
        # layout.prop(self, "snapshot_format")
        layout.prop(self, "snapshot_after_train")
        layout.prop(self, "solver_mode")
        if self.solver_mode == 'GPU':
            if self.gpufailed:
                layout.label("WARNING: GPU NOT DETECTED", icon='ERROR')
                layout.label("Check 'nvidia-smi' command can be run", icon='ERROR')
            else:
                layout.prop(self, "gpus")
        layout.prop(self, "use_random_seed")
        if self.use_random_seed:
            layout.prop(self, "random_seed")
        layout.prop(self, "solver_type")
        if self.solver_type not in ['AdaGrad','RMSProp']:
            layout.prop(self, "momentum")
        # Fixed: momentum2 (Adam's beta2) was declared above but never drawn,
        # so it could not be edited from the UI.
        if self.solver_type == 'Adam':
            layout.prop(self, "momentum2")
        if self.solver_type == 'AdaDelta':
            layout.prop(self, "delta")
        if self.solver_type == 'RMSProp':
            layout.prop(self, "RMSdecay")
        layout.prop(self, "debug_info")
        layout.prop(self, "config_path")
        layout.prop(self, "caffe_exec")
import nodeitems_utils
from nodeitems_utils import NodeCategory, NodeItem
# our own base class with an appropriate poll function,
# so the categories only show up in our own tree type
class CaffeNodeCategory(NodeCategory):
    @classmethod
    def poll(cls, context):
        # Only offer these categories when editing a Caffe node tree.
        space = context.space_data
        return space.tree_type == 'CaffeNodeTree'
# all categories in a list
node_categories = [
    # identifier, label, items list
    # Nodes operating on spatial/image blobs.
    CaffeNodeCategory("PNODES", "Spatial, Image Nodes", items=[
        # our basic node
        NodeItem("PoolNodeType"),
        NodeItem("ConvNodeType"),
        NodeItem("DeConvNodeType"),
        NodeItem("LRNNodeType"),
        NodeItem("ConcatNodeType"),
        NodeItem("SliceNodeType"),
        NodeItem("BatchNormNodeType")
    ]),
    # Per-neuron / elementwise nodes.
    CaffeNodeCategory("NNODES", "Neuron, Elementwise Nodes", items=[
        # our basic node
        NodeItem("MVNNodeType"),
        NodeItem("ExpNodeType"),
        NodeItem("EltwiseNodeType"),
        NodeItem("ArgMaxNodeType"),
        NodeItem("FCNodeType"),
        NodeItem("FlattenNodeType"),
        NodeItem("AcNodeType"),
        NodeItem("ReluNodeType"),
        NodeItem("PReluNodeType"),
        NodeItem("DropoutNodeType"),
        NodeItem("LogNodeType"),
        NodeItem("PowerNodeType")
    ]),
    # Loss and evaluation nodes.
    CaffeNodeCategory("SNODES", "Loss Nodes", items=[
        # our basic node
        NodeItem("AccuracyNodeType"),
        NodeItem("EULossNodeType"),
        NodeItem("SCELossNodeType"),
        NodeItem("SMLossNodeType"),
        NodeItem("ReductionNodeType"),
        NodeItem("PythonLossNodeType")
    ]),
    # Solver and data I/O nodes.
    CaffeNodeCategory("DNODES", "Solver, data Nodes", items=[
        # our basic node
        NodeItem("SolverNodeType"),
        NodeItem("DataNodeType"),
        NodeItem("HDF5OutputNodeType")
    ]),
    CaffeNodeCategory("MNODES", "Misc Nodes", items=[
        # our basic node
        NodeItem("SilenceNodeType")
    ]),
]
def register():
    """Register every CGT class with Blender, then expose the node categories.

    Registration order matters: property groups and sockets must exist
    before the nodes that reference them.
    """
    classes = (
        filler_p_g, params_p_gw, params_p_gb, slice_point_p_g,
        OutputSocket, CaffeTree, InPlaceOutputSocket,
        DataNode, DropoutNode, PoolNode, EltwiseNode, MVNNode,
        BatchNormNode, ExpNode, ConvNode, DeConvNode, FCNode,
        FlattenNode, LRNNode, ActivationNode, ReLuNode, PReLuNode,
        SMLossNode, SCELossNode, EULossNode, ConcatNode, AccuracyNode,
        ArgMaxNode, SolverNode, ImageSocket, LabelSocket, LossSocket,
        AFlatSocket, NAFlatSocket, SilenceNode, HDF5OutputNode,
        LogNode, PowerNode, ReductionNode, PythonLossNode, SliceNode,
    )
    for cls in classes:
        bpy.utils.register_class(cls)
    nodeitems_utils.register_node_categories("CUSTOM_NODES", node_categories)
def unregister():
    """Remove the node categories, then unregister every CGT class.

    Mirrors register() (note the original file unregisters
    InPlaceOutputSocket before CaffeTree, which is preserved here).
    """
    nodeitems_utils.unregister_node_categories("CUSTOM_NODES")
    classes = (
        filler_p_g, params_p_gw, params_p_gb, slice_point_p_g,
        OutputSocket, InPlaceOutputSocket, CaffeTree,
        DataNode, DropoutNode, PoolNode, EltwiseNode, MVNNode,
        BatchNormNode, ExpNode, ConvNode, DeConvNode, FCNode,
        FlattenNode, LRNNode, ActivationNode, ReLuNode, PReLuNode,
        SMLossNode, SCELossNode, EULossNode, ConcatNode, AccuracyNode,
        ArgMaxNode, SolverNode, ImageSocket, LabelSocket, LossSocket,
        AFlatSocket, NAFlatSocket, SilenceNode, HDF5OutputNode,
        LogNode, PowerNode, ReductionNode, PythonLossNode, SliceNode,
    )
    for cls in classes:
        bpy.utils.unregister_class(cls)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
| Chasvortex/caffe-gui-tool | CGTNodes.py | Python | unlicense | 77,330 | [
"Gaussian",
"NEURON"
] | 44a3dcc5a3f1779c698d846b45d9da1396f14f02901658554e10e9bfd97467de |
import os
import glob
from ConfigParser import ConfigParser
# from distutils.core import setup
from setuptools import setup
from distutils.cmd import Command
from distutils.errors import DistutilsExecError
from distutils.command.sdist import sdist as _sdist
# Project metadata lives in metainf.cfg so this setup script stays generic.
p = ConfigParser()
p.read("metainf.cfg")
PACKAGE = p.get("main", "package")
DESCRIPTION = p.get("main", "description")
# Import the package itself to pick up its __version__ string.
VERSION = __import__(PACKAGE).__version__
class epydoc(Command):
    """distutils command that builds the API documentation with epydoc.

    Silently does nothing when the project ships without an epydoc.cfg.
    """
    description = "Builds the documentation."
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Source trees without an epydoc config have no docs to build.
        if not os.path.exists("epydoc.cfg"):
            return
        self.mkpath("doc/html")
        status = os.system("epydoc --config epydoc.cfg %s/*.py" % PACKAGE)
        if status != 0:
            raise DistutilsExecError("failed to run epydoc")
class nosetests(Command):
    """distutils command that runs the test suite through nose."""
    description = "Runs the tests."
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Imported lazily so nose is only required when tests actually run.
        from nose.core import TestProgram
        program = TestProgram(argv=[PACKAGE])
        program.runTests()
class sdist(_sdist):
    # Build the epydoc documentation before packaging the source tarball,
    # so generated docs are included via datafiles().
    def run(self):
        self.run_command("epydoc")
        _sdist.run(self)
def datafiles():
    """Returns a list of (path, [files]) to install.

    Everything lands under share/doc/<package>-<version>: the top-level
    text files, the generated HTML docs, and the examples.
    """
    root = os.path.join("share", "doc", "%s-%s" % (PACKAGE, VERSION))
    entries = []
    # Only ship the text files that actually exist in this tree.
    docs = [name for name in ("ChangeLog", "LICENSE", "README")
            if os.path.exists(name)]
    entries.append((root, docs))
    for dirname, pattern in (("doc/html", "*"), ("examples", "*.py")):
        matches = glob.glob(os.path.join(dirname, pattern))
        if matches:
            entries.append((os.path.join(root, dirname), matches))
    return entries
def scripts():
    """Returns a list of script files to install (everything under scripts/)."""
    pattern = os.path.join("scripts", "*")
    return glob.glob(pattern)
setup(
name = PACKAGE,
version = VERSION,
description = DESCRIPTION,
author = "Brian Zimmer",
author_email = "bzimmer@ziclix.com",
url = "http://code.google.com/p/%s" % (PACKAGE),
download_url = "http://pypi.python.org/pypi/%s/%s" % (PACKAGE, VERSION),
packages = [PACKAGE],
scripts = scripts(),
data_files = datafiles(),
platforms = ['any'],
license = "MIT License",
classifiers = [
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
cmdclass = {"epydoc":epydoc, "sdist":sdist, "nosetests":nosetests},
entry_points="""
[console_scripts]
openroom = openroom.main:main
""",
zip_safe = True,
)
| bzimmer/openroom | setup.py | Python | mit | 2,612 | [
"Brian"
] | 1eea7b5b7e64a6072f2feede70a6d13fa707bdde23bb8194a30b6953176dc632 |
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d.
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print __doc__
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD
import warnings
import pylab as pl
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
def mutual_incoherence(X_relevant, X_irelevant):
    """Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
    """
    # Project the irrelevant columns onto the span of the relevant ones.
    gram_inv = pinvh(np.dot(X_relevant.T, X_relevant))
    projector = np.dot(np.dot(X_irelevant.T, X_relevant), gram_inv)
    # Largest L1 row norm of the projector.
    return np.abs(projector).sum(axis=1).max()
# Run the whole experiment twice: once with a well-conditioned design
# (conditionning=1) and once with a nearly singular one (1e-4).
for conditionning in (1, 1e-4):
    ###########################################################################
    # Simulate regression data with a correlated design
    n_features = 501
    n_relevant_features = 3
    noise_level = .2
    coef_min = .2
    # The Donoho-Tanner phase transition is around n_samples=25: below we
    # will completely fail to recover in the well-conditionned case
    n_samples = 25
    block_size = n_relevant_features
    rng = np.random.RandomState(42)
    # The coefficients of our model
    coef = np.zeros(n_features)
    coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
    # The correlation of our design: variables correlated by blocs of 3
    corr = np.zeros((n_features, n_features))
    for i in range(0, n_features, block_size):
        corr[i:i + block_size, i:i + block_size] = 1 - conditionning
    corr.flat[::n_features + 1] = 1
    corr = linalg.cholesky(corr)
    # Our design
    X = rng.normal(size=(n_samples, n_features))
    X = np.dot(X, corr)
    # Keep [Wainwright2006] (26c) constant
    X[:n_relevant_features] /= np.abs(
        linalg.svdvals(X[:n_relevant_features])).max()
    X = StandardScaler().fit_transform(X.copy())
    # The output variable
    y = np.dot(X, coef)
    y /= np.std(y)
    # We scale the added noise as a function of the average correlation
    # between the design and the output variable
    y += noise_level * rng.normal(size=n_samples)
    # Mutual incoherence between relevant and irrelevant features (used in
    # the plot titles below).
    mi = mutual_incoherence(X[:, :n_relevant_features],
                            X[:, n_relevant_features:])
    ###########################################################################
    # Plot stability selection path, using a high eps for early stopping
    # of the path, to save computation time
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   eps=0.05)
    pl.figure()
    # We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and enables to
    # see the progression along the path
    hg = pl.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
    hb = pl.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
    ymin, ymax = pl.ylim()
    pl.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
    pl.ylabel('Stability score: proportion of times selected')
    pl.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
    pl.axis('tight')
    pl.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
              loc='best')
    ###########################################################################
    # Plot the estimated stability scores for a given alpha
    # Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha:
    # Stop the user warnings outputs- they are not necessary for the example
    # as it is specifically set up to be challenging.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a paths going down to .1*alpha_max
    # to avoid exploring the regime in which very noisy variables enter
    # the model
    alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
    clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
    trees = ExtraTreesRegressor(100, compute_importances=True).fit(X, y)
    # Compare with F-score
    F, _ = f_regression(X, y)
    # Precision-recall AUC of each feature-selection method against the
    # known support (coef != 0).
    pl.figure()
    for name, score in [('F-test', F),
                        ('Stability selection', clf.scores_),
                        ('Lasso coefs', np.abs(lars_cv.coef_)),
                        ('Trees', trees.feature_importances_),
                        ]:
        precision, recall, thresholds = precision_recall_curve(coef != 0,
                                                               score)
        pl.semilogy(np.maximum(score / np.max(score), 1e-4),
                    label="%s. AUC: %.3f" % (name, auc(recall, precision)))
    pl.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
            label="Ground truth")
    pl.xlabel("Features")
    pl.ylabel("Score")
    # Plot only the 100 first coefficients
    pl.xlim(0, 100)
    pl.legend(loc='best')
    pl.title('Feature selection scores - Mutual incoherence: %.1f'
             % mi)
pl.show()
| lucidfrontier45/scikit-learn | examples/linear_model/plot_sparse_recovery.py | Python | bsd-3-clause | 7,344 | [
"Gaussian"
] | e09e888058bbadba39acc1673d29163b98d3a64fb4f58eebf9a16e741e3f880c |
#!/usr/bin/python
#
# dtc_reader.py
#
# Copyright (C) Ben Van Mechelen 2007-2009 <me@benvm.be>
#
# This file is part of Garmon
#
# Garmon is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
import os
import gobject
from gobject import GObject
import gtk
from gtk import glade
from gettext import gettext as _
import garmon
import garmon.plugin
from garmon.plugin import Plugin, STATUS_STOP, STATUS_WORKING, STATUS_PAUSE
from garmon.property_object import PropertyObject, gproperty
from garmon.obd_device import OBDDataError, OBDPortError
from garmon.trouble_codes import DTC_CODES, DTC_CODE_CLASSES
from garmon.sensor import decode_dtc_code
# Plugin metadata consumed by Garmon's plugin loader.
__name = _('DTC Reader')
__version = '0.2'
__author = 'Ben Van Mechelen'
__description = _('Reads the stored trouble codes from the vehicle')
__class = 'DTCReader'
# Column indices of the gtk.ListStore backing the DTC treeview.
(
    COLUMN_CODE,
    COLUMN_DTC,
    COLUMN_DESC
) = range(3)
class DTCReader (gtk.VBox, Plugin):
    """Garmon plugin page that reads the stored diagnostic trouble codes
    (DTCs) from the vehicle and shows them in a treeview with a detail pane.
    """
    __gtype_name__='DTCReader'
    def __init__(self, app):
        """Build the plugin UI from the glade file and hook up app signals."""
        gtk.VBox.__init__(self)
        Plugin.__init__(self)
        self.app = app
        self.dir = os.path.dirname(__file__)
        self.status = STATUS_STOP
        fname = os.path.join(self.dir, 'dtc_reader.glade')
        self._glade = glade.XML(fname, 'hpaned')
        self._dtc_info = DTCInfo(self._glade)
        button = self._glade.get_widget('re-read-button')
        button.connect('clicked', self._reread_button_clicked)
        hpaned = self._glade.get_widget('hpaned')
        self.pack_start(hpaned, True, True)
        hpaned.set_border_width(5)
        dtc_frame = self._glade.get_widget('dtc_frame')
        # Columns: raw code bits, decoded DTC string, human description.
        self.treemodel = gtk.ListStore(gobject.TYPE_STRING,
                                       gobject.TYPE_STRING,
                                       gobject.TYPE_STRING)
        treeview = gtk.TreeView(self.treemodel)
        treeview.set_rules_hint(True)
        #column = gtk.TreeViewColumn(_('Bits'), gtk.CellRendererText(),
        #                            text=COLUMN_CODE)
        #treeview.append_column(column)
        column = gtk.TreeViewColumn(_('DTC'), gtk.CellRendererText(),
                                    text=COLUMN_DTC)
        treeview.append_column(column)
        selection = treeview.get_selection()
        selection.set_mode(gtk.SELECTION_SINGLE)
        selection.connect("changed", self._on_selection_changed)
        dtc_frame.add(treeview)
        self.show_all()
        # Remember handler ids so unload() can disconnect them.
        self._reset_cbid = app.connect("reset", self._on_reset)
        self._switch_cbid = app.notebook.connect('switch-page',
                                            self._notebook_page_change_cb)
    def _on_reset(self):
        """Clear the code list and detail pane; re-read if still connected."""
        self.treemodel.clear()
        dtc = cls = description = additional = ''
        self._dtc_info.code = dtc
        self._dtc_info.code_class = cls
        self._dtc_info.description = description
        self._dtc_info.additional = additional
        if self.app.device.connected:
            self.start()
    def _on_selection_changed(self, selection):
        """Populate the detail pane for the DTC selected in the treeview."""
        treeview = selection.get_tree_view()
        model, iter = selection.get_selected()
        if iter:
            dtc = model.get_value(iter, COLUMN_DTC)
            # First three characters of the DTC identify its class (e.g. P01).
            cls = DTC_CODE_CLASSES[dtc[:3]]
            description = model.get_value(iter, COLUMN_DESC)
            additional = 'Coming soon'
        else:
            dtc = cls = description = additional = ''
        self._dtc_info.code = dtc
        self._dtc_info.code_class = cls
        self._dtc_info.description = description
        self._dtc_info.additional = additional
    def _notebook_page_change_cb (self, notebook, no_use, page):
        """Start reading when this page becomes active; stop otherwise."""
        plugin = notebook.get_nth_page(page)
        if plugin is self:
            self.app.set_active_plugin(plugin)
            self._on_reset()
        else:
            self.stop()
    def restart(self):
        """Public restart hook: same as a reset."""
        self._on_reset()
    def _reread_button_clicked(self, button):
        self.start()
    def stop(self):
        # Reading DTCs is a one-shot request; nothing to cancel.
        pass
    def start(self):
        """Request the stored DTCs from the device and fill the treeview."""
        def success_cb(cmd, dtcs, args):
            # Translate each raw code and look up its description, preferring
            # the manufacturer-specific table over the generic one.
            self.treemodel.clear()
            for code in dtcs:
                dtc = decode_dtc_code(code)
                desc = 'Code not in Generic or ' + str(self.current_make) + ". Please set vehicle make in preferences."
                if DTC_CODES.has_key(dtc):
                    desc = DTC_CODES[dtc]
                if self.current_make:
                    # NOTE(review): if current_make is set but matches none of
                    # the makes below, DTC_CODES_MANUFACTURER is unbound here
                    # and this raises NameError — TODO confirm/guard.
                    if DTC_CODES_MANUFACTURER.has_key(dtc):
                        desc = DTC_CODES_MANUFACTURER[dtc]
                iter = self.treemodel.append(None)
                self.treemodel.set(iter, COLUMN_CODE, code,
                                         COLUMN_DTC, dtc,
                                         COLUMN_DESC, desc)
        def error_cb(cmd, error, args):
            self._display_port_error_dialog(error)
            self.app.scheduler.working = False
        # Import the per-make code table into start()'s local scope so the
        # success_cb closure can see it.
        self.current_make = self.app.prefs.get("vehicle.make")
        if self.current_make:
            if self.current_make == "Acura":
                from garmon.acura_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Audi":
                from garmon.audi_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "BMW":
                from garmon.bmw_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Chrysler":
                from garmon.chrysler_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Ford":
                from garmon.ford_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Chevrolet":
                from garmon.chevrolet_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Honda":
                from garmon.honda_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Hyundai":
                from garmon.hyundai_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Infiniti":
                from garmon.infiniti_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Isuzu":
                from garmon.isuzu_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Jaguar":
                from garmon.jaguar_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Kia":
                from garmon.kia_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Land Rover":
                from garmon.land_rover_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Lexus":
                from garmon.lexus_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Mazda":
                from garmon.mazda_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Mitsubishi":
                from garmon.mitsubishi_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Nissan":
                from garmon.nissan_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Subaru":
                from garmon.subaru_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Toyota":
                from garmon.toyota_codes import DTC_CODES_MANUFACTURER
            if self.current_make == "Volkswagen":
                from garmon.volkswagen_codes import DTC_CODES_MANUFACTURER
        try:
            self.app.device.read_dtc(success_cb, error_cb)
        except OBDPortError, e:
            self._display_port_error_dialog(e)
    def load(self):
        """Add this plugin as a page in the application notebook."""
        self.app.notebook.append_page(self, gtk.Label(_('DTC Reader')))
    def unload(self):
        """Disconnect signal handlers and remove the page from the notebook."""
        self.app.notebook.disconnect(self._switch_cbid)
        self.app.disconnect(self._reset_cbid)
        self.app.notebook.remove(self)
class DTCInfo(GObject, PropertyObject) :
    """Detail pane: mirrors its four gobject properties into the labels and
    text view loaded from the glade file."""
    gproperty('code', str)
    gproperty('code-class', str)
    gproperty('description', str)
    gproperty('additional', str)
    def __init__(self, glade):
        """Grab the detail widgets from the already-loaded glade XML."""
        GObject.__init__(self)
        PropertyObject.__init__(self)
        self._code_label = glade.get_widget('code_label')
        self._class_label = glade.get_widget('class_label')
        self._description_label = glade.get_widget('description_label')
        self._additional_textview = glade.get_widget('additional_textview')
        self._additional_buffer = gtk.TextBuffer()
        self._additional_textview.set_buffer(self._additional_buffer)
    def __post_init__(self):
        # Keep the widgets in sync whenever a property changes.
        self.connect('notify::code', self._notify_cb)
        self.connect('notify::code-class', self._notify_cb)
        self.connect('notify::description', self._notify_cb)
        self.connect('notify::additional', self._notify_cb)
    def _notify_cb(self, o, pspec):
        """Route each property change to its corresponding widget."""
        if pspec.name == 'code':
            self._code_label.set_text(self.code)
        elif pspec.name == 'code-class':
            self._class_label.set_text(self.code_class)
        elif pspec.name == 'description':
            self._description_label.set_text(self.description)
        elif pspec.name == 'additional':
            self._additional_buffer.set_text(self.additional)
| bwhitelock/garmon-ng | plugins/dtc_reader/dtc_reader.py | Python | gpl-3.0 | 9,980 | [
"Jaguar"
] | 9cb70a0e3e3dfe1e3d58782ed77fd136685803db124a1159a4ec27abf1cdee4d |
from django.utils.translation import ugettext as _
from sqlagg import SumColumn
from sqlagg.columns import SimpleColumn
from corehq.apps.reports.sqlreport import SqlData, DatabaseColumn
class AncHmisCaseSqlData(SqlData):
    """Antenatal-care HMIS indicators aggregated from the
    fluff_AncHmisCaseFluff table, grouped per location."""
    table_name = "fluff_AncHmisCaseFluff"
    def __init__(self, domain, datespan):
        self.domain = domain
        self.datespan = datespan
    @property
    def filter_values(self):
        # Bind values for the :domain/:startdate/:enddate placeholders below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )
    @property
    def filters(self):
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]
    @property
    def columns(self):
        # Each SumColumn totals one fluff indicator over the filtered rows.
        return [
            DatabaseColumn(_("Location ID"), SimpleColumn("location_id")),
            DatabaseColumn(_("Antenatal Attendance - Total"), SumColumn("attendance_total")),
            DatabaseColumn(_("Antenatal first Visit before 20wks"), SumColumn("attendance_before_20_weeks_total")),
            DatabaseColumn(_("Antenatal first Visit after 20wks"), SumColumn("attendance_after_20_weeks_total")),
            DatabaseColumn(_("Pregnant Women that attend antenatal clinic for 4th visit during the month"),
                           SumColumn("attendance_gte_4_visits_total")),
            DatabaseColumn(_("ANC syphilis test done"), SumColumn("anc_syphilis_test_done_total")),
            DatabaseColumn(_("ANC syphilis test positive"), SumColumn("anc_syphilis_test_positive_total")),
            DatabaseColumn(_("ANC syphilis case treated"), SumColumn("anc_syphilis_case_treated_total")),
            DatabaseColumn(_("Pregnant women who receive malaria IPT1"), SumColumn("pregnant_mothers_receiving_ipt1_total")),
            DatabaseColumn(_("Pregnant women who receive malaria IPT2"), SumColumn("pregnant_mothers_receiving_ipt2_total")),
            DatabaseColumn(_("Pregnant women who receive malaria LLIN"), SumColumn("pregnant_mothers_receiving_llin_total")),
            DatabaseColumn(_("Pregnant women who receive malaria Haematinics"), SumColumn("pregnant_mothers_receiving_ifa_total")),
            DatabaseColumn(_("Postanatal Attendance - Total"), SumColumn("postnatal_attendance_total")),
            DatabaseColumn(_("Postnatal clinic visit within 1 day of delivery"), SumColumn("postnatal_clinic_visit_lte_1_day_total")),
            DatabaseColumn(_("Postnatal clinic visit within 3 days of delivery"), SumColumn("postnatal_clinic_visit_lte_3_days_total")),
            DatabaseColumn(_("Postnatal clinic visit >= 7 days of delivery"), SumColumn("postnatal_clinic_visit_gte_7_days_total"))
        ]
    @property
    def group_by(self):
        return ["domain","location_id"]
class ProjectIndicatorsCaseSqlData(SqlData):
    """Project (CCT) indicators from the fluff_ProjectIndicatorsCaseFluff
    table, grouped per mother and location."""
    table_name = "fluff_ProjectIndicatorsCaseFluff"
    def __init__(self, domain, datespan):
        self.domain = domain
        self.datespan = datespan
    @property
    def filter_values(self):
        # Bind values for the :domain/:startdate/:enddate placeholders below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )
    @property
    def filters(self):
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]
    @property
    def columns(self):
        return [
            DatabaseColumn(_("Location ID"), SimpleColumn("location_id")),
            DatabaseColumn(_("Number of pregnant women who registered for ANC (in CCT payment sites only) "),
                           SumColumn("women_registered_anc_total")),
            DatabaseColumn(_("Number of women who had 4 ANC visits (in CCT payment sites only)"),
                           SumColumn("women_having_4_anc_visits_total")),
            DatabaseColumn(_("Number of women who delivered at the facility (in CCT payment sites only)"),
                           SumColumn("women_delivering_at_facility_cct_total")),
            DatabaseColumn(_("Number of women who attended PNC within 6 weeks of delivery"),
                           SumColumn("women_delivering_within_6_weeks_attending_pnc_total")),
            DatabaseColumn(_("Number of free sim cards given"),
                           SumColumn("number_of_free_sims_given_total")),
            DatabaseColumn(_("Number of MTN MNO"),
                           SumColumn("mno_mtn_total")),
            DatabaseColumn(_("Number of Etisalat MNO"),
                           SumColumn("mno_etisalat_total")),
            DatabaseColumn(_("Number of GLO MNO"),
                           SumColumn("mno_glo_total")),
            DatabaseColumn(_("Number of Airtel MNO"),
                           SumColumn("mno_airtel_total")),
        ]
    @property
    def group_by(self):
        # Unlike the other fluff sources, this one also groups by mother.
        return ["domain","mother_id","location_id"]
class LdHmisCaseSqlData(SqlData):
    """Labour & delivery HMIS indicators from the fluff_LdHmisCaseFluff
    table, grouped per location."""
    table_name = "fluff_LdHmisCaseFluff"
    def __init__(self, domain, datespan):
        self.domain = domain
        self.datespan = datespan
    @property
    def filter_values(self):
        # Bind values for the :domain/:startdate/:enddate placeholders below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )
    @property
    def filters(self):
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]
    @property
    def columns(self):
        # Commented-out columns are indicators not yet collected by the form.
        return [
            DatabaseColumn(_("Location ID"), SimpleColumn("location_id")),
            DatabaseColumn(_("Deliveries - Total"), SumColumn("deliveries_total")),
            DatabaseColumn(_("Deliveries - SVD"), SumColumn("deliveries_svd_total")),
            DatabaseColumn(_("Deliveries - Assisted"), SumColumn("deliveries_assisted_total")),
            DatabaseColumn(_("Deliveries caesarean section"), SumColumn("deliveries_caesarean_section_total")),
            DatabaseColumn(_("Deliveries - Complications"), SumColumn("deliveries_complications_total")),
            DatabaseColumn(_("Deliveries - Preterm"), SumColumn("deliveries_preterm_total")),
            DatabaseColumn(_("Deliveries - HIV positive women"), SumColumn("deliveries_hiv_positive_women_total")),
            DatabaseColumn(_("LiveBirth - HIV positive women"), SumColumn("live_birth_hiv_positive_women_total")),
            DatabaseColumn(_("Deliveries - HIV positive booked women"), SumColumn("deliveries_hiv_positive_booked_women_total")),
            DatabaseColumn(_("Deliveries - HIV positive unbooked women"), SumColumn("deliveries_hiv_positive_unbooked_women_total")),
            # DatabaseColumn(_("Deliveries - Monitored using Partograph"), SumColumn("deliveries_monitored_using_partograph_total")),
            # DatabaseColumn(_("Deliveries taken by skilled birth attendant"), SumColumn("deliveries_skilled_birth_attendant_total")),
            DatabaseColumn(_("TT1"), SumColumn("tt1_total")),
            DatabaseColumn(_("TT2"), SumColumn("tt2_total")),
            DatabaseColumn(_("Live Births(Male, Female, < 2.5kg, >= 2.5k g)"), SumColumn("live_births_male_female_total")),
            DatabaseColumn(_("Male, < 2.5kg"), SumColumn("male_lt_2_5kg_total")),
            DatabaseColumn(_("Male, >= 2.5kg"), SumColumn("male_gte_2_5kg_total")),
            DatabaseColumn(_("Female, < 2.5kg"), SumColumn("female_lt_2_5kg_total")),
            DatabaseColumn(_("Female, >= 2.5kg"), SumColumn("female_gte_2_5kg_total")),
            DatabaseColumn(_("Still Births total"), SumColumn("still_births_total")),
            DatabaseColumn(_("Fresh Still Births"), SumColumn("fresh_still_births_total")),
            DatabaseColumn(_("Other still Births"), SumColumn("other_still_births_total")),
            DatabaseColumn(_("Abortion Induced"), SumColumn("abortion_induced_total")),
            DatabaseColumn(_("Other Abortions"), SumColumn("other_abortions_total")),
            DatabaseColumn(_("Total Abortions"), SumColumn("total_abortions_total")),
            DatabaseColumn(_("Birth Asphyxia - Total"), SumColumn("birth_asphyxia_total")),
            DatabaseColumn(_("Birth Asphyxia - Male"), SumColumn("birth_asphyxia_male_total")),
            DatabaseColumn(_("Birth Asphyxia - Female"), SumColumn("birth_asphyxia_female_total")),
            DatabaseColumn(_("Neonatal Sepsis - Total"), SumColumn("neonatal_sepsis_total")),
            DatabaseColumn(_("Neonatal Sepsis - Male"), SumColumn("neonatal_sepsis_male_total")),
            DatabaseColumn(_("Neonatal Sepsis - Female"), SumColumn("neonatal_sepsis_female_total")),
            DatabaseColumn(_("Neonatal Tetanus - Total"), SumColumn("neonatal_tetanus_total")),
            DatabaseColumn(_("Neonatal Tetanus - Male"), SumColumn("neonatal_tetanus_male_total")),
            DatabaseColumn(_("Neonatal Tetanus - Female"), SumColumn("neonatal_tetanus_female_total")),
            DatabaseColumn(_("Neonatal Jaundice - Total"), SumColumn("neonatal_jaundice_total")),
            DatabaseColumn(_("Neonatal Jaundice - Male"), SumColumn("neonatal_jaundice_male_total")),
            DatabaseColumn(_("Neonatal Jaundice - Female"), SumColumn("neonatal_jaundice_female_total")),
            DatabaseColumn(_("Low birth weight babies placed in KMC - Total"), SumColumn("low_birth_weight_babies_in_kmc_total")),
            DatabaseColumn(_("Low birth weight babies placed in KMC - Male"), SumColumn("low_birth_weight_babies_in_kmc_male_total")),
            DatabaseColumn(_("Low birth weight babies placed in KMC - Female"), SumColumn("low_birth_weight_babies_in_kmc_female_total")),
            # DatabaseColumn(_("Newborns with low birth weight discharged - Total"), SumColumn("newborns_low_birth_weight_discharged_total")),
            # DatabaseColumn(_("Newborns with low birth weight discharged - Male"), SumColumn("newborns_low_birth_weight_discharged_male_total")),
            # DatabaseColumn(_("Newborns with low birth weight discharged - Female"), SumColumn("newborns_low_birth_weight_discharged_female_total")),
        ]
    @property
    def group_by(self):
        return ["domain", "location_id"]
class ImmunizationHmisCaseSqlData(SqlData):
    """Immunization HMIS indicators from the fluff_ImmunizationHmisCaseFluff
    table, grouped per location."""
    table_name = "fluff_ImmunizationHmisCaseFluff"
    def __init__(self, domain, datespan):
        self.domain = domain
        self.datespan = datespan
    @property
    def filter_values(self):
        # Bind values for the :domain/:startdate/:enddate placeholders below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )
    @property
    def filters(self):
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]
    @property
    def columns(self):
        return [
            DatabaseColumn(_("Location ID"), SimpleColumn("location_id")),
            DatabaseColumn(_("OPV0 - birth "), SumColumn("opv_0_total")),
            DatabaseColumn(_("Hep.B0 - birth"), SumColumn("hep_b_0_total")),
            DatabaseColumn(_("BCG"),SumColumn("bcg_total")),
            DatabaseColumn(_("OPV1"), SumColumn("opv_1_total")),
            DatabaseColumn(_("HEP.B1"), SumColumn("hep_b_1_total")),
            DatabaseColumn(_("Penta.1"), SumColumn("penta_1_total")),
            DatabaseColumn(_("DPT1 (not when using Penta)"), SumColumn("dpt_1_total")),
            DatabaseColumn(_("PCV1"), SumColumn("pcv_1_total")),
            DatabaseColumn(_("OPV2"), SumColumn("opv_2_total")),
            DatabaseColumn(_("Hep.B2"), SumColumn("hep_b_2_total")),
            DatabaseColumn(_("Penta.2"), SumColumn("penta_2_total")),
            DatabaseColumn(_("DPT2 (not when using Penta)"), SumColumn("dpt_2_total")),
            DatabaseColumn(_("PCV2"), SumColumn("pcv_2_total")),
            DatabaseColumn(_("OPV3"), SumColumn("opv_3_total")),
            DatabaseColumn(_("Penta.3"), SumColumn("penta_3_total")),
            DatabaseColumn(_("DPT3 (not when using Penta)"), SumColumn("dpt_3_total")),
            DatabaseColumn(_("PCV3"), SumColumn("pcv_3_total")),
            DatabaseColumn(_("Measles 1"), SumColumn("measles_1_total")),
            DatabaseColumn(_("Fully Immunized (<1year)"), SumColumn("fully_immunized_total")),
            DatabaseColumn(_("Yellow Fever"), SumColumn("yellow_fever_total")),
            DatabaseColumn(_("Measles 2"), SumColumn("measles_2_total")),
            DatabaseColumn(_("Conjugate A CSM"), SumColumn("conjugate_csm_total"))
        ]
    @property
    def group_by(self):
        return ["domain","location_id"]
class McctMonthlyAggregateFormSqlData(SqlData):
    """Monthly aggregate of mCCT beneficiary-status counters.

    Reads from the fluff_McctMonthlyAggregateFormFluff table, restricted to
    one domain and the report datespan, grouped by (domain, location).
    """
    table_name = "fluff_McctMonthlyAggregateFormFluff"

    def __init__(self, domain, datespan):
        # domain: CommCare domain name; datespan: report date range object
        # exposing startdate_utc / enddate_utc datetimes.
        self.domain = domain
        self.datespan = datespan

    @property
    def filter_values(self):
        # Bind values for the named placeholders used in `filters` below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )

    @property
    def filters(self):
        # SQL WHERE fragments; placeholders are filled from filter_values.
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]

    @property
    def columns(self):
        # One SumColumn per beneficiary status/reason counter.
        return [
            DatabaseColumn(_("Location ID"), SimpleColumn("location_id")),
            DatabaseColumn(_("Eligible beneficiaries due to registration"), SumColumn("status_eligible_due_to_registration")),
            DatabaseColumn(_("Eligible beneficiaries due to 4th visit"), SumColumn("status_eligible_due_to_4th_visit")),
            DatabaseColumn(_("Eligible beneficiaries due to delivery"), SumColumn("status_eligible_due_to_delivery")),
            DatabaseColumn(_("Eligible beneficiaries due to immunization or PNC visit"),
                           SumColumn("status_eligible_due_to_immun_or_pnc_visit")),
            DatabaseColumn(_("Reviewed beneficiaries due to registration"), SumColumn("status_reviewed_due_to_registration")),
            DatabaseColumn(_("Reviewed beneficiaries due to 4th visit"), SumColumn("status_reviewed_due_to_4th_visit")),
            DatabaseColumn(_("Reviewed beneficiaries due to delivery"), SumColumn("status_reviewed_due_to_delivery")),
            DatabaseColumn(_("Reviewed beneficiaries due to immunization or PNC visit"),
                           SumColumn("status_reviewed_due_to_immun_or_pnc_visit")),
            DatabaseColumn(_("Approved beneficiaries due to registration"), SumColumn("status_approved_due_to_registration")),
            DatabaseColumn(_("Approved beneficiaries due to 4th visit"), SumColumn("status_approved_due_to_4th_visit")),
            DatabaseColumn(_("Approved beneficiaries due to delivery"), SumColumn("status_approved_due_to_delivery")),
            DatabaseColumn(_("Approved beneficiaries due to immunization or PNC visit"),
                           SumColumn("status_approved_due_to_immun_or_pnc_visit")),
            DatabaseColumn(_("Paid beneficiaries due to registration"), SumColumn("status_paid_due_to_registration")),
            DatabaseColumn(_("Paid beneficiaries due to 4th visit"), SumColumn("status_paid_due_to_4th_visit")),
            DatabaseColumn(_("Paid beneficiaries due to delivery"), SumColumn("status_paid_due_to_delivery")),
            DatabaseColumn(_("Paid beneficiaries due to immunization or PNC visit"),
                           SumColumn("status_paid_due_to_immun_or_pnc_visit")),
            DatabaseColumn(_("Rejected beneficiaries due to incorrect phone number"), SumColumn("status_rejected_due_to_incorrect_phone_number")),
            DatabaseColumn(_("Rejected beneficiaries due to double entry"), SumColumn("status_rejected_due_to_double_entry")),
            DatabaseColumn(_("Rejected beneficiaries due to other errors"), SumColumn("status_rejected_due_to_other_errors"))
        ]

    @property
    def group_by(self):
        # One aggregate row per (domain, location).
        return ["domain","location_id"]
class AllHmisCaseSqlData(SqlData):
    """HMIS case-level indicator totals from fluff_AllHmisCaseFluff.

    Restricted to one domain and the report datespan, grouped by
    (domain, location).
    """
    table_name = "fluff_AllHmisCaseFluff"

    def __init__(self, domain, datespan):
        # domain: CommCare domain name; datespan: report date range object.
        self.domain = domain
        self.datespan = datespan

    @property
    def filter_values(self):
        # Bind values for the named placeholders used in `filters` below.
        return dict(
            domain=self.domain,
            startdate=self.datespan.startdate_utc.date(),
            enddate=self.datespan.enddate_utc.date()
        )

    @property
    def filters(self):
        # SQL WHERE fragments; placeholders are filled from filter_values.
        return [
            "domain = :domain",
            "date between :startdate and :enddate"
        ]

    @property
    def columns(self):
        # NOTE(review): unlike the sibling SqlData classes, no Location ID
        # column is emitted here even though group_by includes location_id —
        # confirm whether that is intentional.
        return [
            DatabaseColumn(_("Newborns with low birth weight discharged - Total"), SumColumn("newborns_low_birth_weight_discharged_total")),
            DatabaseColumn(_("Newborns with low birth weight discharged - Male"), SumColumn("newborns_low_birth_weight_discharged_male_total")),
            DatabaseColumn(_("Newborns with low birth weight discharged - Female"), SumColumn("newborns_low_birth_weight_discharged_female_total")),
            DatabaseColumn(_("Pregnant Mothers Referred out"), SumColumn("pregnant_mothers_referred_out_total")),
            DatabaseColumn(_("ANC Anemia test done"), SumColumn("anc_anemia_test_done_total")),
            DatabaseColumn(_("ANC Anemia test positive"), SumColumn("anc_anemia_test_positive_total")),
            DatabaseColumn(_("ANC Proteinuria Test done"), SumColumn("anc_proteinuria_test_done_total")),
            DatabaseColumn(_("ANC Proteinuria test positive"), SumColumn("anc_proteinuria_test_positive_total")),
            DatabaseColumn(_("HIV rapid antibody test done"), SumColumn("hiv_rapid_antibody_test_done_total")),
            DatabaseColumn(_("Deaths of women related to pregnancy"),
                           SumColumn("deaths_of_women_related_to_pregnancy_total")),
            DatabaseColumn(_("Pregnant mothers tested positive for HIV"),
                           SumColumn("pregnant_mothers_tested_for_hiv_total")),
            DatabaseColumn(_("Pregnant Mothers with confirmed Malaria"),
                           SumColumn("pregnant_mothers_with_confirmed_malaria_total")),
            DatabaseColumn(_("ANC Women with previously known HIV status(at ANC)"),
                           SumColumn("anc_women_previously_known_hiv_status_total")),
            DatabaseColumn(_("Pregnant women who received HIV counseling testing and received result at ANC"),
                           SumColumn("pregnant_women_received_hiv_counseling_and_result_anc_total")),
            DatabaseColumn(_("Pregnant women who received HIV counseling testing and received result at L&D"),
                           SumColumn("pregnant_women_received_hiv_counseling_and_result_ld_total")),
            DatabaseColumn(_("Partners of HIV positive women who tested HIV negative"),
                           SumColumn("partners_of_hiv_positive_women_tested_negative_total")),
            DatabaseColumn(_("Partners of HIV positive women who tested positive"),
                           SumColumn("partners_of_hiv_positive_women_tested_positive_total")),
            DatabaseColumn(_("Assessed for clinical stage eligibility"),
                           SumColumn("assessed_for_clinical_stage_eligibility_total")),
            DatabaseColumn(_("Assessed for cd4-count eligibility"),
                           SumColumn("assessed_for_clinical_cd4_eligibility_total")),
            DatabaseColumn(_("Pregnant HIV positive women who received ART prophylaxis for PMTCT (Triple)"),
                           SumColumn("pregnant_hiv_positive_women_received_art_total")),
            DatabaseColumn(_("Pregnant HIV positive woman who received ARV prophylaxis for PMTCT (AZT)"),
                           SumColumn("pregnant_hiv_positive_women_received_azt_total")),
            DatabaseColumn(_("Pregnant positive women who received ARV prophylaxis(SdNvP in Labor + (AZT + 3TC))"),
                           SumColumn("pregnant_hiv_positive_women_received_mother_sdnvp_total")),
            DatabaseColumn(_("Infants born to HIV infected women started on cotrimoxazole prophylaxis within 2 months"),
                           SumColumn("infants_hiv_women_cotrimoxazole_lt_2_months_total")),
            DatabaseColumn(_("Infants born to HIV infected women started on cotrimoxazole prophylaxis 2 months & above"),
                           SumColumn("infants_hiv_women_cotrimoxazole_gte_2_months_total")),
            DatabaseColumn(_("Infants born to HIV infected women who received an HIV test within two months of birth - (DNA -PCR)"),
                           SumColumn("infants_hiv_women_received_hiv_test_lt_2_months_total")),
            DatabaseColumn(_("Infants born to HIV infected women who received an HIV test after two months of birth - (DNA - PCR)"),
                           SumColumn("infants_hiv_women_received_hiv_test_gte_2_months_total")),
            DatabaseColumn(_("Infants born to HIV infected women who received an HIV test at 18 months - (HIV Rapid test)"),
                           SumColumn("infants_hiv_women_received_hiv_test_lt_18_months_total")),
            DatabaseColumn(_("Infant born to HIV infected women who tested negative to HIV Rapid test at 18 months"),
                           SumColumn("infants_hiv_women_received_hiv_test_gte_18_months_total")),
            DatabaseColumn(_("HIV exposed infants breast feeding and receiving ARV prophylaxis"),
                           SumColumn("hiv_exposed_infants_breast_feeding_receiving_arv_total"))
        ]

    @property
    def group_by(self):
        # One aggregate row per (domain, location).
        return ["domain", "location_id"]
| qedsoftware/commcare-hq | custom/m4change/reports/sql_data.py | Python | bsd-3-clause | 21,543 | [
"VisIt"
] | abf0314eac8290e7d717ca4335dfc10dc22c8bd7c85ba5aa062f06fad733082e |
#!/usr/bin/python
# EPG CPMG simulation code, based off of Matlab scripts from Brian Hargreaves <bah@stanford.edu>
# 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
# 2019 Ke Wang <kewang@eecs.berkeley.edu> rewrite it in Pytorch
from __future__ import division
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import numpy as np
from warnings import warn
def rf(FpFmZ, alpha):
    """Same as rf2, but only returns FpFmZ."""
    return rf2(FpFmZ, alpha)[0]
def rf2(FpFmZ, alpha):
    """Propagate EPG states through an RF refocusing rotation of `alpha`
    radians.  Assumes the CPMG condition (magnetization on the real x axis).

    INPUT (torch tensors):
        FpFmZ = 3xN tensor of F+, F- and Z states.
        alpha = RF pulse flip angle in radians (0-d tensor)

    OUTPUT:
        FpFmZ = updated 3xN state tensor.
        RR    = the 3x3 RF rotation matrix.
    """
    if torch.abs(alpha) > 2 * np.pi:
        warn('rf2: Flip angle should be in radians! alpha=%f' % alpha)

    half_cos_sq = torch.cos(alpha / 2.) ** 2
    half_sin_sq = torch.sin(alpha / 2.) ** 2
    cos_a = torch.cos(alpha)
    sin_a = torch.sin(alpha)

    RR = torch.tensor([[half_cos_sq, half_sin_sq, sin_a],
                       [half_sin_sq, half_cos_sq, -sin_a],
                       [-0.5 * sin_a, 0.5 * sin_a, cos_a]])

    return torch.mm(RR, FpFmZ.float()), RR
def rf_ex(FpFmZ, alpha):
    """Same as rf2_ex, but only returns FpFmZ."""
    return rf2_ex(FpFmZ, alpha)[0]
def rf2_ex(FpFmZ, alpha):
    """Propagate EPG states through an RF excitation of `alpha` radians
    applied along y (phase pi/2), in PyTorch.

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        alpha = flip angle in radians (0-d tensor, or a 1-element sequence)

    OUTPUT:
        FpFmZ = updated 3xN state tensor.
        RR    = the 3x3 RF rotation matrix.
    """
    # Accept a 1-element array/tensor for alpha; a 0-d tensor raises here
    # and is used as-is.
    try:
        alpha = alpha[0]
    except:
        pass

    if torch.abs(alpha) > 2 * np.pi:
        warn('rf2_ex: Flip angle should be in radians! alpha=%f' % alpha)

    half_cos_sq = torch.cos(alpha / 2.) ** 2
    half_sin_sq = torch.sin(alpha / 2.) ** 2
    cos_a = torch.cos(alpha)
    sin_a = torch.sin(alpha)

    RR = torch.tensor([[half_cos_sq, -half_sin_sq, sin_a],
                       [-half_sin_sq, half_cos_sq, sin_a],
                       [-0.5 * sin_a, -0.5 * sin_a, cos_a]])

    return torch.mm(RR, FpFmZ), RR
def rf_prime(FpFmZ, alpha):
    """Same as rf_prime2, but only returns FpFmZ."""
    derivative_state, _ = rf_prime2(FpFmZ, alpha)
    return derivative_state
def rf_prime2(FpFmZ, alpha):
    """ Compute the gradient of the RF rotation operator, where
        alpha (radians) is the RF rotation. Assumes CPMG condition, i.e.
        magnetization lies on the real x axis.

        INPUT:
            FpFmZ = 3xN vector of F+, F- and Z states.
            alpha = RF pulse flip angle in radians

        OUTPUT:
            FpFmZ = Derivative of FpFmZ state w.r.t. alpha
            RR = Derivative of RF rotation matrix (3x3) w.r.t. alpha
        """

    if torch.abs(alpha) > 2 * np.pi:
        warn('rf_prime2: Flip angle should be in radians! alpha=%f' % alpha)

    # Element-wise d/d(alpha) of the refocusing matrix built in rf2:
    # d(cos^2(a/2))/da = -cos(a/2)sin(a/2), d(sin a)/da = cos a, etc.
    RR = torch.tensor([ [-torch.cos(alpha/2.) * torch.sin(alpha/2.), torch.cos(alpha/2.) * torch.sin(alpha/2.), torch.cos(alpha)],
                    [torch.cos(alpha/2.) * torch.sin(alpha/2.), -torch.cos(alpha/2.) * torch.sin(alpha/2.), -torch.cos(alpha)],
                    [-0.5 * torch.cos(alpha), 0.5 * torch.cos(alpha), -torch.sin(alpha)] ])

    FpFmZ = torch.mm(RR, FpFmZ)

    return FpFmZ, RR
def rf_B1_prime(FpFmZ, alpha, B1):
    """Same as rf_B1_prime2, but only returns FpFmZ."""
    derivative_state, _ = rf_B1_prime2(FpFmZ, alpha, B1)
    return derivative_state
def rf_B1_prime2(FpFmZ, alpha, B1):
    """ Compute the gradient of B1 inhomogeneity w.r.t. RF refocusing operator, where
        alpha (radians) is the RF rotation and B1 is the B1 homogeneity (0, 2).
        Assumes CPMG condition, i.e. magnetization lies on the real x axis.

        INPUT:
            FpFmZ = 3xN vector of F+, F- and Z states.
            alpha = RF pulse flip angle in radians
            B1 = B1 Homogeneity, where 1. is homogeneous

        OUTPUT:
            FpFmZ = Derivative of FpFmZ state w.r.t. alpha
            RR = Derivative of RF rotation matrix (3x3) w.r.t. B1
        """

    if torch.abs(alpha) > 2 * np.pi:
        warn('rf_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)

    if B1 < 0 or B1 > 2:
        warn('rf_B1_prime2: B1 Homogeneity should be a percentage between (0, 2)')

    # d/d(B1) of the refocusing matrix evaluated at effective angle B1*alpha;
    # the chain rule brings out the extra factor of alpha in every entry.
    RR = torch.tensor([ [-alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), alpha*torch.cos(B1*alpha)],
                    [alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), -alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), -alpha*torch.cos(B1*alpha)],
                    [-0.5*alpha*torch.cos(B1*alpha), 0.5*alpha*torch.cos(B1*alpha), -alpha*torch.sin(B1*alpha)] ])

    FpFmZ = torch.mm(RR, FpFmZ)

    return FpFmZ, RR
def rf_ex_B1_prime(FpFmZ, alpha, B1):
    """Gradient of B1 inhomogeneity w.r.t. RF excitation operator, where
    alpha (radians) is the RF rotation and B1 is the B1 homogeneity (0, 2).
    Assumes CPMG condition, i.e. RF excitation in the y direction.

    INPUT:
        FpFmZ = 3xN vector of F+, F- and Z states.
        alpha = RF pulse flip angle in radians
        B1 = B1 Homogeneity, where 1. is homogeneous

    OUTPUT:
        FpFmZ = Derivative of FpFmZ state w.r.t. B1
    """
    if torch.abs(alpha) > 2 * np.pi:
        warn('rf_ex_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)

    if B1 < 0 or B1 > 2:
        warn('rf_ex_B1_prime: B1 Homogeneity should be a percentage between (0, 2)')

    RR = torch.tensor([ [-alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), alpha*torch.cos(B1*alpha)],
                    [alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), -alpha*torch.cos(B1*alpha/2.) * torch.sin(B1*alpha/2.), alpha*torch.cos(B1*alpha)],
                    [-0.5*alpha*torch.cos(B1*alpha), -0.5*alpha*torch.cos(B1*alpha), -alpha*torch.sin(B1*alpha)] ])

    # BUG FIX: the original called torch.tensor(RR, FpFmZ), which is not a
    # matrix product (and raises a TypeError).  The derivative state is the
    # matrix-vector product RR @ FpFmZ, as in every sibling rf_* function.
    FpFmZ = torch.mm(RR, FpFmZ)

    return FpFmZ
def relax_mat(T, T1, T2):
    """Return the 3x3 diagonal decay operator diag([E2, E2, E1]) for an
    interval T with relaxation times T1, T2 (no recovery term)."""
    transverse_decay = torch.exp(-T / T2)
    longitudinal_decay = torch.exp(-T / T1)
    return torch.diag(torch.tensor([transverse_decay,
                                    transverse_decay,
                                    longitudinal_decay]))
def relax_mat_prime_T1(T, T1, T2):
    """Derivative of the relaxation matrix w.r.t. T1: only the Z entry,
    d(exp(-T/T1))/dT1 = T*exp(-T/T1)/T1^2, is nonzero."""
    dE1_dT1 = T * torch.exp(-T / T1) / T1 ** 2
    return torch.diag(torch.tensor([0, 0, dE1_dT1]))
def relax_mat_prime_T2(T, T1, T2):
    """Derivative of the relaxation matrix w.r.t. T2: the two transverse
    entries, d(exp(-T/T2))/dT2 = T*exp(-T/T2)/T2^2, are nonzero."""
    dE2_dT2 = T * torch.exp(-T / T2) / T2 ** 2
    return torch.diag(torch.tensor([dE2_dT2, dE2_dT2, 0]))
def relax_prime_T1(FpFmZ, T, T1, T2):
    """Return E'(T1) @ FpFmZ + E0'(T1).

    The derivative of the recovery term (1 - E1) w.r.t. T1 is -dE1/dT1 and
    enters the Z0 state only.
    """
    dEE = relax_mat_prime_T1(T, T1, T2)
    out = torch.mm(dEE, FpFmZ)
    out[2, 0] = out[2, 0] - dEE[2, 2]   # d(1 - E1)/dT1 applied to Z0
    return out
def relax_prime_T2(FpFmZ, T, T1, T2):
    """Return E'(T2) @ FpFmZ (transverse decay has no recovery term)."""
    return torch.mm(relax_mat_prime_T2(T, T1, T2), FpFmZ)
def relax(FpFmZ, T, T1, T2):
    """Same as relax2, but only returns FpFmZ."""
    relaxed_state, _ = relax2(FpFmZ, T, T1, T2)
    return relaxed_state
def relax2(FpFmZ, T, T1, T2):
    """Propagate EPG states through a period of relaxation over interval T.

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        T1, T2 = relaxation times (same units as T)
        T = time interval (same units as T1, T2)

    OUTPUT:
        FpFmZ = updated F+, F- and Z states (double precision).
        EE    = decay matrix, diag([E2, E2, E1]).
    """
    transverse_decay = torch.exp(-T / T2)
    longitudinal_decay = torch.exp(-T / T1)

    # Pure decay of all states due to relaxation alone.
    EE = torch.diag(torch.tensor([transverse_decay,
                                  transverse_decay,
                                  longitudinal_decay]))

    # Mz regrowth affects only the Z0 state, since recovered magnetization
    # is not dephased.
    recovery = 1 - longitudinal_decay

    FpFmZ = torch.mm(EE, FpFmZ.double())
    FpFmZ[2, 0] = FpFmZ[2, 0] + recovery
    return FpFmZ, EE
# A torch implementation of numpy.roll
def roll(tensor, shift, axis):
    """Circularly shift `tensor` by `shift` positions along `axis`.

    Elements shifted past the end wrap around to the beginning (and vice
    versa for negative shifts), matching numpy.roll.
    """
    if shift == 0:
        return tensor

    if axis < 0:
        axis += tensor.dim()

    size = tensor.size(axis)
    if shift < 0:
        split = -shift                 # wrap point counted from the front
        shift = size - abs(shift)
    else:
        split = size - shift           # wrap point counted from the back

    head = tensor.narrow(axis, 0, size - shift)
    tail = tensor.narrow(axis, split, shift)
    return torch.cat([tail, head], axis)
def grad(FpFmZ, noadd=False):
    """Propagate EPG states through a "unit" gradient (CPMG condition:
    all states real-valued).

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        noadd = True to NOT append a higher-order state (faster, but may
                compromise accuracy).

    OUTPUT:
        Updated FpFmZ state; the gradient leaves Z states untouched.
    """
    if not noadd:
        # Append a new, empty higher-order dephasing column.
        FpFmZ = torch.cat((FpFmZ.float(), torch.tensor([[0.], [0.], [0.]])), 1)

    FpFmZ[0, :] = roll(FpFmZ[0, :], 1, 0)    # Fp states dephase upward
    FpFmZ[1, :] = roll(FpFmZ[1, :], -1, 0)   # Fm states dephase downward
    FpFmZ[1, -1] = 0                          # empty the highest Fm state
    FpFmZ[0, 0] = FpFmZ[1, 0]                 # lowest Fp filled from F0-

    return FpFmZ
def FSE_TE(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
    """Propagate EPG states through one full echo spacing:
    relax -> grad -> rf -> grad -> relax (CPMG: real-valued states).

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        alpha = refocusing flip angle in radians
        T1, T2 = relaxation times (same units as TE)
        TE = echo spacing
        noadd = True to NOT add higher-order states in grad()
        recovery = include T1 recovery in the Z0 state

    OUTPUT:
        Updated F+, F- and Z states.
    """
    half_decay = relax_mat(TE / 2., T1, T2)

    # First half-TE of relaxation (with or without Mz recovery).
    if recovery:
        FpFmZ = relax(FpFmZ.double(), TE / 2., T1, T2)
    else:
        FpFmZ = torch.mm(half_decay, FpFmZ)

    # Dephase, refocus, dephase.
    FpFmZ = grad(FpFmZ, noadd)
    FpFmZ = rf(FpFmZ, alpha)
    FpFmZ = grad(FpFmZ, noadd)

    # Second half-TE of relaxation.
    if recovery:
        FpFmZ = relax(FpFmZ, TE / 2., T1, T2)
    else:
        FpFmZ = torch.mm(half_decay, FpFmZ)

    return FpFmZ
def FSE_TE_prime_alpha(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
    """Gradient of one echo-spacing propagation w.r.t. the flip angle:
    relax -> grad -> rf_prime -> grad -> relax-without-recovery.

    (`recovery` is accepted for signature compatibility with FSE_TE; the
    trailing relaxation of the derivative never includes Mz recovery.)

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        alpha = flip angle in radians; T1/T2/TE as in FSE_TE.
        noadd = True to NOT add higher-order states in grad().

    OUTPUT:
        Derivative of the propagated state w.r.t. alpha.
    """
    FpFmZ, half_decay = relax2(FpFmZ, TE / 2., T1, T2)
    FpFmZ = grad(FpFmZ, noadd)
    FpFmZ = rf_prime(FpFmZ, alpha)
    FpFmZ = grad(FpFmZ, noadd)
    return torch.mm(half_decay, FpFmZ)
def FSE_TE_prime1_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E(T2) G R G E'(T2) FpFmZ — first product-rule term of the
    TE-propagation derivative w.r.t. T2."""
    decay = relax_mat(TE / 2., T1, T2)
    d_decay = relax_mat_prime_T2(TE / 2., T1, T2)

    out = torch.mm(d_decay, FpFmZ)
    out = grad(out, noadd)
    out = rf(out, alpha)
    out = grad(out, noadd)
    return torch.mm(decay, out)
def FSE_TE_prime2_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E'(T2) G R G (E(T2) FpFmZ + E0) — second product-rule term
    of the TE-propagation derivative w.r.t. T2."""
    d_decay = relax_mat_prime_T2(TE / 2., T1, T2)

    out = relax(FpFmZ, TE / 2., T1, T2)
    out = grad(out, noadd)
    out = rf(out, alpha)
    out = grad(out, noadd)
    return torch.mm(d_decay, out)
def FSE_TE_prime1_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E(T1) G R G (E'(T1) FpFmZ + E0'(T1)) — first product-rule
    term of the TE-propagation derivative w.r.t. T1."""
    decay = relax_mat(TE / 2., T1, T2)

    out = relax_prime_T1(FpFmZ, TE / 2., T1, T2)   # E'(T1) FpFmZ + E0'(T1)
    out = grad(out, noadd)
    out = rf(out, alpha)
    out = grad(out, noadd)
    return torch.mm(decay, out)
def FSE_TE_prime2_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
    """Returns E'(T1) G R G E(T1) FpFmZ + E0'(T1) — second product-rule
    term of the TE-propagation derivative w.r.t. T1."""
    decay = relax_mat(TE / 2., T1, T2)

    out = torch.mm(decay, FpFmZ)
    out = grad(out, noadd)
    out = rf(out, alpha)
    out = grad(out, noadd)
    return relax_prime_T1(out, TE / 2., T1, T2)    # E'(T1) (...) + E0'(T1)
def FSE_TE_prime_B1(FpFmZ, alpha, TE, T1, T2, B1, noadd=False):
    """Gradient of one echo-spacing propagation w.r.t. the B1 homogeneity
    fraction: relax -> grad -> rf_B1_prime -> grad -> relax-without-recovery.

    INPUT:
        FpFmZ = 3xN tensor of F+, F- and Z states.
        alpha = flip angle in radians; T1/T2/TE as in FSE_TE.
        B1 = fraction of B1 homogeneity (1 is fully homogeneous)
        noadd = True to NOT add higher-order states in grad().

    OUTPUT:
        Derivative of the propagated state w.r.t. B1.
    """
    FpFmZ, half_decay = relax2(FpFmZ, TE / 2., T1, T2)
    FpFmZ = grad(FpFmZ, noadd)
    FpFmZ = rf_B1_prime(FpFmZ, alpha, B1)
    FpFmZ = grad(FpFmZ, noadd)
    return torch.mm(half_decay, FpFmZ)
### Gradients of full FSE EPG function across T time points

def FSE_signal_prime_alpha_idx(angles_rad, TE, T1, T2, idx):
    """Gradient of the EPG echo amplitudes w.r.t. the single RF pulse
    angles_rad[idx].

    Echoes before `idx` have zero sensitivity; at `idx` the derivative
    operator is applied; afterwards the derivative state is propagated
    without recovery.

    Returns a (T, 1) tensor of d(echo_i)/d(alpha_idx).
    """
    T = len(angles_rad)

    # BUG FIX: the initial state must be a torch tensor — torch.cat cannot
    # concatenate numpy arrays.
    zi = torch.cat((torch.tensor([[1.], [1.], [0.]]), torch.zeros((3, T))), 1)
    z_prime = torch.zeros((T, 1))

    wi = None
    for i in range(T):
        alpha = angles_rad[i]
        if i < idx:
            zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
            z_prime[i] = 0
        elif i == idx:
            wi = FSE_TE_prime_alpha(zi, alpha, TE, T1, T2, noadd=True)
            z_prime[i] = wi[0, 0]
        else:
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
            z_prime[i] = wi[0, 0]

    return z_prime
def FSE_signal_prime_T1(angles_rad, TE, T1, T2):
    """T1 gradient for the standard CPMG train (90-degree excitation)."""
    excitation = np.pi / 2
    return FSE_signal_ex_prime_T1(excitation, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T1(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Gradient of the EPG echo amplitudes at each echo time w.r.t. T1.

    Applies the product rule across echoes: the derivative state is
    propagated without recovery and accumulates the two per-echo T1
    derivative terms.  Returns a (T, 1) tensor.
    """
    T = len(angles_rad)

    try:
        B1 = B1[0]
    except:
        pass

    # The T1 gradient doesn't depend on B1 inhomogeneity directly, so the
    # flip angles can simply be pre-scaled.
    angle_ex_rad = B1 * angle_ex_rad
    angles_rad = B1 * angles_rad

    # BUG FIX: initial state must be built from torch tensors; torch.cat
    # (and torch.mm inside rf_ex) cannot operate on numpy arrays.
    zi = torch.cat((rf_ex(torch.tensor([[0.], [0.], [1.]]), angle_ex_rad),
                    torch.zeros((3, T))), 1)
    z_prime = torch.zeros((T, 1))

    for i in range(T):
        alpha = angles_rad[i]
        if i == 0:
            wi = torch.zeros((3, T + 1))
        else:
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)

        wi += FSE_TE_prime1_T1(zi, alpha, TE, T1, T2, noadd=True)
        wi += FSE_TE_prime2_T1(zi, alpha, TE, T1, T2, noadd=True)

        zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]

    return z_prime
def FSE_signal_prime_T2(angles_rad, TE, T1, T2):
    """T2 gradient for the standard CPMG train (90-degree excitation)."""
    excitation = np.pi / 2
    return FSE_signal_ex_prime_T2(excitation, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T2(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Gradient of the EPG echo amplitudes at each echo time w.r.t. T2.

    Applies the product rule across echoes: the derivative state is
    propagated without recovery and accumulates the two per-echo T2
    derivative terms.  Returns a (T, 1) tensor.
    """
    T = len(angles_rad)

    try:
        B1 = B1[0]
    except:
        pass

    # The T2 gradient doesn't depend on B1 inhomogeneity directly, so the
    # flip angles can simply be pre-scaled.
    angle_ex_rad = B1 * angle_ex_rad
    angles_rad = B1 * angles_rad

    # BUG FIX: initial state must be built from torch tensors; torch.cat
    # (and torch.mm inside rf_ex) cannot operate on numpy arrays.
    zi = torch.cat((rf_ex(torch.tensor([[0.], [0.], [1.]]), angle_ex_rad),
                    torch.zeros((3, T))), 1)
    z_prime = torch.zeros((T, 1))

    for i in range(T):
        alpha = angles_rad[i]
        if i == 0:
            wi = torch.zeros((3, T + 1))
        else:
            wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)

        wi += FSE_TE_prime1_T2(zi, alpha, TE, T1, T2, noadd=True)
        wi += FSE_TE_prime2_T2(zi, alpha, TE, T1, T2, noadd=True)

        zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]

    return z_prime
def FSE_signal_ex_prime_B1(angle_ex_rad, angles_rad, TE, T1, T2, B1):
    """Gradient of the EPG echo amplitudes w.r.t. B1 homogeneity, including
    the excitation flip angle.  Returns a (T, 1) tensor."""
    T = len(angles_rad)

    # BUG FIX: build the initial state from torch tensors, not numpy arrays,
    # so torch.cat / torch.mm downstream operate on a consistent type.
    zi = torch.cat((torch.tensor([[0.], [0.], [1.]]),
                    torch.zeros((3, T + 1))), 1)
    z_prime = torch.zeros((T, 1))

    # Product rule across the excitation pulse.
    wi = rf_ex_B1_prime(zi, angle_ex_rad, B1)
    zi = rf_ex(zi, angle_ex_rad * B1)

    for i in range(T):
        alpha = angles_rad[i]
        # Propagate the accumulated derivative (recovery only after echo 0).
        if i == 0:
            xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True, recovery=False)
        else:
            xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True)

        wi = FSE_TE_prime_B1(zi, alpha, TE, T1, T2, B1, noadd=True) + xi
        zi = FSE_TE(zi, alpha * B1, TE, T1, T2, noadd=True)
        z_prime[i] = wi[0, 0]

    return z_prime
### Full FSE EPG function across T time points

def FSE_signal_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Same as FSE_signal2_ex, but only returns Mxy."""
    Mxy, _ = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1)
    return Mxy
def FSE_signal(angles_rad, TE, T1, T2):
    """Same as FSE_signal2, but only returns Mxy."""
    Mxy, _ = FSE_signal2(angles_rad, TE, T1, T2)
    return Mxy
def FSE_signal2(angles_rad, TE, T1, T2):
    """Same as FSE_signal2_ex, but assumes a 90-degree excitation pulse.

    BUG FIX: the bare name `pi` is undefined in this module (only numpy is
    imported, as np); use np.pi.
    """
    return FSE_signal2_ex(np.pi / 2., angles_rad, TE, T1, T2)
def FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Simulate a Fast Spin-Echo CPMG sequence with a specific flip-angle
    train.

    An excitation pulse of angle_ex_rad radians is applied along y, then the
    refocusing train is applied along x.

    INPUT:
        angles_rad = array of flip angles (radians), one per echo
        TE = echo time/spacing
        T1 = T1 value in seconds
        T2 = T2 value in seconds
        B1 = B1 homogeneity fraction (scalar or 1-element sequence)

    OUTPUT:
        Mxy = transverse magnetization at each echo time, shape (T, 1)
        Mz  = longitudinal magnetization at each echo time, shape (T, 1)
    """
    T = len(angles_rad)
    Mxy = torch.zeros((T, 1))
    Mz = torch.zeros((T, 1))

    state = torch.Tensor([[0.], [0.], [1.]])   # start fully longitudinal

    # Accept a 1-element sequence for B1.
    try:
        B1 = B1[0]
    except:
        pass

    # Pre-scale all flip angles by the B1 homogeneity fraction.
    angle_ex_rad = B1 * angle_ex_rad
    angles_rad = B1 * angles_rad

    state = rf_ex(state, angle_ex_rad)   # initial excitation tip

    for echo in range(T):
        state = FSE_TE(state, angles_rad[echo], TE, T1, T2)
        Mxy[echo] = state[0, 0]
        Mz[echo] = state[2, 0]

    return Mxy, Mz
def SE_sim(angle_ex_rad, angles_rad, TE, T1, T2, TR, B1=1.):
    """Spin-echo signal with TR saturation-recovery weighting.

    Runs the FSE/CPMG simulation and scales Mxy by (1 - exp(-(TR - TE)/T1)).
    Returns (weighted Mxy, Mz).
    """
    # BUG FIX: the caller-supplied B1 was silently ignored (the original
    # hard-coded B1=1. in the call below).
    Mxy, Mz = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=B1)

    recovery = 1 - torch.exp(-(TR - TE) / T1)
    return Mxy * recovery.float(), Mz
| jtamir/mri-sim-py | epg/epgcpmg_torch.py | Python | mit | 20,962 | [
"Brian"
] | c9d417c0a083a4371554975f447689bae73ef25d5f08b3bc598c1a440fafb385 |
from __future__ import division
import re
# List of tags in VASP sorted by the data type associated with it.
# BUG FIX: 'ldauprint' appeared twice in VASP_TAG_INT_LIST; deduplicated.
VASP_TAG_INT_LIST = ['ialgo','ibrion','icharg','images','ismear','ispin',
                     'istart','isym','lorbit','nbands','ndav','ngx','ngxf',
                     'ngy','ngyf','ngz','ngzf','npar','ncore','spind','nsw',
                     'isif', 'kpar', 'voskown', 'nsim', 'nedos', 'lmaxfock',
                     'lmaxmix', 'nkred','ivdw','nelmin', 'nelm', 'nelmdl',
                     'ldautype','ldauprint']
VASP_TAG_FLOAT_LIST = ['ediff','ediffg','emax','emin','encut','potim','sigma',
                       'enmax','symprec', 'time', 'hfscreen','amix','bmix',
                       'amix_mag', 'bmix_mag']
VASP_TAG_BOOL_LIST = ['lcharg','lreal','lsorbit','lwave','lscalapack', 'lscalu',
                      'lplane', 'lhfcalc', 'shiftred', 'evenonly', 'oddonly',
                      'addgrid', 'ldau', 'lasph']
# Site-wise list of arrays of FLOAT
VASP_TAG_SITEF_LIST = ['magmom','rwigs']
# Species-wise list of arrays of FLOAT
VASP_TAG_SPECF_LIST = ['ldauu', 'ldauj']
# Species-wise list of arrays of INT
VASP_TAG_SPECI_LIST = ['ldaul']
VASP_TAG_STRING_LIST = ['algo','prec','system', 'precfock']
# The master list of VASP tags is a union of the above -> need to allow for 'miscellaneous' ?
VASP_TAG_LIST = VASP_TAG_INT_LIST + VASP_TAG_SITEF_LIST + VASP_TAG_SPECI_LIST + VASP_TAG_BOOL_LIST + VASP_TAG_FLOAT_LIST + VASP_TAG_STRING_LIST + VASP_TAG_SPECF_LIST
class IncarError(Exception):
    """Raised for problems reading, parsing, or writing an INCAR file."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "%s" % self.msg
class Incar:
    """
    The INCAR class contains:
        tags: a dict of all INCAR settings

    All input tags and associated values are stored as key-value pairs in the
    dictionary called 'tags'.  Values are converted from the raw strings to
    their natural type (int, float, bool, list, str) based on the
    VASP_TAG_* lists.
    """
    def __init__(self, filename, species=None, poscar=None, sort=True):
        """Construct an Incar object from 'filename'."""
        self.read(filename, species, poscar, sort)

    def read(self, filename, species=None, poscar=None, sort=True):
        """Read an INCAR file.

        Lines have the form 'TAG = value  # optional comment'; anything
        after '#' is ignored.  If 'species' is given, per-site/per-species
        tags are filled in via update().
        """
        self.tags = dict()
        try:
            file = open(filename, 'r')
        except OSError:
            raise IncarError("Could not open file: '" + filename + "'")

        # BUG FIX: use a context manager so the handle is closed even if
        # parsing raises (the original only closed it on success).
        with file:
            # parse INCAR into self.tags
            for line in file:
                line = re.split('=', re.split('#', line)[0])
                if len(line) == 2:
                    self.tags[line[0].strip()] = line[1].strip()

        self._verify_tags()
        self._make_natural_type()

        if species is not None:
            self.update(species, poscar, sort)

    def _make_natural_type(self):
        """Convert self.tags values from strings into their 'natural type'
        (int, float, bool, list, str), based on the VASP_TAG_* lists."""
        for tag in self.tags:
            if self.tags[tag] is None or str(self.tags[tag]).strip() == "":
                self.tags[tag] = None
            elif tag.lower() in VASP_TAG_INT_LIST:
                try:
                    self.tags[tag] = int(self.tags[tag])
                except ValueError:
                    raise IncarError("Could not convert '" + tag + "' : '" + self.tags[tag] + "' to int")
            elif tag.lower() in VASP_TAG_FLOAT_LIST:
                # 'd' exponents (Fortran style) are mapped to 'e' first.
                try:
                    self.tags[tag] = float(self.tags[tag].lower().replace('d', 'e'))
                except ValueError:
                    raise IncarError("Could not convert '" + tag + "' : '" + self.tags[tag] + "' to float")
            elif tag.lower() in VASP_TAG_BOOL_LIST:
                if not self.tags[tag].lower() in ['.true.', '.false.']:
                    raise IncarError("Could not find '" + tag + "' : '" + self.tags[tag].lower() + "' in ['.true.','.false.']")
                else:
                    self.tags[tag] = (self.tags[tag].lower() == '.true.')
            elif tag.lower() in VASP_TAG_SITEF_LIST + VASP_TAG_SPECF_LIST:
                temp = []
                for value in self.tags[tag].split():
                    try:
                        temp.append(float(value))
                    except ValueError:
                        raise IncarError("Could not convert '" + tag + "' : '" + self.tags[tag] + "' to float list")
                self.tags[tag] = temp
            elif tag.lower() in VASP_TAG_SPECI_LIST:
                temp = []
                for value in self.tags[tag].split():
                    try:
                        temp.append(int(value))
                    except ValueError:
                        raise IncarError("Could not convert '" + tag + "' : '" + self.tags[tag] + "' to int list")
                self.tags[tag] = temp
            elif tag.lower() in VASP_TAG_STRING_LIST:
                self._check_string_tag(tag, self.tags[tag])

    def _check_string_tag(self, tag, value):
        """Check that string-valued tags take allowed values."""
        if tag.lower() == 'prec':
            if value.lower() not in ['low', 'medium', 'high', 'normal', 'single', 'accurate']:
                raise IncarError("Unknown 'prec' value: '" + value)
        elif tag.lower() == 'algo':
            if value.lower() not in ['normal', 'veryfast', 'fast', 'conjugate', 'all', 'damped', 'subrot', 'eigenval', 'none', 'nothing', 'chi', 'gw0', 'gw', 'scgw0', 'scgw']:
                raise IncarError("Unknown 'algo' value: '" + value)

    def _verify_tags(self):
        """Warn (do not fail) about INCAR tags not in VASP_TAG_LIST."""
        for tag in self.tags:
            if tag.lower() in VASP_TAG_LIST:
                continue
            else:
                print("Warning: unknown INCAR tag '" + tag + "' with value '" + str(self.tags[tag]) + "'")

    def update(self, species, poscar, sort=True):
        """Update Incar object to reflect Species settings.

        Species-wise tags get one entry per atom type; site-wise tags get
        one entry per atom.  When sort is True, atoms are taken from
        poscar.basis_dict() in alias order; otherwise poscar order is kept.
        """
        # BUG FIX (Py3): dict.values() is a view and cannot be indexed;
        # materialize it first.
        tag_names = list(species.values())[0].tags.keys()

        if not sort:
            # for each 'tag' in the IndividualSpecies, create a list in self.tags
            for key in tag_names:
                self.tags[key] = []
                if key.lower() in (VASP_TAG_SPECF_LIST + VASP_TAG_SPECI_LIST):
                    # one value per species, in POSCAR species order
                    for spec in poscar.type_atoms_alias:
                        self.tags[key].append(species[spec].tags[key])
                else:
                    # one value per atom, in POSCAR site order
                    for site in poscar.basis:
                        self.tags[key].append(species[site.occupant].tags[key])
        else:
            pos = poscar.basis_dict()
            for key in tag_names:
                self.tags[key] = []
                # add the value of the 'tag' for each atom (or species) in
                # sorted-alias order
                for alias in sorted(pos.keys()):
                    if key.lower() in (VASP_TAG_SPECF_LIST + VASP_TAG_SPECI_LIST):
                        self.tags[key].append(species[alias].tags[key])
                    else:
                        for site in pos[alias]:
                            self.tags[key].append(species[site.occupant].tags[key])

    def write(self, filename):
        """Write the stored tags to 'filename' in INCAR format.

        List-valued tags are written space-separated; booleans as
        .TRUE./.FALSE.; empty/None tags are skipped.
        """
        try:
            incar_write = open(filename, 'w')
        except IOError as e:
            raise e
        with incar_write:
            for tag in self.tags:
                if self.tags[tag] is None or str(self.tags[tag]).strip() == "":
                    continue
                if tag.lower() in VASP_TAG_SITEF_LIST + VASP_TAG_SPECF_LIST + VASP_TAG_SPECI_LIST:
                    # BUG FIX (Py3): str.translate(None, chars) was removed;
                    # emit the list as space-separated values directly.
                    incar_write.write('{} = {}\n'.format(tag.upper(), ' '.join(str(v) for v in self.tags[tag])))
                elif tag.lower() in VASP_TAG_BOOL_LIST:
                    incar_write.write('{} = {}\n'.format(tag.upper(), '.TRUE.' if self.tags[tag] else '.FALSE.'))
                else:
                    incar_write.write('{} = {}\n'.format(tag.upper(), self.tags[tag]))
| jbechtel/CASMcode | python/vasp/vasp/io/incar.py | Python | lgpl-2.1 | 8,493 | [
"VASP"
] | fdecd9a6e6d30923049ba9689d6f582fcf41eecd81ded92d33dc4076ba683334 |
#! /usr/bin/env python3
# -*- coding: utf-8; -*-
# very basic 2D plot using vtkXYPlotActor
import vtk
import math
def drange(start, stop, step):
    """Yield floats from `start` up to (but excluding) `stop`, advancing by
    `step`.  Like range(), but for floats (values accumulate by addition)."""
    current = start
    while current < stop:
        yield current
        current += step
def genvalues():
    """Return a list of (x, sin(x)) tuples: 100 samples over [0, 4*pi)."""
    span = 4 * math.pi
    return [(x, math.sin(x)) for x in drange(0.0, span, span / 100)]
def plot(values):
    """Display a 2D point/line plot of (x, y) tuples using vtkXYPlotActor.

    Blocks until the interactive render window is closed.
    """
    # convert data from python list of tuples => vtkFieldData
    xCoords = vtk.vtkFloatArray()
    yCoords = vtk.vtkFloatArray()
    xCoords.SetNumberOfTuples(len(values))
    yCoords.SetNumberOfTuples(len(values))
    for i,v in enumerate(values):
        xCoords.SetTuple1(i, v[0])
        yCoords.SetTuple1(i, v[1])
    curve = vtk.vtkFieldData()
    curve.AddArray(xCoords)
    curve.AddArray(yCoords)
    # create vtkDataObject
    plot = vtk.vtkDataObject()
    plot.SetFieldData(curve)
    # build a vtkXYPlotActor
    xyplot = vtk.vtkXYPlotActor()
    xyplot.AddDataObjectInput(plot)
    #xyplot.SetDataObjectPlotModeToRows()
    # field-data arrays 0 and 1 supply the x and y components of curve 0
    xyplot.SetDataObjectXComponent(0, 0)
    xyplot.SetDataObjectYComponent(0, 1)
    # actor spans the whole viewport (normalized coordinates 0..1)
    xyplot.GetPositionCoordinate().SetValue(0, 0.0, 0)
    xyplot.GetPosition2Coordinate().SetValue(1, 1, 0)
    xyplot.PlotPointsOn()
    xyplot.PlotLinesOn()
    xyplot.GetProperty().SetPointSize(5)
    #xyplot.SetXRange(0, 100)
    #xyplot.SetYRange(0, 20)
    # curve 0 drawn in yellow (r=1, g=1, b=0)
    xyplot.SetPlotColor(0, 1, 1, 0)
    # setup renderer / window / interactor
    ren = vtk.vtkRenderer()
    ren.SetBackground(0.1, 0.2, 0.4)
    ren.AddActor2D(xyplot)
    renWin = vtk.vtkRenderWindow()
    renWin.SetSize(1000, 800)
    renWin.AddRenderer(ren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    # Render once, then hand control to the interactor's event loop.
    renWin.Render()
    iren.Start()
if __name__ == "__main__":
    # Build the sine-wave sample data and display it.
    plot(genvalues())
| rboman/progs | sandbox/vtk/patte/testplot2d.py | Python | apache-2.0 | 1,847 | [
"VTK"
] | 1cb5582537fd60989eb29f3c5a3e06c63eaaa06f7d23bbced5d20b377f94d8ff |
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
# Excludes oslo.config OptGroup objects
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
vi_header_re = re.compile(r"^#\s+vim?:.+")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
asse_trueinst_re = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
asse_equal_end_with_none_re = re.compile(
r"assertEqual\(.*?,\s+None\)$")
asse_equal_start_with_none_re = re.compile(
r"assertEqual\(None,")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
# asse_true_false_with_in_or_not_in regex checks
# assertTrue/False(A in B) cases where B argument has no spaces
# asse_true_false_with_in_or_not_in_spaces regex checks cases
# where B argument has spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# We have to separate these regexes to escape a false positives
# results. B argument should have spaces only if it starts
# with [, ", '. Otherwise checking of string
# "assertFalse(A in B and C in D)" will be false positives.
# In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
log_translation_info = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)"
"\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
opt_help_text_min_char_count = 10
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.

    Subclasses should implement visit_* methods like any other AST visitor
    implementation. When they detect an error for a particular node the
    method should call ``self.add_error(offending_node)``. Details about
    where in the code the error occurred will be pulled from the node
    object.

    Subclasses should also provide a class variable named CHECK_DESC to
    be used for the human readable error message.
    """

    def __init__(self, tree, filename):
        """Created automatically by pep8; *filename* is accepted but unused."""
        self._tree = tree
        self._errors = []

    def run(self):
        """Called automatically by pep8: walk the tree, return the errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record a pep8-style error tuple for *node*."""
        self._errors.append((node.lineno, node.col_offset,
                             message or self.CHECK_DESC,
                             self.__class__))

    def _check_call_names(self, call_node, names):
        """Return True iff *call_node* is a direct call to one of *names*."""
        return (isinstance(call_node, ast.Call) and
                isinstance(call_node.func, ast.Name) and
                call_node.func.id in names)
def import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt

    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.

    N307
    """
    in_virt_tree = "nova/virt" in filename and not filename.endswith("fake.py")
    if in_virt_tree and logical_line.startswith("from nova import db"):
        yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
if "db/api.py" in filename:
if session_check.match(logical_line):
yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
    """Enforce timeutils.utcnow() over direct datetime.now()/utcnow().

    Files under /tools/ are exempt.  N310
    """
    # tools are OK to use the standard datetime module
    if "/tools/" in filename:
        return
    msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
    for func in ('now', 'utcnow'):
        pos = logical_line.find('datetime.' + func)
        if pos >= 0:
            yield (pos, msg % func)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "firewall",
"disk", "api", "imagecache", "cpu", "hardware",
"image"]:
return None
return driver
def import_no_virt_driver_import_deps(physical_line, filename):
"""Check virt drivers' modules aren't imported by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not access those drivers. Any code that
is to be shared should be refactored into a common
module
N311
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_import_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
"""Check virt drivers' config vars aren't used by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not use their config vars. Any config vars
that are to be shared should be moved into a common module
N312
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_config_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
msg = "N313: capitalize help string"
if cfg_re.match(logical_line):
for t in range(len(tokens)):
if tokens[t][1] == "help":
txt = tokens[t + 2][1]
if len(txt) > 1 and txt[1].islower():
yield(0, msg)
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def assert_true_instance(logical_line):
"""Check for assertTrue(isinstance(a, b)) sentences
N316
"""
if asse_trueinst_re.match(logical_line):
yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
"""Check for assertEqual(type(A), B) sentences
N317
"""
if asse_equal_type_re.match(logical_line):
yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def assert_equal_none(logical_line):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
N318
"""
res = (asse_equal_start_with_none_re.search(logical_line) or
asse_equal_end_with_none_re.search(logical_line))
if res:
yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed")
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
"""Check for 'from nova.i18n import _'
N337
"""
if 'nova/tests/' in filename:
res = import_translation_for_log_or_exception.match(logical_line)
if res:
yield(0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
"""Check for setting CONF.* attributes directly in tests
The value can leak out of tests affecting how subsequent tests run.
Using self.flags(option=value) is the preferred method to temporarily
set config options in tests.
N320
"""
if 'nova/tests/' in filename:
res = conf_attribute_set_re.match(logical_line)
if res:
yield (0, "N320: Setting CONF.* attributes directly in tests is "
"forbidden. Use self.flags(option=value) instead")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
# and the Xen utilities
if ("nova/tests" in filename or
"plugins/xenserver/xenapi/etc/xapi.d" in filename):
return
if pep8.noqa(physical_line):
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_info.match(logical_line):
yield (0, msg)
msg = "N329: LOG.exception messages require translations `_LE()`!"
if log_translation_exception.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "N321: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def no_mutable_default_args(logical_line):
    """Flag function definitions with {} or [] default arguments (N322)."""
    if mutable_default_args.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
    """Enforce jsonutils over the stdlib json module (N324).

    xenserver plugins run outside the nova tree (where jsonutils is not
    available) and /tools/ scripts are exempt.
    """
    # the code below the xenserver path is not meant to be executed from
    # the nova tree where jsonutils is present, and tools may use the
    # standard json module, so skip both.
    if "plugins/xenserver" in filename or "/tools/" in filename:
        return
    if "json." not in logical_line:
        return
    msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    for call in ('dumps(', 'dump(', 'loads(', 'load('):
        pos = logical_line.find('json.' + call)
        if pos >= 0:
            yield (pos, msg % {'fun': call[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
filename):
msg = ("N332: the api_version decorator must be the first decorator"
" on a method.")
if blank_before == 0 and re.match(api_version_re, logical_line) \
and re.match(decorator_re, previous_logical):
yield(0, msg)
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.

    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """

    CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')

    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # Stack of exception-variable names currently in scope.
        self.name = []
        # Call nodes already reported, to avoid duplicate errors when the
        # same subtree is re-visited through nested handlers.
        self.already_checked = []

    def _visit_try_handlers(self, node):
        """Track handler target names while visiting a try/except tree."""
        for handler in node.handlers:
            if handler.name:
                # On Python 2 ``handler.name`` is an ast.Name node; on
                # Python 3 it is already a plain string, so ``.id`` would
                # raise AttributeError there.
                self.name.append(getattr(handler.name, 'id', handler.name))
                super(CheckForStrUnicodeExc, self).generic_visit(node)
                self.name = self.name[:-1]
            else:
                super(CheckForStrUnicodeExc, self).generic_visit(node)

    # Python 2 AST node name.
    visit_TryExcept = _visit_try_handlers
    # Python 3 merged TryExcept/TryFinally into a single Try node; without
    # this alias the check silently never ran on Python 3.
    visit_Try = _visit_try_handlers

    def visit_Call(self, node):
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                if isinstance(node.args[0], ast.Name):
                    # Only flag str(e)/unicode(e) on a tracked handler var.
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.

    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """

    CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')

    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        # Only '+' concatenation matters; flag the first operand that is a
        # direct call to one of the translation functions.
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
def assert_true_or_false_with_in(logical_line):
"""Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
N334
"""
res = (asse_true_false_with_in_or_not_in.search(logical_line) or
asse_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
"assertTrue/False(A in/not in B) when checking collection "
"contents.")
def assert_raises_regexp(logical_line):
"""Check for usage of deprecated assertRaisesRegexp
N335
"""
res = asse_raises_regexp.search(logical_line)
if res:
yield (0, "N335: assertRaisesRegex must be used instead "
"of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def assert_equal_in(logical_line):
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
N338
"""
res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
asse_equal_in_end_with_true_or_false_re.search(logical_line))
if res:
yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in B, True/False) when checking collection "
"contents.")
def check_http_not_implemented(logical_line, physical_line, filename):
msg = ("N339: HTTPNotImplemented response must be implemented with"
" common raise_feature_not_supported().")
if pep8.noqa(physical_line):
return
if ("nova/api/openstack/compute/legacy_v2" in filename or
"nova/api/openstack/compute" not in filename):
return
if re.match(http_not_implemented_re, logical_line):
yield(0, msg)
def check_greenthread_spawns(logical_line, physical_line, filename):
"""Check for use of greenthread.spawn(), greenthread.spawn_n(),
eventlet.spawn(), and eventlet.spawn_n()
N340
"""
msg = ("N340: Use nova.utils.%(spawn)s() rather than "
"greenthread.%(spawn)s() and eventlet.%(spawn)s()")
if "nova/utils.py" in filename or "nova/tests/" in filename:
return
match = re.match(spawn_re, logical_line)
if match:
yield (0, msg % {'spawn': match.group('spawn_part')})
def check_no_contextlib_nested(logical_line, filename):
msg = ("N341: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested for "
"more information. nova.test.nested() is an alternative as well.")
if contextlib_nested.match(logical_line):
yield(0, msg)
def check_config_option_in_central_place(logical_line, filename):
msg = ("N342: Config options should be in the central location "
"'/nova/conf/*'. Do not declare new config options outside "
"of that folder.")
# That's the correct location
if "nova/conf/" in filename:
return
# TODO(markus_z) This is just temporary until all config options are
# moved to the central place. To avoid that a once cleaned up place
# introduces new config options, we do a check here. This array will
# get quite huge over the time, but will be removed at the end of the
# reorganization.
# You can add the full path to a module or folder. It's just a substring
# check, which makes it flexible enough.
cleaned_up = ["nova/console/serial.py",
"nova/cmd/serialproxy.py",
]
if not any(c in filename for c in cleaned_up):
return
if cfg_opt_re.match(logical_line):
yield(0, msg)
def check_doubled_words(physical_line, filename):
"""Check for the common doubled-word typos
N343
"""
msg = ("N343: Doubled word '%(word)s' typo found")
match = re.search(doubled_words_re, physical_line)
if match:
return (0, msg % {'word': match.group(1)})
def check_python3_no_iteritems(logical_line):
    """Flag Python-2-only dict.iteritems() calls (N344)."""
    if re.search(r"\.iteritems\(\)", logical_line):
        yield (0, "N344: Use six.iteritems() instead of dict.iteritems().")
def check_python3_no_iterkeys(logical_line):
    """Flag Python-2-only dict.iterkeys() calls (N345)."""
    if re.search(r"\.iterkeys\(\)", logical_line):
        yield (0, "N345: Use six.iterkeys() instead of dict.iterkeys().")
def check_python3_no_itervalues(logical_line):
    """Flag Python-2-only dict.itervalues() calls (N346)."""
    if re.search(r"\.itervalues\(\)", logical_line):
        yield (0, "N346: Use six.itervalues() instead of dict.itervalues().")
def cfg_help_with_enough_text(logical_line, tokens):
    """Require a minimally descriptive help text on config options (N347).

    Flags cfg.*Opt declarations that either lack a ``help`` keyword or
    whose help string is shorter than ``opt_help_text_min_char_count``.
    DeprecatedOpt objects are exempt.
    """
    # TODO(markus_z): The count of 10 chars is the *highest* number I could
    # use to introduce this new check without breaking the gate. IOW, if I
    # use a value of 15 for example, the gate checks will fail because we have
    # a few config options which use fewer chars than 15 to explain their
    # usage (for example the options "ca_file" and "cert").
    # As soon as the implementation of bp centralize-config-options is
    # finished, I wanted to increase that magic number to a higher (to be
    # defined) value.
    # This check is an attempt to programmatically check a part of the review
    # guidelines http://docs.openstack.org/developer/nova/code-review.html
    msg = ("N347: A config option is a public interface to the cloud admins "
           "and should be properly documented. A part of that is to provide "
           "enough help text to describe this option. Use at least %s chars "
           "for that description. It is likely that this minimum will be "
           "increased in the future." % opt_help_text_min_char_count)

    if not cfg_opt_re.match(logical_line):
        return

    # ignore DeprecatedOpt objects. They get mentioned in the release notes
    # and don't need a lengthy help text anymore
    if "DeprecatedOpt" in logical_line:
        return

    def get_token_value(idx):
        # Each pep8 token is a tuple; element 1 is its string value.
        return tokens[idx][1]

    def get_token_values(start_index, length):
        # Concatenate the values of 'length' consecutive tokens.
        values = ""
        for offset in range(length):
            values += get_token_value(start_index + offset)
        return values

    def get_help_token_index():
        # Index of the 'help' keyword token, or -1 if absent.
        for idx in range(len(tokens)):
            if get_token_value(idx) == "help":
                return idx
        return -1

    def has_help():
        return get_help_token_index() >= 0

    def get_trimmed_help_text(t):
        # Extract the help string, skipping over 'help=_(' / 'help=(' /
        # 'help=' prefixes, then collapse internal whitespace.
        txt = ""
        # len(["help", "=", "_", "("]) ==> 4
        if get_token_values(t, 4) == "help=_(":
            txt = get_token_value(t + 4)
        # len(["help", "=", "("]) ==> 3
        elif get_token_values(t, 3) == "help=(":
            txt = get_token_value(t + 3)
        # len(["help", "="]) ==> 2
        else:
            txt = get_token_value(t + 2)
        return " ".join(txt.strip('\"\'').split())

    def has_enough_help_text(txt):
        return len(txt) >= opt_help_text_min_char_count

    if has_help():
        t = get_help_token_index()
        txt = get_trimmed_help_text(t)
        if not has_enough_help_text(txt):
            yield(0, msg)
    else:
        # No help text at all is also a violation.
        yield(0, msg)
def factory(register):
    """Register every Nova hacking check with the pep8 framework."""
    # Order mirrors the N3xx numbering / definition order above.
    all_checks = (
        import_no_db_in_virt,
        no_db_session_in_public_api,
        use_timeutils_utcnow,
        import_no_virt_driver_import_deps,
        import_no_virt_driver_config_deps,
        capital_cfg_help,
        no_vi_headers,
        no_import_translation_in_tests,
        assert_true_instance,
        assert_equal_type,
        assert_equal_none,
        assert_raises_regexp,
        no_translate_debug_logs,
        no_setting_conf_directly_in_tests,
        validate_log_translations,
        no_mutable_default_args,
        check_explicit_underscore_import,
        use_jsonutils,
        check_api_version_decorator,
        CheckForStrUnicodeExc,
        CheckForTransAdd,
        assert_true_or_false_with_in,
        dict_constructor_with_list_copy,
        assert_equal_in,
        check_http_not_implemented,
        check_no_contextlib_nested,
        check_greenthread_spawns,
        check_config_option_in_central_place,
        check_doubled_words,
        check_python3_no_iteritems,
        check_python3_no_iterkeys,
        check_python3_no_itervalues,
        cfg_help_with_enough_text,
    )
    for check in all_checks:
        register(check)
| dims/nova | nova/hacking/checks.py | Python | apache-2.0 | 26,556 | [
"VisIt"
] | 106c187c471bca5e8c2c699c84c0a864ca2ed4ce31edf76b535ca945538fb0dc |
#!/usr/bin/python
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===========================================
General utility functions & common includes
===========================================
"""
import sys
from Axon.AxonExceptions import invalidComponentInterface
# Python 2/3 compatibility shims: on Python 2 (where the Queue module
# exists) define next()/vrange()/apply() equivalents; on Python 3 the
# builtins already provide what is needed.
try:
    import Queue

    def next(g):  # Not built into python 2
        """Python 2 spelling of the builtin next(): advance generator g."""
        return g.next()
    vrange = xrange
    apply = apply
# Narrowed from a bare 'except:' -- only a missing Queue module (i.e.
# running on Python 3) should select this branch.
except ImportError:  # Built into python 3
    next = next
    vrange = range

try:
    set  # Exists in 2.5 & 2.6
except NameError:
    from sets import Set as set  # Exists in 2.3 onwards, deprecated in 2.6
#"""This sets the system into production moe which means that many exception could be suppressed to allow the system to keep running. Test all code with this set to false so that you are alerted to errors"""
production=False
def logError(someException, *args):
    """\
    Currently does nothing but can be rewritten to log ignored errors if the
    production value is true.
    """
    # Intentional no-op: placeholder hook for recording exceptions that are
    # suppressed when running in production mode.
    pass
def axonRaise(someException, *args):
    """\
    Raise ``someException(*args)`` unless Axon.util.production is True,
    in which case the error is only logged and False is returned.
    """
    if not production:
        raise someException(*args)
    logError(someException, *args)
    return False
def removeAll(xs, y):
    """Remove, in place, every element of list ``xs`` equal to ``y``.

    Runs in O(n); the previous repeated index()/del loop was O(n**2) and
    relied on parsing the ValueError message text ("not in list"), which
    is fragile across Python versions.  Removing a value that is not
    present is a no-op, as before.
    """
    xs[:] = [x for x in xs if x != y]
def listSubset(requiredList, suppliedList):
    """Returns true if the requiredList is a subset of the suppliedList."""
    return set(requiredList) <= set(suppliedList)
def testInterface(theComponent, interface):
    """Look for a minimal match interface for the component.
    The interface should be a tuple of lists, i.e. ([inboxes],[outboxes])."""
    requiredInboxes, requiredOutboxes = interface
    checks = (("inboxes", requiredInboxes, theComponent.Inboxes),
              ("outboxes", requiredOutboxes, theComponent.Outboxes))
    for boxtype, required, supplied in checks:
        if not listSubset(required, supplied):
            # axonRaise either raises or returns False (production mode).
            return axonRaise(invalidComponentInterface,
                             boxtype, theComponent, interface)
    return True
def safeList(arg=None):
    """Returns the list version of arg, otherwise returns an empty list."""
    try:
        result = list(arg)
    except TypeError:
        # arg was None or not iterable.
        result = []
    return result
class Finality(Exception):
    """Used for implementing try...finally... inside a generator."""
    # Marker exception only -- raised/caught internally, carries no state.
    pass
| sparkslabs/kamaelia | Sketches/MPS/BugReports/FixTests/Axon/Axon/util.py | Python | apache-2.0 | 3,530 | [
"MOE"
] | df9791ce11af27060472fac9a8faf3a57afd5b0677a9ddb99f4b51770c789357 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import tests_common
import numpy as np
import espressomd.magnetostatics
import espressomd.magnetostatic_extensions
@utx.skipIfMissingFeatures(["DP3M"])
class MagnetostaticsP3M(ut.TestCase):
    """Regression tests for the dipolar P3M solver, its epsilon boundary
    correction, and the DLC (dipolar layer correction) extension."""

    # Cubic 10x10x10 box shared by all tests in this class.
    system = espressomd.System(box_l=3 * [10.])

    def setUp(self):
        # Two point dipoles, 2 length units apart along the x axis.
        self.partcls = self.system.part.add(pos=[[4.0, 2.0, 2.0], [6.0, 2.0, 2.0]],
                                            dip=[(1.3, 2.1, -6.0), (7.3, 6.1, -4.0)])

    def tearDown(self):
        self.system.part.clear()
        self.system.actors.clear()

    if espressomd.has_features("DP3M"):
        # Auto-generated checks of the DipolarP3M parameter interface.
        test_DP3M = tests_common.generate_test_for_class(
            system, espressomd.magnetostatics.DipolarP3M,
            dict(prefactor=1., epsilon=0., mesh_off=[0.5, 0.5, 0.5], r_cut=2.4,
                 cao=1, mesh=[8, 8, 8], alpha=12, accuracy=0.01, tune=False))

    def test_dp3m(self):
        """Check DP3M energy/forces/torques against reference values
        (metallic boundary), then verify the analytic epsilon-dependent
        dipole correction and that MDLC cancels it."""
        self.system.time_step = 0.01
        prefactor = 1.1
        box_vol = self.system.volume()
        p1, p2 = self.partcls
        # Total dipole moment of the two particles.
        dip = np.copy(p1.dip + p2.dip)
        dp3m_params = {'accuracy': 1e-6,
                       'mesh': [49, 49, 49],
                       'cao': 7,
                       'r_cut': 4.739799499511719,
                       'alpha': 0.9056147262573242}
        mdlc_params = {'maxPWerror': 1e-5, 'gap_size': 5.}
        # reference values for energy and force calculated for prefactor = 1.1
        ref_dp3m_energy = 1.673333
        ref_dp3m_force = np.array([-3.54175042, -4.6761059, 9.96632774])
        ref_dp3m_torque1 = np.array([-3.29316117, -13.21245739, -5.33787892])
        ref_dp3m_torque2 = np.array([3.98103932, -7.47123148, -4.12823244])
        # check metallic case
        dp3m = espressomd.magnetostatics.DipolarP3M(
            prefactor=prefactor, epsilon='metallic', tune=False, **dp3m_params)
        self.system.actors.add(dp3m)
        self.system.integrator.run(0, recalc_forces=True)
        energy = self.system.analysis.energy()['dipolar']
        tol = 1e-5
        np.testing.assert_allclose(energy, ref_dp3m_energy, atol=tol)
        # Newton's third law: forces on the pair are equal and opposite.
        np.testing.assert_allclose(np.copy(p1.f), ref_dp3m_force, atol=tol)
        np.testing.assert_allclose(np.copy(p2.f), -ref_dp3m_force, atol=tol)
        np.testing.assert_allclose(
            np.copy(p1.convert_vector_space_to_body(p1.torque_lab)),
            ref_dp3m_torque1, atol=tol)
        np.testing.assert_allclose(
            np.copy(p2.convert_vector_space_to_body(p2.torque_lab)),
            ref_dp3m_torque2, atol=tol)
        # keep current values as reference to check for DP3M dipole correction
        ref_dp3m_energy_metallic = self.system.analysis.energy()['dipolar']
        ref_dp3m_forces_metallic = np.copy(self.partcls.f)
        ref_dp3m_torque_metallic = np.array([
            p1.convert_vector_space_to_body(p1.torque_lab),
            p2.convert_vector_space_to_body(p2.torque_lab)])
        # MDLC cancels out dipole correction
        mdlc = espressomd.magnetostatic_extensions.DLC(**mdlc_params)
        self.system.actors.add(mdlc)
        # keep current values as reference to check for MDLC dipole correction
        self.system.integrator.run(0, recalc_forces=True)
        ref_mdlc_energy_metallic = self.system.analysis.energy()['dipolar']
        ref_mdlc_forces_metallic = np.copy(self.partcls.f)
        ref_mdlc_torque_metallic = np.copy(self.partcls.torque_lab)
        self.system.actors.clear()
        # check non-metallic case: sweep epsilon over 9 decades and compare
        # against the analytic surface-term (dipole) correction
        tol = 1e-10
        for epsilon in np.power(10., np.arange(-4, 5)):
            dipole_correction = 4 * np.pi / box_vol / (1 + 2 * epsilon)
            e_correction = dipole_correction / 2 * np.linalg.norm(dip)**2
            t_correction = np.cross([p1.dip, p2.dip], dipole_correction * dip)
            # The correction shifts energy and torques but not the forces.
            ref_dp3m_energy = ref_dp3m_energy_metallic + prefactor * e_correction
            ref_dp3m_forces = ref_dp3m_forces_metallic
            ref_dp3m_torque = ref_dp3m_torque_metallic - prefactor * t_correction
            dp3m = espressomd.magnetostatics.DipolarP3M(
                prefactor=prefactor, epsilon=epsilon, tune=False, **dp3m_params)
            self.system.actors.add(dp3m)
            self.system.integrator.run(0, recalc_forces=True)
            dp3m_forces = np.copy(self.partcls.f)
            dp3m_torque = np.array([
                p1.convert_vector_space_to_body(p1.torque_lab),
                p2.convert_vector_space_to_body(p2.torque_lab)])
            dp3m_energy = self.system.analysis.energy()['dipolar']
            np.testing.assert_allclose(dp3m_forces, ref_dp3m_forces, atol=tol)
            np.testing.assert_allclose(dp3m_torque, ref_dp3m_torque, atol=tol)
            np.testing.assert_allclose(dp3m_energy, ref_dp3m_energy, atol=tol)
            # MDLC cancels out dipole correction: observables must match the
            # metallic reference regardless of epsilon.
            ref_mdlc_energy = ref_mdlc_energy_metallic
            ref_mdlc_forces = ref_mdlc_forces_metallic
            ref_mdlc_torque = ref_mdlc_torque_metallic
            mdlc = espressomd.magnetostatic_extensions.DLC(**mdlc_params)
            self.system.actors.add(mdlc)
            self.system.integrator.run(0, recalc_forces=True)
            mdlc_forces = np.copy(self.partcls.f)
            mdlc_torque = np.copy(self.partcls.torque_lab)
            mdlc_energy = self.system.analysis.energy()['dipolar']
            np.testing.assert_allclose(mdlc_forces, ref_mdlc_forces, atol=tol)
            np.testing.assert_allclose(mdlc_torque, ref_mdlc_torque, atol=tol)
            np.testing.assert_allclose(mdlc_energy, ref_mdlc_energy, atol=tol)
            self.system.actors.clear()
if __name__ == "__main__":
    # Run the unittest suite when executed as a script.
    ut.main()
| pkreissl/espresso | testsuite/python/dipolar_p3m.py | Python | gpl-3.0 | 6,393 | [
"ESPResSo"
] | 5fe761ef5372cc2fc45a79814a6dd546c6c9e97c8183f7284ced72b9221912e7 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest
import os
import numpy as np
from pymatgen.core.sites import PeriodicSite
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, \
PointGroupAnalyzer, cluster_sites, iterative_symmetrize
from pymatgen.io.cif import CifParser
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.structure import Molecule, Structure
"""
Created on Mar 9, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 9, 2012"
# Locations of the shared pymatgen test-file trees, resolved relative to
# this module: molecular structures and general (periodic) test files.
test_dir_mol = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                            'test_files', "molecules")
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
class SpacegroupAnalyzerTest(PymatgenTest):
    """Tests for SpacegroupAnalyzer: space-group identification, symmetry
    operations, structure refinement and conventional/primitive standard
    cells."""

    def setUp(self):
        # Ordered POSCAR structure plus several variants reused by the tests.
        p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
        self.structure = p.structure
        self.sg = SpacegroupAnalyzer(self.structure, 0.001)
        self.disordered_structure = self.get_structure('Li10GeP2S12')
        self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001)
        # Same structure with one site moved to the end: detected symmetry
        # must not depend on site ordering.
        s = p.structure.copy()
        site = s[0]
        del s[0]
        s.append(site.species_and_occu, site.frac_coords)
        self.sg3 = SpacegroupAnalyzer(s, 0.001)
        graphite = self.get_structure('Graphite')
        graphite.add_site_property("magmom", [0.1] * len(graphite))
        self.sg4 = SpacegroupAnalyzer(graphite, 0.001)
        self.structure4 = graphite

    def _assert_lattice(self, lattice, a, b, c, alpha, beta, gamma):
        # Helper: check all six lattice parameters (assertAlmostEqual's
        # default 7 decimal places, as in the original inline asserts).
        self.assertAlmostEqual(lattice.a, a)
        self.assertAlmostEqual(lattice.b, b)
        self.assertAlmostEqual(lattice.c, c)
        self.assertAlmostEqual(lattice.alpha, alpha)
        self.assertAlmostEqual(lattice.beta, beta)
        self.assertAlmostEqual(lattice.gamma, gamma)

    def _analyzer_from_cif(self, filename):
        # Helper: first structure of a CIF test file wrapped in a
        # SpacegroupAnalyzer with loose 1e-2 symmetry precision.
        parser = CifParser(os.path.join(test_dir, filename))
        structure = parser.get_structures(False)[0]
        return SpacegroupAnalyzer(structure, symprec=1e-2)

    def test_primitive(self):
        s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"],
                                      [[0, 0, 0]])
        a = SpacegroupAnalyzer(s)
        # Conventional fcc cell has 4 atoms; the primitive cell has 1.
        self.assertEqual(len(s), 4)
        self.assertEqual(len(a.find_primitive()), 1)

    def test_is_laue(self):
        s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"],
                                      [[0, 0, 0]])
        a = SpacegroupAnalyzer(s)
        self.assertTrue(a.is_laue())

    def test_magnetic(self):
        # Adding a magmom site property must not change the detected
        # space group for this structure.
        lfp = PymatgenTest.get_structure("LiFePO4")
        sg = SpacegroupAnalyzer(lfp, 0.1)
        self.assertEqual(sg.get_space_group_symbol(), "Pnma")
        magmoms = [0] * len(lfp)
        magmoms[4] = 1
        magmoms[5] = -1
        magmoms[6] = 1
        magmoms[7] = -1
        lfp.add_site_property("magmom", magmoms)
        sg = SpacegroupAnalyzer(lfp, 0.1)
        self.assertEqual(sg.get_space_group_symbol(), "Pnma")

    def test_get_space_symbol(self):
        self.assertEqual(self.sg.get_space_group_symbol(), "Pnma")
        self.assertEqual(self.disordered_sg.get_space_group_symbol(),
                         "P4_2/nmc")
        self.assertEqual(self.sg3.get_space_group_symbol(), "Pnma")
        self.assertEqual(self.sg4.get_space_group_symbol(), "P6_3/mmc")

    def test_get_space_number(self):
        self.assertEqual(self.sg.get_space_group_number(), 62)
        self.assertEqual(self.disordered_sg.get_space_group_number(), 137)
        self.assertEqual(self.sg4.get_space_group_number(), 194)

    def test_get_hall(self):
        self.assertEqual(self.sg.get_hall(), '-P 2ac 2n')
        self.assertEqual(self.disordered_sg.get_hall(), 'P 4n 2n -1n')

    def test_get_pointgroup(self):
        self.assertEqual(self.sg.get_point_group_symbol(), 'mmm')
        self.assertEqual(self.disordered_sg.get_point_group_symbol(), '4/mmm')

    def test_get_symmetry_dataset(self):
        ds = self.sg.get_symmetry_dataset()
        self.assertEqual(ds['international'], 'Pnma')

    def test_get_crystal_system(self):
        crystal_system = self.sg.get_crystal_system()
        self.assertEqual('orthorhombic', crystal_system)
        self.assertEqual('tetragonal', self.disordered_sg.get_crystal_system())

    def test_get_symmetry_operations(self):
        for sg, structure in [(self.sg, self.structure),
                              (self.sg4, self.structure4)]:
            pgops = sg.get_point_group_operations()
            fracsymmops = sg.get_symmetry_operations()
            symmops = sg.get_symmetry_operations(True)
            latt = structure.lattice
            for fop, op, pgop in zip(fracsymmops, symmops, pgops):
                # translation vector values should all be 0 or 0.5
                t = fop.translation_vector * 2
                self.assertArrayAlmostEqual(t - np.round(t), 0)
                self.assertArrayAlmostEqual(fop.rotation_matrix,
                                            pgop.rotation_matrix)
                for site in structure:
                    # Fractional and Cartesian forms of the same operation
                    # must map a site to the same point.
                    newfrac = fop.operate(site.frac_coords)
                    newcart = op.operate(site.coords)
                    self.assertTrue(np.allclose(
                        latt.get_fractional_coords(newcart), newfrac))
                    # The image of every site must be a periodic image of
                    # some site of the structure.
                    newsite = PeriodicSite(site.species_and_occu, newcart, latt,
                                           coords_are_cartesian=True)
                    found = any(newsite.is_periodic_image(testsite, 1e-3)
                                for testsite in structure)
                    self.assertTrue(found)
                # Make sure this works for any position, not just the atomic
                # ones.
                random_fcoord = np.random.uniform(size=(3))
                random_ccoord = latt.get_cartesian_coords(random_fcoord)
                newfrac = fop.operate(random_fcoord)
                newcart = op.operate(random_ccoord)
                self.assertTrue(np.allclose(
                    latt.get_fractional_coords(newcart), newfrac))

    def test_get_refined_structure(self):
        for a in self.sg.get_refined_structure().lattice.angles:
            self.assertEqual(a, 90)
        refined = self.disordered_sg.get_refined_structure()
        for a in refined.lattice.angles:
            self.assertEqual(a, 90)
        self.assertEqual(refined.lattice.a, refined.lattice.b)
        s = self.get_structure('Li2O')
        sg = SpacegroupAnalyzer(s, 0.01)
        # Refinement expands the primitive cell to the conventional one.
        self.assertEqual(sg.get_refined_structure().num_sites, 4 * s.num_sites)

    def test_get_symmetrized_structure(self):
        symm_struct = self.sg.get_symmetrized_structure()
        for a in symm_struct.lattice.angles:
            self.assertEqual(a, 90)
        self.assertEqual(len(symm_struct.equivalent_sites), 5)
        symm_struct = self.disordered_sg.get_symmetrized_structure()
        self.assertEqual(len(symm_struct.equivalent_sites), 8)
        self.assertEqual([len(i) for i in symm_struct.equivalent_sites],
                         [16, 4, 8, 4, 2, 8, 8, 8])
        # equivalent_sites and equivalent_indices must refer to the same
        # underlying sites.
        s1 = symm_struct.equivalent_sites[1][1]
        s2 = symm_struct[symm_struct.equivalent_indices[1][1]]
        self.assertEqual(s1, s2)
        # Site properties (magmom) survive symmetrization.
        self.assertEqual(self.sg4.get_symmetrized_structure()[0].magmom, 0.1)
        self.assertEqual(symm_struct.wyckoff_symbols[0], '16h')
        # self.assertEqual(symm_struct[0].wyckoff, "16h")

    def test_find_primitive(self):
        """
        F m -3 m Li2O testing of converting to primitive cell
        """
        parser = CifParser(os.path.join(test_dir, 'Li2O.cif'))
        structure = parser.get_structures(False)[0]
        s = SpacegroupAnalyzer(structure)
        primitive_structure = s.find_primitive()
        self.assertEqual(primitive_structure.formula, "Li2 O1")
        # This isn't what is expected. All the angles should be 60
        self.assertAlmostEqual(primitive_structure.lattice.alpha, 60)
        self.assertAlmostEqual(primitive_structure.lattice.beta, 60)
        self.assertAlmostEqual(primitive_structure.lattice.gamma, 60)
        self.assertAlmostEqual(primitive_structure.lattice.volume,
                               structure.lattice.volume / 4.0)

    def test_get_ir_reciprocal_mesh(self):
        grid = self.sg.get_ir_reciprocal_mesh()
        self.assertEqual(len(grid), 216)
        self.assertAlmostEqual(grid[1][0][0], 0.1)
        self.assertAlmostEqual(grid[1][0][1], 0.0)
        self.assertAlmostEqual(grid[1][0][2], 0.0)
        self.assertAlmostEqual(grid[1][1], 2)

    def test_get_conventional_standard_structure(self):
        # One (file, a, b, c, alpha, beta, gamma) entry per Bravais lattice
        # test case; each is loaded, standardized and checked by the helpers.
        expected = [
            ('bcc_1927.cif', 9.1980270633769461, 9.1980270633769461,
             9.1980270633769461, 90, 90, 90),
            ('btet_1915.cif', 5.0615106678044235, 5.0615106678044235,
             4.2327080177761687, 90, 90, 90),
            ('orci_1010.cif', 2.9542233922299999, 4.6330325651443296,
             5.373703587040775, 90, 90, 90),
            ('orcc_1003.cif', 4.1430033493799998, 31.437979757624728,
             3.99648651, 90, 90, 90),
            ('orac_632475.cif', 3.1790663399999999, 9.9032878699999998,
             3.5372412099999999, 90, 90, 90),
            ('monoc_1028.cif', 14.033435583000625, 3.96052850731,
             6.8743926325200002, 90, 117.53832420192903, 90),
            ('hex_1170.cif', 3.699919902005897, 3.699919902005897,
             6.9779585500000003, 90, 90, 120),
        ]
        for fname, a, b, c, alpha, beta, gamma in expected:
            conv = self._analyzer_from_cif(
                fname).get_conventional_standard_structure()
            self._assert_lattice(conv.lattice, a, b, c, alpha, beta, gamma)

    def test_get_primitive_standard_structure(self):
        # Same scheme as above, for the primitive standard cells.
        expected = [
            ('bcc_1927.cif', 7.9657251015812145, 7.9657251015812145,
             7.9657251015812145, 109.47122063400001, 109.47122063400001,
             109.47122063400001),
            ('btet_1915.cif', 4.1579321075608791, 4.1579321075608791,
             4.1579321075608791, 105.015053349, 105.015053349,
             118.80658411899999),
            ('orci_1010.cif', 3.8428217771014852, 3.8428217771014852,
             3.8428217771014852, 134.78923546600001, 105.856239333,
             91.276341676000001),
            ('orcc_1003.cif', 15.854897098324196, 15.854897098324196,
             3.99648651, 90, 90, 164.985257335),
            ('orac_632475.cif', 5.2005185662155391, 5.2005185662155391,
             3.5372412099999999, 90, 90, 144.40557588533386),
            ('monoc_1028.cif', 7.2908007159612325, 7.2908007159612325,
             6.8743926325200002, 63.579155761999999, 116.42084423747779,
             148.47965136208569),
            ('hex_1170.cif', 3.699919902005897, 3.699919902005897,
             6.9779585500000003, 90, 90, 120),
            ('rhomb_3478_conv.cif', 5.9352627428399982, 5.9352627428399982,
             5.9352627428399982, 28.049186140546812, 28.049186140546812,
             28.049186140546812),
        ]
        for fname, a, b, c, alpha, beta, gamma in expected:
            prim = self._analyzer_from_cif(
                fname).get_primitive_standard_structure()
            self._assert_lattice(prim.lattice, a, b, c, alpha, beta, gamma)
class SpacegroupTest(unittest.TestCase):
    """Tests for the SpacegroupOperations returned by SpacegroupAnalyzer."""

    def setUp(self):
        poscar = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
        self.structure = poscar.structure
        analyzer = SpacegroupAnalyzer(self.structure, 0.001)
        self.sg1 = analyzer.get_space_group_operations()

    def test_are_symmetrically_equivalent(self):
        def sites(indices):
            # Pick the structure sites at the given indices.
            return [self.structure[i] for i in indices]

        # Sites 0,1 map onto sites 2,3 under the space group...
        self.assertTrue(
            self.sg1.are_symmetrically_equivalent(sites([0, 1]),
                                                  sites([2, 3]), 1e-3))
        # ...but not onto the overlapping pair 0,2.
        self.assertFalse(
            self.sg1.are_symmetrically_equivalent(sites([0, 1]),
                                                  sites([0, 2]), 1e-3))
# Molecule fixtures shared by PointGroupAnalyzerTest and FuncTest below.
# The point-group label noted on each one is the symbol the tests assert.
# Hydrogen peroxide: C2.
H2O2 = Molecule(["O", "O", "H", "H"],
                [[0, 0.727403, -0.050147], [0, -0.727403, -0.050147],
                 [0.83459, 0.897642, 0.401175],
                 [-0.83459, -0.897642, 0.401175]])
# 1,2-difluoro-1,2-dibromoethane (anti conformer): Ci.
C2H2F2Br2 = Molecule(["C", "C", "F", "Br", "H", "F", "H", "Br"],
                     [[-0.752000, 0.001000, -0.141000],
                      [0.752000, -0.001000, 0.141000],
                      [-1.158000, 0.991000, 0.070000],
                      [-1.240000, -0.737000, 0.496000],
                      [-0.924000, -0.249000, -1.188000],
                      [1.158000, -0.991000, -0.070000],
                      [0.924000, 0.249000, 1.188000],
                      [1.240000, 0.737000, -0.496000]])
# Water: C2v.
H2O = Molecule(["H", "O", "H"],
               [[0, 0.780362, -.456316], [0, 0, .114079],
                [0, -.780362, -.456316]])
# Ethylene: D2h.
C2H4 = Molecule(["C", "C", "H", "H", "H", "H"],
                [[0.0000, 0.0000, 0.6695], [0.0000, 0.0000, -0.6695],
                 [0.0000, 0.9289, 1.2321], [0.0000, -0.9289, 1.2321],
                 [0.0000, 0.9289, -1.2321], [0.0000, -0.9289, -1.2321]])
# Ammonia: C3v.
NH3 = Molecule(["N", "H", "H", "H"],
               [[0.0000, 0.0000, 0.0000], [0.0000, -0.9377, -0.3816],
                [0.8121, 0.4689, -0.3816], [-0.8121, 0.4689, -0.3816]])
# Boron trifluoride (planar): D3h.
BF3 = Molecule(["B", "F", "F", "F"],
               [[0.0000, 0.0000, 0.0000], [0.0000, -0.9377, 0.00],
                [0.8121, 0.4689, 0], [-0.8121, 0.4689, 0]])
# Methane: Td.
CH4 = Molecule(["C", "H", "H", "H", "H"], [[0.000000, 0.000000, 0.000000],
                                           [0.000000, 0.000000, 1.08],
                                           [1.026719, 0.000000, -0.363000],
                                           [-0.513360, -0.889165, -0.363000],
                                           [-0.513360, 0.889165, -0.363000]])
# Octahedral PF6: Oh.
PF6 = Molecule(["P", "F", "F", "F", "F", "F", "F"],
               [[0, 0, 0], [0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0],
                [1, 0, 0], [-1, 0, 0]])
class PointGroupAnalyzerTest(PymatgenTest):
    """Tests for PointGroupAnalyzer and iterative_symmetrize, plus a few
    SpacegroupAnalyzer regression checks and k-point weight tests."""

    def test_spherical(self):
        a = PointGroupAnalyzer(CH4)
        self.assertEqual(a.sch_symbol, "Td")
        self.assertEqual(len(a.get_pointgroup()), 24)
        a = PointGroupAnalyzer(PF6)
        self.assertEqual(a.sch_symbol, "Oh")
        self.assertEqual(len(a.get_pointgroup()), 48)
        m = Molecule.from_file(os.path.join(test_dir_mol, "c60.xyz"))
        a = PointGroupAnalyzer(m)
        self.assertEqual(a.sch_symbol, "Ih")
        # A perfect cube of carbons is also Oh.
        cube_species = ["C", "C", "C", "C", "C", "C", "C", "C"]
        cube_coords = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1],
                       [0, 1, 1], [1, 0, 1], [1, 1, 1]]
        m = Molecule(cube_species, cube_coords)
        a = PointGroupAnalyzer(m, 0.1)
        self.assertEqual(a.sch_symbol, "Oh")

    def test_linear(self):
        coords = [[0.000000, 0.000000, 0.000000],
                  [0.000000, 0.000000, 1.08],
                  [0, 0.000000, -1.08]]
        # Symmetric linear molecule -> D*h; asymmetric -> C*v.
        mol = Molecule(["C", "H", "H"], coords)
        a = PointGroupAnalyzer(mol)
        self.assertEqual(a.sch_symbol, "D*h")
        mol = Molecule(["C", "H", "N"], coords)
        a = PointGroupAnalyzer(mol)
        self.assertEqual(a.sch_symbol, "C*v")

    def test_asym_top(self):
        coords = [[0.000000, 0.000000, 0.000000],
                  [0.000000, 0.000000, 1.08],
                  [1.026719, 0.000000, -0.363000],
                  [-0.513360, -0.889165, -0.363000],
                  [-0.513360, 0.889165, -0.363000]]
        mol = Molecule(["C", "H", "F", "Br", "Cl"], coords)
        a = PointGroupAnalyzer(mol)
        self.assertEqual(a.sch_symbol, "C1")
        self.assertEqual(len(a.get_pointgroup()), 1)
        coords = [[0.000000, 0.000000, 1.08],
                  [1.026719, 0.000000, -0.363000],
                  [-0.513360, -0.889165, -0.363000],
                  [-0.513360, 0.889165, -0.363000]]
        cs_mol = Molecule(["H", "F", "Cl", "Cl"], coords)
        a = PointGroupAnalyzer(cs_mol)
        self.assertEqual(a.sch_symbol, "Cs")
        self.assertEqual(len(a.get_pointgroup()), 2)
        a = PointGroupAnalyzer(C2H2F2Br2)
        self.assertEqual(a.sch_symbol, "Ci")
        self.assertEqual(len(a.get_pointgroup()), 2)

    def test_cyclic(self):
        a = PointGroupAnalyzer(H2O2)
        self.assertEqual(a.sch_symbol, "C2")
        self.assertEqual(len(a.get_pointgroup()), 2)
        a = PointGroupAnalyzer(H2O)
        self.assertEqual(a.sch_symbol, "C2v")
        self.assertEqual(len(a.get_pointgroup()), 4)
        a = PointGroupAnalyzer(NH3)
        self.assertEqual(a.sch_symbol, "C3v")
        self.assertEqual(len(a.get_pointgroup()), 6)
        cs2 = Molecule.from_file(os.path.join(test_dir_mol,
                                              "Carbon_Disulfide.xyz"))
        a = PointGroupAnalyzer(cs2, eigen_tolerance=0.001)
        self.assertEqual(a.sch_symbol, "C2v")

    def test_dihedral(self):
        a = PointGroupAnalyzer(C2H4)
        self.assertEqual(a.sch_symbol, "D2h")
        self.assertEqual(len(a.get_pointgroup()), 8)
        a = PointGroupAnalyzer(BF3)
        self.assertEqual(a.sch_symbol, "D3h")
        self.assertEqual(len(a.get_pointgroup()), 12)
        m = Molecule.from_file(os.path.join(test_dir_mol, "b12h12.xyz"))
        a = PointGroupAnalyzer(m)
        self.assertEqual(a.sch_symbol, "Ih")

    def test_symmetrize_molecule1(self):
        np.random.seed(77)
        distortion = np.random.randn(len(C2H4), 3) / 10
        dist_mol = Molecule(C2H4.species, C2H4.cart_coords + distortion)
        eq = iterative_symmetrize(dist_mol, max_n=100, epsilon=1e-7)
        sym_mol, eq_sets, ops = eq['sym_mol'], eq['eq_sets'], eq['sym_ops']
        # The carbons and the four hydrogens each form an equivalence set.
        self.assertTrue({0, 1} in eq_sets.values())
        self.assertTrue({2, 3, 4, 5} in eq_sets.values())
        coords = sym_mol.cart_coords
        for i, eq_set in eq_sets.items():
            for j in eq_set:
                # The op mapping site i onto site j must reproduce site j's
                # coordinates (was computed twice in the original; reuse it).
                rotated = np.dot(ops[i][j], coords[i])
                self.assertTrue(np.allclose(rotated, coords[j]))

    def test_symmetrize_molecule2(self):
        np.random.seed(77)
        distortion = np.random.randn(len(C2H2F2Br2), 3) / 20
        dist_mol = Molecule(C2H2F2Br2.species,
                            C2H2F2Br2.cart_coords + distortion)
        # assertEqual (rather than assertTrue(x == y)) reports the actual
        # symbol on failure.
        PA1 = PointGroupAnalyzer(C2H2F2Br2, tolerance=0.1)
        self.assertEqual(PA1.get_pointgroup().sch_symbol, 'Ci')
        # Distortion destroys the symmetry...
        PA2 = PointGroupAnalyzer(dist_mol, tolerance=0.1)
        self.assertEqual(PA2.get_pointgroup().sch_symbol, 'C1')
        # ...and iterative symmetrization restores it.
        eq = iterative_symmetrize(dist_mol, tolerance=0.3)
        PA3 = PointGroupAnalyzer(eq['sym_mol'], tolerance=0.1)
        self.assertEqual(PA3.get_pointgroup().sch_symbol, 'Ci')

    def test_tricky_structure(self):
        # for some reason this structure kills spglib1.9
        # 1.7 can't find symmetry either, but at least doesn't kill python
        s = Structure.from_file(os.path.join(test_dir, 'POSCAR.tricky_symmetry'))
        sa = SpacegroupAnalyzer(s, 0.1)
        # Smoke test: these calls must not crash the interpreter.
        sa.get_space_group_symbol()
        sa.get_space_group_number()
        sa.get_point_group_symbol()
        sa.get_crystal_system()
        sa.get_hall()

    def test_get_kpoint_weights(self):
        # Weights reconstructed from the irreducible mesh must match
        # get_kpoint_weights for both mesh densities (loops merged; the
        # original duplicated this block verbatim for each mesh).
        for mesh in [(4, 4, 4), (1, 2, 3)]:
            for name in ["SrTiO3", "LiFePO4", "Graphite"]:
                s = PymatgenTest.get_structure(name)
                a = SpacegroupAnalyzer(s)
                ir_mesh = a.get_ir_reciprocal_mesh(mesh)
                weights = [i[1] for i in ir_mesh]
                weights = np.array(weights) / sum(weights)
                for i, w in zip(weights, a.get_kpoint_weights(
                        [i[0] for i in ir_mesh])):
                    self.assertAlmostEqual(i, w)
        # Weights for an actual VASP k-point list must match the run.
        v = Vasprun(os.path.join(test_dir, "vasprun.xml"))
        a = SpacegroupAnalyzer(v.final_structure)
        wts = a.get_kpoint_weights(v.actual_kpoints)
        for w1, w2 in zip(v.actual_kpoints_weights, wts):
            self.assertAlmostEqual(w1, w2)
        # A k-point list incompatible with the symmetry must raise.
        kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]]
        self.assertRaises(ValueError, a.get_kpoint_weights, kpts)
class FuncTest(unittest.TestCase):
    """Tests for the module-level cluster_sites helper."""

    def test_cluster_sites(self):
        # Methane: the carbon sits at the centroid, the hydrogens cluster.
        origin_site, clusters = cluster_sites(CH4, 0.1)
        self.assertEqual(origin_site.specie.symbol, "C")
        self.assertEqual(len(clusters), 1)
        # Centered C2H2F2Br2 has no site at the origin and four clusters.
        origin_site, clusters = cluster_sites(
            C2H2F2Br2.get_centered_molecule(), 0.1)
        self.assertIsNone(origin_site)
        self.assertEqual(len(clusters), 4)
# Run the unittest suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
| matk86/pymatgen | pymatgen/symmetry/tests/test_analyzer.py | Python | mit | 27,586 | [
"VASP",
"pymatgen"
] | 7ce9a4d2f09abfe2c5d1d38c8f612ea02a8a647f811fe6b101e0850e3610ae6b |
# S. Ponce: Ziman formula for resistivity of Pb.
# Script to compute the resistivity
import numpy as np
import matplotlib.pyplot as P
from matplotlib import rc

# NOTE(review): the original also imported scipy.interpolate.spline (removed
# from SciPy >= 1.3, so the import itself fails) and curve_fit/inv/ticker,
# none of which were used; they have been dropped.

# Constants and unit conversions
kelvin2eV = 8.6173427909E-05
kelvin2Ry = 6.333627859634130e-06
rhoaum = 2.2999241E6          # atomic resistivity unit conversion
meV2Ha = 0.000036749
ohm2microohmcm = 1E4
size = 16                     # font size used for all figure text
cm2mev = 0.12398
Thz2meV = 4.13567
ry2ev = 13.605698066
meV2ry = (1.0 / (ry2ev * 1000))
meV2eV = 0.001
kB = 6.333620222466232e-06    # Ry/K

rc('axes', linewidth=2)


def ziman_resistivity(a2F, temperatures):
    """Return the Ziman resistivity (micro-Ohm cm) for each temperature (K).

    a2F is a 2-column array: column 0 the phonon energy in Ry, column 1
    the transport Eliashberg function alpha^2 F_tr(omega).
    """
    rho = []
    for t in temperatures:
        T = t * kelvin2Ry
        # Bose-Einstein occupation; expm1 is numerically stable for small x.
        n = 1.0 / np.expm1(a2F[:, 0] / T)
        integrand = a2F[:, 0] * a2F[:, 1] * n * (1 + n)
        # Named 'integral' (the original shadowed the builtin 'int').
        integral = np.trapz(integrand, a2F[:, 0])
        rho.append((196.1075 / 2) * (np.pi / T) * integral
                   * (1 / rhoaum) * 1E8)
    return rho


temps = np.arange(1, 601)  # 1 K .. 600 K

# ----------------------------------
# a2F_tr(1) = \omega in meV from 0-10 meV
# 4000000 k-point wo SOC with 25000 q-points RND and gaussian of 20meV
a2F = np.loadtxt('pb.a2f_400kSobol_02_tr.04', dtype='float', comments='#')
a2F[:, 0] = a2F[:, 0] * meV2ry  # meV to Ry
P.plot(temps, ziman_resistivity(a2F, temps), linewidth=2, color='blue',
       label='Present work without SOC')

a2F = np.loadtxt('pb.a2f_400kSobol_02_tr.04_wSOC', dtype='float', comments='#')
a2F[:, 0] = a2F[:, 0] * meV2ry  # meV to Ry
P.plot(temps, ziman_resistivity(a2F, temps), linewidth=2, color='red',
       label='Present work with SOC')

####################################################
# Experimental data in micro Ohm per cm
# 63Al2 ref
T1 = np.array([14, 20.4, 58, 77.4, 90.31])
r1 = np.array([0.02, 0.560, 3.47, 4.81, 5.69])
# 73 Mo 1 corrected for thermal expansion
T2 = np.array([80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300,
               320, 340, 360, 380, 400])
r2 = np.array([4.92, 6.349, 7.78, 9.222, 10.678, 12.152, 13.639, 15.143,
               16.661, 18.196, 19.758, 21.35, 22.985, 24.656, 26.358,
               28.073, 29.824])
# 74 Co1
T3 = np.array([260, 273.15, 300, 350, 400, 450, 500, 550])
r3 = np.array([18.173, 19.196, 21.308, 25.336, 29.506, 33.832, 38.336,
               43.031])
# 66 Le 1
T4 = np.array([291.51, 367.31, 376.97, 385.78, 407.30, 416.20, 435.37,
               454.61, 495.61, 522.24, 541.9, 558.86, 577.75, 585.39,
               592.14, 594.30])
r4 = np.array([20.75, 26.94, 27.77, 28.53, 30.35, 31.12, 32.76, 34.53,
               38.19, 40.64, 42.50, 44.13, 45.98, 46.73, 47.40, 47.62])

P.plot(T1, r1, '.', markersize=20, color='black', label='Hellwege et al.')
P.plot(T2, r2, 's', markersize=5, markerfacecolor='red', color='red',
       label='Moore et al.')
P.plot(T3, r3, 'd', markersize=5, color='green', label='Cook et al.')
P.plot(T4, r4, '*', markersize=5, color='cyan', label='Leadbetter et al.')

############################
P.xlim([0, 600])
P.ylim([0, 50])
P.xticks(fontsize=size)
P.yticks(fontsize=size)
# Raw string: the original '$\rho...' contained a literal carriage return
# ('\r') that corrupted the TeX label.
P.ylabel(r'$\rho(\mu\Omega$ cm)', fontsize=size)
P.xlabel('T (K)', fontsize=size)
P.legend(loc=2)
P.rc('text', usetex=True)
# P.grid('on')
P.show()
| mmdg-oxford/papers | Ponce-CPC-2016/Pb/resistivity.py | Python | gpl-3.0 | 3,086 | [
"Gaussian"
] | 9b5dcdc9aa931766f7b6fb864ba98db555a93fd56822fb120dce642f43b742e9 |
import sys
import subprocess
from .exceptions import PyperclipException
# Message raised whenever no clipboard mechanism could be found.
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit https://pyperclip.readthedocs.org """
PY2 = sys.version_info[0] == 2  # True when running under Python 2

# Clipboard text type: the original read `str if PY2 else str`, a dead
# conditional (2to3 artifact). On Python 2 clipboard text should be
# unicode; `unicode` is only evaluated when PY2 is True, so this is safe
# to import on Python 3.
text_type = unicode if PY2 else str
def init_osx_clipboard():
    """Return (copy, paste) functions backed by macOS pbcopy/pbpaste."""
    def copy_osx(text):
        p = subprocess.Popen(['pbcopy', 'w'],
                             stdin=subprocess.PIPE, close_fds=True)
        # Pipes carry bytes on Python 3; passing str raised TypeError.
        p.communicate(input=text.encode('utf-8'))

    def paste_osx():
        p = subprocess.Popen(['pbpaste', 'r'],
                             stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        # Decode so callers get text, not bytes, on Python 3.
        return stdout.decode('utf-8')

    return copy_osx, paste_osx
def init_gtk_clipboard():
    """Return (copy, paste) functions backed by the GTK clipboard."""
    import gtk

    def copy_gtk(text):
        # Keep a module-level reference so the clipboard object survives
        # until its contents are stored.
        global cb
        cb = gtk.Clipboard()
        cb.set_text(text)
        cb.store()

    def paste_gtk():
        contents = gtk.Clipboard().wait_for_text()
        # for python 2, returns None if the clipboard is blank.
        return '' if contents is None else contents

    return copy_gtk, paste_gtk
def init_qt_clipboard():
    """Return (copy, paste) functions backed by the Qt application clipboard."""
    # $DISPLAY should exist
    from PyQt4.QtGui import QApplication

    app = QApplication([])

    def copy_qt(text):
        app.clipboard().setText(text)

    def paste_qt():
        return text_type(app.clipboard().text())

    return copy_qt, paste_qt
def init_xclip_clipboard():
    """Return (copy, paste) functions backed by the xclip utility."""
    def copy_xclip(text):
        p = subprocess.Popen(['xclip', '-selection', 'c'],
                             stdin=subprocess.PIPE, close_fds=True)
        # Pipes carry bytes on Python 3; passing str raised TypeError.
        p.communicate(input=text.encode('utf-8'))

    def paste_xclip():
        p = subprocess.Popen(['xclip', '-selection', 'c', '-o'],
                             stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        # Decode so callers get text, not bytes, on Python 3.
        return stdout.decode('utf-8')

    return copy_xclip, paste_xclip
def init_xsel_clipboard():
    """Return (copy, paste) functions backed by the xsel utility."""
    def copy_xsel(text):
        p = subprocess.Popen(['xsel', '-b', '-i'],
                             stdin=subprocess.PIPE, close_fds=True)
        # Pipes carry bytes on Python 3; passing str raised TypeError.
        p.communicate(input=text.encode('utf-8'))

    def paste_xsel():
        p = subprocess.Popen(['xsel', '-b', '-o'],
                             stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        # Decode so callers get text, not bytes, on Python 3.
        return stdout.decode('utf-8')

    return copy_xsel, paste_xsel
def init_klipper_clipboard():
    """Return (copy, paste) functions backed by KDE's klipper via qdbus."""
    def copy_klipper(text):
        p = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents',
             text],
            stdin=subprocess.PIPE, close_fds=True)
        p.communicate(input=None)

    def paste_klipper():
        p = subprocess.Popen(
            ['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'],
            stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        # Decode first: on Python 3 stdout is bytes and the str-based
        # endswith('\n') checks below would raise TypeError.
        clipboardContents = stdout.decode('utf-8')
        # even if blank, Klipper will append a newline at the end
        assert len(clipboardContents) > 0
        # make sure that newline is there
        assert clipboardContents.endswith('\n')
        if clipboardContents.endswith('\n'):
            clipboardContents = clipboardContents[:-1]
        return clipboardContents

    return copy_klipper, paste_klipper
def init_no_clipboard():
    """Return placeholder (copy, paste) callables for unsupported systems.

    Both returned objects are falsy and raise PyperclipException when
    actually invoked.
    """
    class ClipboardUnavailable(object):
        def __call__(self, *args, **kwargs):
            # Any attempt to copy or paste reports the missing mechanism.
            raise PyperclipException(EXCEPT_MSG)

        if PY2:
            def __nonzero__(self):
                return False
        else:
            def __bool__(self):
                return False

    # Two distinct instances, matching the original's identity semantics.
    return ClipboardUnavailable(), ClipboardUnavailable()
| tvaddonsco/tva-release-repo | matrix/plugin.video.realizerx/resources/lib/modules/pyperclip/clipboards.py | Python | gpl-3.0 | 3,970 | [
"VisIt"
] | e7fa11b5e8e6791bda67881ce18f4d590c89dd747fec4a6df57b2f3082d726d6 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Geant4(CMakePackage):
    """Geant4 is a toolkit for the simulation of the passage of particles
    through matter. Its areas of application include high energy, nuclear
    and accelerator physics, as well as studies in medical and space
    science."""

    homepage = "http://geant4.cern.ch/"
    url = "https://gitlab.cern.ch/geant4/geant4/-/archive/v10.7.1/geant4-v10.7.1.tar.gz"

    tags = ['hep']

    maintainers = ['drbenmorgan']

    version('11.0.0', sha256='04d11d4d9041507e7f86f48eb45c36430f2b6544a74c0ccaff632ac51d9644f1')
    version('10.7.3', sha256='8615d93bd4178d34f31e19d67bc81720af67cdab1c8425af8523858dcddcf65b', preferred=True)
    version('10.7.2', sha256='593fc85883a361487b17548ba00553501f66a811b0a79039276bb75ad59528cf')
    version('10.7.1', sha256='2aa7cb4b231081e0a35d84c707be8f35e4edc4e97aad2b233943515476955293')
    version('10.7.0', sha256='c991a139210c7f194720c900b149405090058c00beb5a0d2fac5c40c42a262d4')
    version('10.6.3', sha256='bf96d6d38e6a0deabb6fb6232eb00e46153134da645715d636b9b7b4490193d3')
    version('10.6.2', sha256='e381e04c02aeade1ed8cdd9fdbe7dcf5d6f0f9b3837a417976b839318a005dbd')
    version('10.6.1', sha256='4fd64149ae26952672a81ce5579d3806fda4bd251d486897093ac57633a42b7e')
    version('10.6.0', sha256='eebe6a170546064ff81ab3b00f513ccd1d4122a026514982368d503ac55a4ee4')
    version('10.5.1', sha256='2397eb859dc4de095ff66059d8bda9f060fdc42e10469dd7890946293eeb0e39')
    version('10.4.3', sha256='67f3bb6405a2c77e573936c2b933f5a4a33915aa379626a2eb3012009b91e1da')
    version('10.4.0', sha256='e919b9b0a88476e00c0b18ab65d40e6a714b55ee4778f66bac32a5396c22aa74')
    version('10.3.3', sha256='bcd36a453da44de9368d1d61b0144031a58e4b43a6d2d875e19085f2700a89d8')

    # Supported C++ standards; the first entry is the default for the variant.
    _cxxstd_values = ('11', '14', '17')
    variant('cxxstd',
            default=_cxxstd_values[0],
            values=_cxxstd_values,
            multi=False,
            description='Use the specified C++ standard when building.')
    # Geant4 11+ requires C++17.
    conflicts('cxxstd=11', when='@11:', msg='geant4@11: only supports cxxstd=17')
    conflicts('cxxstd=14', when='@11:', msg='geant4@11: only supports cxxstd=17')

    variant('threads', default=True, description='Build with multithreading')
    variant('vecgeom', default=False, description='Enable vecgeom support')
    variant('opengl', default=False, description='Optional OpenGL support')
    variant('x11', default=False, description='Optional X11 support')
    variant('motif', default=False, description='Optional motif support')
    variant('qt', default=False, description='Enable Qt support')
    variant('python', default=False, description='Enable Python bindings')
    variant('tbb', default=False, description='Use TBB as a tasking backend', when='@11:')
    variant('vtk', default=False, description='Enable VTK support', when='@11:')

    depends_on('cmake@3.16:', type='build', when='@11.0.0:')
    depends_on('cmake@3.8:', type='build', when='@10.6.0:')
    depends_on('cmake@3.5:', type='build')

    # Each Geant4 release is pinned to the matching geant4-data release.
    depends_on('geant4-data@11.0.0', when='@11.0.0')
    depends_on('geant4-data@10.7.3', when='@10.7.3')
    depends_on('geant4-data@10.7.2', when='@10.7.2')
    depends_on('geant4-data@10.7.1', when='@10.7.1')
    depends_on('geant4-data@10.7.0', when='@10.7.0')
    depends_on('geant4-data@10.6.3', when='@10.6.3')
    depends_on('geant4-data@10.6.2', when='@10.6.2')
    depends_on('geant4-data@10.6.1', when='@10.6.1')
    depends_on('geant4-data@10.6.0', when='@10.6.0')
    depends_on('geant4-data@10.5.1', when='@10.5.1')
    depends_on('geant4-data@10.4.3', when='@10.4.3')
    depends_on('geant4-data@10.4.0', when='@10.4.0')
    depends_on('geant4-data@10.3.3', when='@10.3.3')

    depends_on("expat")
    depends_on("zlib")
    depends_on('tbb', when='+tbb')
    depends_on('vtk@8.2:', when='+vtk')

    # Python, with boost requirement dealt with in cxxstd section
    depends_on('python@3:', when='+python')
    extends('python', when='+python')
    conflicts('+python', when='@:10.6.1',
              msg='Geant4 <= 10.6.1 cannot be built with Python bindings')

    # Dependencies that carry a cxxstd variant must agree with Geant4's.
    for std in _cxxstd_values:
        # CLHEP version requirements to be reviewed
        depends_on('clhep@2.4.5.1: cxxstd=' + std,
                   when='@11.0.0: cxxstd=' + std)
        depends_on('clhep@2.4.4.0: cxxstd=' + std,
                   when='@10.7.0: cxxstd=' + std)
        depends_on('clhep@2.3.3.0: cxxstd=' + std,
                   when='@10.3.3:10.6 cxxstd=' + std)

        # Spack only supports Xerces-c 3 and above, so no version req
        depends_on('xerces-c netaccessor=curl cxxstd=' + std,
                   when='cxxstd=' + std)

        # Vecgeom specific versions for each Geant4 version
        depends_on('vecgeom@1.1.18:1.1 cxxstd=' + std,
                   when='@11.0.0: +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.8:1.1 cxxstd=' + std,
                   when='@10.7.0: +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.5 cxxstd=' + std,
                   when='@10.6.0:10.6 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@1.1.0 cxxstd=' + std,
                   when='@10.5.0:10.5 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@0.5.2 cxxstd=' + std,
                   when='@10.4.0:10.4 +vecgeom cxxstd=' + std)
        depends_on('vecgeom@0.3rc cxxstd=' + std,
                   when='@10.3.0:10.3 +vecgeom cxxstd=' + std)

        # Boost.python, conflict handled earlier
        depends_on('boost@1.70: +python cxxstd=' + std,
                   when='+python cxxstd=' + std)

    # Visualization driver dependencies
    depends_on("gl", when='+opengl')
    depends_on("glu", when='+opengl')
    depends_on("glx", when='+opengl+x11')
    depends_on("libx11", when='+x11')
    depends_on("libxmu", when='+x11')
    depends_on("motif", when='+motif')
    depends_on("qt@5: +opengl", when="+qt")

    # As released, 10.03.03 has issues with respect to using external
    # CLHEP.
    patch('CLHEP-10.03.03.patch', level=1, when='@10.3.3')
    # These patches can be applied independent of the cxxstd value?
    patch('cxx17.patch', when='@:10.3 cxxstd=17')
    patch('cxx17_geant4_10_0.patch', level=1, when='@10.4.0 cxxstd=17')
    patch('geant4-10.4.3-cxx17-removed-features.patch',
          level=1, when='@10.4.3 cxxstd=17')

    def cmake_args(self):
        # Assemble the CMake command-line options for configuring Geant4.
        spec = self.spec

        # Core options
        options = [
            '-DGEANT4_USE_SYSTEM_CLHEP=ON',
            '-DGEANT4_USE_SYSTEM_EXPAT=ON',
            '-DGEANT4_USE_SYSTEM_ZLIB=ON',
            '-DGEANT4_USE_G3TOG4=ON',
            '-DGEANT4_USE_GDML=ON',
            '-DXERCESC_ROOT_DIR={0}'.format(spec['xerces-c'].prefix)
        ]

        # Use the correct C++ standard option for the requested version
        # (Geant4 11+ dropped the custom GEANT4_BUILD_CXXSTD option).
        if spec.version >= Version('11.0'):
            options.append(
                self.define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'))
        else:
            options.append(
                self.define_from_variant('GEANT4_BUILD_CXXSTD', 'cxxstd'))

        # Don't install the package cache file as Spack will set
        # up CMAKE_PREFIX_PATH etc for the dependencies
        if spec.version >= Version('10.6'):
            options.append('-DGEANT4_INSTALL_PACKAGE_CACHE=OFF')

        # Multithreading
        options.append(self.define_from_variant('GEANT4_BUILD_MULTITHREADED',
                                                'threads'))
        options.append(self.define_from_variant('GEANT4_USE_TBB', 'tbb'))
        if '+threads' in spec:
            # Locked at global-dynamic to allow use cases that load the
            # geant4 libs at application runtime
            options.append('-DGEANT4_BUILD_TLS_MODEL=global-dynamic')

        # install the data with geant4
        datadir = spec['geant4-data'].prefix.share
        dataver = '{0}-{1}'.format(spec['geant4-data'].name,
                                   spec['geant4-data'].version.dotted)
        datapath = join_path(datadir, dataver)
        options.append('-DGEANT4_INSTALL_DATADIR={0}'.format(datapath))

        # Vecgeom
        if '+vecgeom' in spec:
            options.append('-DGEANT4_USE_USOLIDS=ON')
            options.append('-DUSolids_DIR=%s' % spec[
                'vecgeom'].prefix.lib.CMake.USolids)

        # Visualization options
        # (the X11/Motif GL drivers are not built on macOS)
        if 'platform=darwin' not in spec:
            if "+x11" in spec and "+opengl" in spec:
                options.append('-DGEANT4_USE_OPENGL_X11=ON')
            if "+motif" in spec and "+opengl" in spec:
                options.append('-DGEANT4_USE_XM=ON')
            if "+x11" in spec:
                options.append('-DGEANT4_USE_RAYTRACER_X11=ON')

        if '+qt' in spec:
            options.append('-DGEANT4_USE_QT=ON')
            options.append(
                '-DQT_QMAKE_EXECUTABLE=%s' %
                spec['qt'].prefix.bin.qmake)

        options.append(self.define_from_variant('GEANT4_USE_VTK', 'vtk'))

        # Python
        if spec.version > Version('10.6.1'):
            options.append(self.define_from_variant('GEANT4_USE_PYTHON',
                                                    'python'))

        return options
| LLNL/spack | var/spack/repos/builtin/packages/geant4/package.py | Python | lgpl-2.1 | 9,334 | [
"VTK"
] | f13d9191a1090a9612cae984cdde24e0bf2d469f0f9b1809168925b772723633 |
import logging
import os
import multiprocessing
import traceback
import argparse
import sys
import time
import json
import base64
from functools import partial
import pybedtools
import pysam
from defaults import ISIZE_MEAN, ISIZE_SD, GT_WINDOW, GT_NORMAL_FRAC
# VCF-style genotype strings emitted for each interval.
GT_HET = "0/1"  # heterozygous
GT_HOM = "1/1"  # homozygous alternate
GT_REF = "0/0"  # homozygous reference
GT_UNK = "./."  # unknown / no call
def count_reads_supporting_ref(chrom, start, end, bam_handle, isize_min, isize_max, window):
    # Count read pairs around [start, end] that support the reference allele:
    # properly oriented FR pairs with the mate on the same chromosome, at
    # least 20 bp of overlap spanning a breakpoint, and an insert size inside
    # [isize_min, isize_max].
    # Returns (normal_read_count, aligned_bases_of_normal_reads, total_reads).
    total_normal_reads = 0
    total_read_bases = 0
    total_reads = 0
    window_start = max(0, start - window)
    window_end = end + window
    for aln in bam_handle.fetch(str(chrom), window_start, window_end):
        if aln.is_duplicate or not aln.is_paired:
            continue
        # Counted in the denominator even if it fails the "normal" filters.
        total_reads += 1
        if aln.is_unmapped or aln.mate_is_unmapped:
            continue
        # Mate must map to the same reference (rnext == tid).
        if aln.rnext != aln.tid: continue
        # Require forward/reverse (FR) pair orientation.
        if aln.is_reverse:
            if not (aln.pnext < aln.pos and not aln.mate_is_reverse): continue
        else:
            if not (aln.pnext > aln.pos and aln.mate_is_reverse): continue
        # The read must straddle either breakpoint by at least 20 bp on
        # both sides.
        if not (((aln.aend - end) >= 20 and (end - aln.pos) >= 20) or (
                (start - aln.pos) >= 20 and (aln.aend - start) >= 20)):
            continue
        tlen = abs(aln.tlen)
        if isize_min <= tlen <= isize_max:
            total_normal_reads += 1
            total_read_bases = total_read_bases + aln.qlen
    return total_normal_reads, total_read_bases, total_reads
def genotype_interval(chrom, start, end, sv_type, sv_length, bam_handle, isize_min, isize_max, window=GT_WINDOW,
                      normal_frac_threshold=GT_NORMAL_FRAC):
    # Genotype one SV interval from the fraction of reference-supporting
    # ("normal") read pairs at its breakpoint(s): mostly normal -> GT_REF,
    # mostly abnormal -> GT_HOM, in between -> GT_HET.
    # NOTE(review): sv_length is accepted but not used here — confirm intent.
    func_logger = logging.getLogger("%s-%s" % (genotype_interval.__name__, multiprocessing.current_process()))
    # Insertions have a single breakpoint; other SV types have two.
    locations = [start, end] if sv_type != "INS" else [start]
    total_normal, total = 0, 0
    for location in locations:
        total_normal_, total_bases_, total_ = count_reads_supporting_ref(chrom, location, location, bam_handle,
                                                                         isize_min, isize_max, window)
        total_normal += total_normal_
        total += total_
    # max(1, total) guards against division by zero when no reads were seen.
    normal_frac = float(total_normal) / float(max(1, total))
    gt = GT_REF
    if normal_frac < 1 - normal_frac_threshold:
        gt = GT_HET if normal_frac >= normal_frac_threshold else GT_HOM
    func_logger.info("For interval %s:%d-%d %s counts are %d, %d and normal_frac is %g gt is %s" % (
        chrom, start, end, sv_type, total_normal, total, normal_frac, gt))
    return gt
def parse_interval(interval):
    """Extract (chrom, pos, end, sv_type, svlen) from a MetaSV BED interval.

    The interval name encodes one or more candidate calls, joined by ``:``,
    each of the form ``<base64-info>,<sv_type>,<length>,<methods>``.  The
    highest-priority SV type present selects which sub-call is used.
    """
    chrom = interval.chrom
    pos = interval.start
    end = interval.end
    sub_names = interval.name.split(":")
    # List comprehensions instead of map()/leaky comprehension variables so
    # the code behaves identically on Python 2 and Python 3.
    sub_lengths = [sub.split(",")[2] for sub in sub_names]
    sub_lengths = [int(length) for length in sub_lengths]
    sub_types = [sub.split(",")[1] for sub in sub_names]
    sub_methods = [sub.split(",")[3] for sub in sub_names]
    try:
        # Bug fix: this previously read the loop variable ``name`` leaked by
        # a Python 2 list comprehension (i.e. the *last* sub-name); on
        # Python 3 that raised NameError.  Reference the element explicitly.
        info = json.loads(base64.b64decode(sub_names[-1].split(",")[0]))
    except TypeError:
        # Not valid base64/JSON -- fall back to an empty info dict.
        info = dict()
    if len(interval.fields) > 9:
        info.update(json.loads(base64.b64decode(interval.fields[9])))
    # NOTE(review): ``info`` is currently unused below; the decode is kept
    # for parity with upstream in case callers rely on its validation.
    index_to_use = 0
    svlen = -1
    # Fixed priority order when several SV types were merged into one call.
    for sub_type in ["DEL", "INV", "DUP", "ITX", "CTX", "INS"]:
        if sub_type in sub_types:
            index_to_use = sub_types.index(sub_type)
            break
    sv_type = sub_types[index_to_use]
    # Soft-clip (SC) insertion calls and assembled (AS) calls carry refined
    # breakpoints/length in BED fields 6-8; prefer those when present.
    if ("SC" in sub_methods[index_to_use] and sv_type == "INS") or ("AS" in sub_methods[index_to_use]):
        pos, end, svlen = [int(field) for field in interval.fields[6:9]]
    if svlen < 0:
        svlen = sub_lengths[index_to_use]
    if sv_type == "DEL":
        # Deletions are reported with negative SVLEN, per VCF convention.
        svlen = -svlen
    return chrom, pos, end, sv_type, svlen
def genotype_intervals_callback(result, result_list):
    """Pool callback: collect a worker's genotyped BED path, skipping None."""
    if result is None:
        return
    result_list.append(result)
def genotype_intervals(intervals_file=None, bam=None, workdir=None, window=GT_WINDOW, isize_mean=ISIZE_MEAN,
                       isize_sd=ISIZE_SD, normal_frac_threshold=GT_NORMAL_FRAC):
    # Genotype every interval in ``intervals_file`` against ``bam`` and write
    # each input record plus a genotype column to <workdir>/genotyped.bed.
    # Returns the path of the written BED file.
    func_logger = logging.getLogger("%s-%s" % (genotype_intervals.__name__, multiprocessing.current_process()))

    if workdir and not os.path.isdir(workdir):
        os.makedirs(workdir)
    pybedtools.set_tempdir(workdir)

    genotyped_intervals = []
    start_time = time.time()
    # "Normal" insert sizes are mean +/- 3 standard deviations (floored at 0).
    isize_min = max(0, isize_mean - 3 * isize_sd)
    isize_max = isize_mean + 3 * isize_sd
    try:
        bam_handle = pysam.Samfile(bam, "rb")
        for interval in pybedtools.BedTool(intervals_file):
            chrom, start, end, sv_type, svlen = parse_interval(interval)
            genotype = genotype_interval(chrom, start, end, sv_type, svlen, bam_handle, isize_min, isize_max, window,
                                         normal_frac_threshold)
            fields = interval.fields + [genotype]
            genotyped_intervals.append(pybedtools.create_interval_from_list(fields))
        bedtool = pybedtools.BedTool(genotyped_intervals).moveto(os.path.join(workdir, "genotyped.bed"))
    except Exception as e:
        func_logger.error('Caught exception in worker thread')
        # This prints the type, value, and stack trace of the
        # current exception being handled.
        traceback.print_exc()
        print()
        raise e
    func_logger.info(
        "Genotyped %d intervals in %g minutes" % (len(genotyped_intervals), (time.time() - start_time) / 60.0))
    return bedtool.fn
def parallel_genotype_intervals(intervals_file=None, bam=None, workdir=None, nthreads=1, chromosomes=[],
                                window=GT_WINDOW, isize_mean=ISIZE_MEAN, isize_sd=ISIZE_SD,
                                normal_frac_threshold=GT_NORMAL_FRAC):
    # Shard the intervals across ``nthreads`` worker processes, genotype each
    # shard with genotype_intervals(), then merge and sort the per-worker
    # outputs into <workdir>/genotyped.bed.  Returns the merged path or None.
    # NOTE(review): the mutable default ``chromosomes=[]`` is tolerable only
    # because it is never mutated (it is rebound to a set below).
    func_logger = logging.getLogger("%s-%s" % (parallel_genotype_intervals.__name__, multiprocessing.current_process()))

    if not intervals_file:
        func_logger.warning("No intervals file specified. Perhaps no intervals to process")
        return None

    if workdir and not os.path.isdir(workdir):
        os.makedirs(workdir)

    chromosomes = set(chromosomes)
    start_time = time.time()
    bedtool = pybedtools.BedTool(intervals_file)
    # An empty ``chromosomes`` set means "keep everything".
    selected_intervals = [interval for interval in bedtool if not chromosomes or interval.chrom in chromosomes]
    nthreads = min(len(selected_intervals), nthreads)
    # Ceiling division (Python 2 integer division) so every interval lands
    # in exactly one shard.
    intervals_per_process = (len(selected_intervals) + nthreads - 1) / nthreads

    pool = multiprocessing.Pool(nthreads)
    genotyped_beds = []
    for i in xrange(nthreads):
        process_workdir = os.path.join(workdir, str(i))
        if not os.path.isdir(process_workdir):
            os.makedirs(process_workdir)
        # Persist this worker's shard so the child process can re-read it.
        process_intervals = pybedtools.BedTool(
            selected_intervals[i * intervals_per_process: (i + 1) * intervals_per_process]).saveas(
            os.path.join(process_workdir, "ungenotyped.bed"))
        kwargs_dict = {"intervals_file": process_intervals.fn, "bam": bam, "workdir": process_workdir, "window": window,
                       "isize_mean": isize_mean, "isize_sd": isize_sd, "normal_frac_threshold": normal_frac_threshold}
        pool.apply_async(genotype_intervals, kwds=kwargs_dict,
                         callback=partial(genotype_intervals_callback, result_list=genotyped_beds))

    pool.close()
    pool.join()

    func_logger.info("Following BED files will be merged: %s" % (str(genotyped_beds)))

    if not genotyped_beds:
        func_logger.warn("No intervals generated")
        return None

    pybedtools.set_tempdir(workdir)
    # Concatenate all per-worker BEDs without merging overlapping records,
    # then sort into the final output file.
    bedtool = pybedtools.BedTool(genotyped_beds[0])
    for bed_file in genotyped_beds[1:]:
        bedtool = bedtool.cat(pybedtools.BedTool(bed_file), postmerge=False)
    bedtool = bedtool.sort().moveto(os.path.join(workdir, "genotyped.bed"))

    func_logger.info("Finished parallel genotyping of %d intervals in %g minutes" % (
        len(selected_intervals), (time.time() - start_time) / 60.0))
    return bedtool.fn
if __name__ == "__main__":
    # Command-line driver around parallel_genotype_intervals().
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    logging.basicConfig(level=logging.INFO, format=FORMAT)
    logger = logging.getLogger(__name__)

    parser = argparse.ArgumentParser(
        description="Genotype final BED output from MetaSV assembly",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # type=file is Python 2 only: it opens the file, validating existence.
    parser.add_argument("--bam", help="BAM", required=True, type=file)
    parser.add_argument("--chromosomes", nargs="+",
                        help="Chromosomes to process. Leave unspecified to process all intervals.", default=[])
    parser.add_argument("--workdir", help="Working directory", default="work")
    parser.add_argument("--nthreads", help="Number of threads to use", default=1, type=int)
    parser.add_argument("--intervals_file", help="Final BED output from MetaSV assembly", required=True, type=file)
    parser.add_argument("--window", help="Window to use for genotyping", default=GT_WINDOW, type=int)
    parser.add_argument("--isize_mean", help="Insert size mean", default=ISIZE_MEAN, type=float)
    parser.add_argument("--isize_sd", help="Insert size standard deviation", default=ISIZE_SD, type=float)
    parser.add_argument("--normal_frac", help="Minimum fraction of normal reads to call heterozygous",
                        default=GT_NORMAL_FRAC, type=float)
    args = parser.parse_args()

    logger.info("Command-line: " + " ".join(sys.argv))

    genotyped_bed = parallel_genotype_intervals(args.intervals_file.name, args.bam.name, args.workdir, args.nthreads,
                                                args.chromosomes, args.window, args.isize_mean, args.isize_sd,
                                                args.normal_frac)
    # Exit with a POSIX status code describing success/failure.
    if genotyped_bed:
        logger.info("Generated genotyped BED as %s" % genotyped_bed)
        sys.exit(os.EX_OK)
    else:
        logger.error("No genotyped BED generated")
        sys.exit(os.EX_DATAERR)
| chapmanb/metasv | metasv/genotype.py | Python | bsd-2-clause | 9,805 | [
"pysam"
] | 48da4376d50eaa423ba4b870616b0a6d4786f772f725049af4cdbcbd86c79792 |
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Bures cost between Gaussian distributions."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.test_util
from ott.core import sinkhorn
from ott.geometry import costs
from ott.geometry import pointcloud
class SinkhornTest(jax.test_util.JaxTestCase):
    # Exercises the Sinkhorn solver on point clouds of Gaussian distributions
    # compared with the Bures cost, plus the unbalanced Bures cost function.

    def setUp(self):
        super().setUp()
        self.eps = 1.0
        self.n = 11
        self.m = 13
        self.dim = 7
        self.rngs = jax.random.split(jax.random.PRNGKey(0), 6)
        x = jax.random.normal(self.rngs[0], (self.n, self.dim, self.dim))
        y = jax.random.normal(self.rngs[1], (self.m, self.dim, self.dim))
        # Build symmetric PSD covariance matrices sig = A @ A^T per point.
        sig_x = jnp.matmul(x, jnp.transpose(x, (0, 2, 1)))
        sig_y = jnp.matmul(y, jnp.transpose(y, (0, 2, 1)))
        m_x = jax.random.uniform(self.rngs[2], (self.n, self.dim))
        m_y = jax.random.uniform(self.rngs[3], (self.m, self.dim))
        # Each Gaussian is flattened to [mean, vec(covariance)].
        self.x = jnp.concatenate((m_x.reshape(
            (self.n, -1)), sig_x.reshape((self.n, -1))),
            axis=1)
        self.y = jnp.concatenate((m_y.reshape(
            (self.m, -1)), sig_y.reshape((self.m, -1))),
            axis=1)
        # Random positive weights, normalized to probability vectors.
        a = jax.random.uniform(self.rngs[4], (self.n,)) + .1
        b = jax.random.uniform(self.rngs[5], (self.m,)) + .1
        self.a = a / jnp.sum(a)
        self.b = b / jnp.sum(b)

    @parameterized.named_parameters(
        dict(testcase_name='ker-batch', lse_mode=False, online=False))
    def test_bures_point_cloud(self, lse_mode, online):
        """Two point clouds of Gaussians, tested with various parameters."""
        threshold = 1e-3
        geom = pointcloud.PointCloud(
            self.x, self.y,
            cost_fn=costs.Bures(dimension=self.dim, regularization=1e-4),
            online=online,
            epsilon=self.eps)
        errors = sinkhorn.sinkhorn(
            geom,
            a=self.a,
            b=self.b,
            lse_mode=lse_mode).errors
        # Last recorded error (entries of -1 mark unused slots) must be
        # below the convergence threshold.
        err = errors[errors > -1][-1]
        self.assertGreater(threshold, err)

    def test_regularized_unbalanced_bures(self):
        """Tests Regularized Unbalanced Bures."""
        # Prepend a mass coordinate to a Gaussian from each cloud.
        x = jnp.concatenate((jnp.array([0.9]), self.x[0, :]))
        y = jnp.concatenate((jnp.array([1.1]), self.y[0, :]))
        rub = costs.UnbalancedBures(self.dim, 1, 0.8)
        # The cost must be finite and symmetric in its arguments.
        self.assertIsNot(jnp.any(jnp.isnan(rub(x, y))), True)
        self.assertIsNot(jnp.any(jnp.isnan(rub(y, x))), True)
        self.assertAllClose(rub(x, y), rub(y, x), rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
    # Allow running this test module directly.
    absltest.main()
| google-research/ott | tests/core/sinkhorn_bures_test.py | Python | apache-2.0 | 3,043 | [
"Gaussian"
] | 54380e76224c5c1b3c167cbdfeb210a87dd5970b729a3d693ade1fdd3d365fa6 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from kivy.uix.widget import Widget
from kivy.uix.settings import SettingItem
import ORCA.Globals as Globals
__all__ = ['SettingSpacer',
'SettingHidden',
'GetLanguageList',
'GetGestureList',
'GetPageList',
'GetActionList',
'GetSendActionList']
class SettingSpacer(Widget):
    """ Internal class, not documented. """
    # Bare spacer widget used to visually separate entries on settings pages.
    pass
class SettingHidden(SettingItem):
    """ a hidden setting """
    # Setting entry with no behavior of its own; used for settings that are
    # stored on a panel without presenting any extra control to the user.
    pass
def GetLanguageList() -> List[str]:
    """ Return the (cached, sorted) list of all defined language vars.

    Each entry has the form "<string> [[<id>]]".  The list is built once on
    first call and memoized as a function attribute, like the other
    Get*List helpers in this module.
    """
    uKey:str
    uValue:str

    if "aList" not in vars(GetLanguageList):
        GetLanguageList.aList=[]
        for uKey in Globals.oLanguage.dIDToString:
            uValue="%s [[%s]]" % (Globals.oLanguage.dIDToString[uKey],uKey)
            # Bug fix: the original condition ended in "or True", which is
            # always true and defeated the duplicate check below.
            if not uValue in GetLanguageList.aList:
                GetLanguageList.aList.append(uValue)
        GetLanguageList.aList.sort()
    return GetLanguageList.aList
def GetGestureList() -> List[str]:
    """ Return the list of all Gesture Names"""
    # Built lazily on first call and memoized as a function attribute.
    if "aList" not in vars(GetGestureList):
        GetGestureList.aList = sorted(Globals.oTheScreen.dGestures)
    return GetGestureList.aList
def GetPageList() -> List[str]:
    """ Return the list of all Page Names"""
    # Built lazily on first call and memoized as a function attribute.
    if "aList" not in vars(GetPageList):
        GetPageList.aList = sorted(Globals.oTheScreen.oScreenPages)
    return GetPageList.aList
def GetActionList() -> List[str]:
    """ Return the list of all Action Names"""
    # Built lazily on first call and memoized as a function attribute.
    if "aList" not in vars(GetActionList):
        GetActionList.aList = sorted(Globals.oActions.dActionsCommands)
    return GetActionList.aList
def GetSendActionList() -> List[str]:
    """ Return the list of all Send Action Names"""
    # Like GetActionList, but restricted to actions named "Send *";
    # built lazily on first call and memoized as a function attribute.
    if "aList" not in vars(GetSendActionList):
        GetSendActionList.aList = sorted(uName for uName in Globals.oActions.dActionsCommands
                                         if uName.startswith("Send "))
    return GetSendActionList.aList
| thica/ORCA-Remote | src/ORCA/settings/setttingtypes/Base.py | Python | gpl-3.0 | 3,399 | [
"ORCA"
] | 78eec143bbc0d8599a2025957c2eda7ae07eb53c03643e33dcd1c573a823b631 |
'''
Classes to wrap up common tasks e.g., 'run a search, visit the urls, add content', or 'remove flagged content'
Class to employ one or more searchers and add returned urls to the database. This is intended to be generic.
The __init__ method to SearchAndAdd is passed an instance of the search module's main searcher class (called,
cleverly enough SearcherClass in all instances). The searcher modules have methods to run a single search or to
max out what the search engine will provide. These are:
run_search_engine(include, exclude, position)
getMaxResults(include, exclude)
'''
#import standard module(s)
import logging
import os
import random
import sys
from datetime import datetime
from HTMLParser import HTMLParseError
from optparse import OptionParser
#third party modules
from sqlobject.dberrors import DuplicateEntryError
#custom modules
import b_googleSearch as google
import b_bingSearch as bing
import b_dbRoutines as dbRoutines
#import django_dbRoutines as dbRoutines
import projectSpecs as projectSpecs
import captureContent
import scorer as scorerRoutines
#instansiate imported classes (shared, module-level singletons)
dbUtils = dbRoutines.DatabaseMethods()
captureUtils = captureContent.CaptureContent()
# Default searcher set used by SearchAndAddUrls when none is named.
searchers = [google.SearcherClass(),bing.SearcherClass()]
#these allow easy access to one or the other as a command line option
namedSearchers = {'google':google.SearcherClass(), 'bing': bing.SearcherClass()}
scorer = scorerRoutines.CountAndScore()
#log settings
LOG_NAME = "master.log"
LOG_LEVEL = logging.DEBUG
#return codes (re-exported from projectSpecs for local convenience)
RETURN_SUCCESS = projectSpecs.RETURN_SUCCESS
RETURN_FAIL = projectSpecs.RETURN_FAIL
class Visitor():
    '''Routines for visiting urls, parsing html, etc. They import functionality from separate modules that: run search engines; capture content
    from the urls; dig the date out of blog postings; and interact with the database.
    '''

    def __init__(self):
        self._defaultVisitInterval = 86400 #seconds, 86400 sec/day

    def dueToVisit(self, visitInterval, lastVisit=None):
        #figure out whether it's time to visit given how often we want to ping a site, when we last visited, and the current time
        # A url with no recorded lastVisit is always due.
        weAreDue = True
        minInterval = visitInterval or self._defaultVisitInterval
        if lastVisit:
            elapsedTime = datetime.now() - lastVisit
            # NOTE(review): timedelta.seconds ignores whole days — an interval
            # longer than 24h is effectively compared modulo a day; confirm.
            if elapsedTime.seconds < minInterval:
                weAreDue = False
        return weAreDue

    def readAndStoreSingleUrl(self, urlid, streamType, visitInterval = None, socket_timeout = 2):
        '''given a urlid, find the url, visit it, and store its html int he database
        '''
        #usd visit interval passed in or the default
        visitInterval = visitInterval or self._defaultVisitInterval
        # Look the url record up; failures here propagate to the caller.
        try:
            urlObj = dbUtils.getUrl(urlid)
            url = urlObj.url
        except:
            msg = "Couldn't access the url %s"%str(urlid)
            raise
        try:
            #if we're due for a visit, go for it
            if self.dueToVisit(visitInterval, urlObj.visitDate):
                rawData = captureUtils.acquire_data(name = url, streamType='u', socket_timeout = socket_timeout)
                logging.info("We got content from url %i"%urlid)
                uRawData = unicode(rawData, errors = 'ignore') #the ignore flag drops bad unicode
                urlObj.visitDate = datetime.now()
                #check if we have an entry for this url in the urlHtml table
                htmlObj = dbUtils.getHtmlForUrl(urlid)
                #if we have an entry for this url, update the record; otherwise make a new one
                if htmlObj.count() >0: #we can just update the current record
                    htmlObj[0].html=uRawData
                else:
                    dbRoutines.UrlHtml(urlid = urlid, html = uRawData)
                ##TODO write a routine to cull urls that can't be read for whatever reason
                urlObj.readSuccesses = urlObj.readSuccesses + 1
                return RETURN_SUCCESS
        except Exception, e: #we don't really care why we can't load the data
            # Record the failure on the url row, then re-raise for the caller.
            logging.info("Failed to load take from %s into database"%url)
            urlObj.readFailures = urlObj.readFailures+ 1
            raise
        # Not due for a visit yet: still reported as success.
        return RETURN_SUCCESS

    def visitUrls(self, searchid = None, urlid = None, limit = None, visitInterval = None):
        '''
        Visits url(s) in the database and loads its html to the database. Mostly for testing, this can be limited:
        1. If a urlid is provided, we only visit that url;
        2. If a searchid is provided we only visit urls for that search (up to limit if provided).
        '''
        visitInterval = visitInterval or self._defaultVisitInterval
        if searchid: #return urls up to the limit ...
            urls = dbUtils.getUrlsForSearch(searchid)
            urlCount = len(urls)
            urlIx = 0
            while urlIx < urlCount: #... then visit each
                try:
                    ret = self.readAndStoreSingleUrl(urlid = urls[urlIx].id, visitInterval = visitInterval, streamType = 'u')
                except Exception ,e:
                    # Per-url failures are logged but don't stop the sweep.
                    logging.info("url %i: %s"%(urls[urlIx].id, e))
                urlIx += 1
        elif urlid: #only visit the search specified (returns RETURN_SUCCESS if it's happy)
            # NOTE(review): ``urls``/``urlIx`` are undefined in this branch —
            # this should presumably pass ``urlid`` directly; as written it
            # raises a NameError that the bare except below swallows. Confirm.
            try:
                return self.readAndStoreSingleUrl(urlid = urls[urlIx].id, visitInterval = visitInterval, streamType = 'u')
            except:
                logging.info("url %i"%urlid)
        else: #Neither a searchid nor a urlid has been provided, so visit them all. We'll
            # do this by passing the request back to the top of this routine.
            allSearches = dbUtils.getAllSearches()
            for s in allSearches:
                searchid = s.id
                self.visitUrls(searchid = searchid, urlid = urlid, limit = limit, visitInterval = visitInterval)
            a=1
        return RETURN_SUCCESS

    def parseHtmlInDb(self, nukeHtmlFlag = False, urlAddDepth =1, urlid = None, searchid = None, limit = None):
        '''Finds html stashed in the database (url table).
        1. If a searchid specified, parse all urls associated with the search, according to the search's parsing criteria
        2. If a url is specified, find all the searches that use it. Then parse it separately for each of the searches.
        3. The parsed content is stored by search, so different bits of a blog might show up in different places
        4. If the nukeHtmlFlag is set, the html will be deleted after all the parsing is complete
        '''
        urls = None
        if searchid: #create a list of urls up to the limit ...
            urls = dbUtils.getUrlsForSearch(searchid, limit = limit)
        elif urlid: #create a one-element list of the url object
            urls = [dbUtils.getUrl(urlid)]
        else:
            urls = dbUtils.getAllUrls()
            urls = urls[0:limit]
        urlCount = urls.count()
        #if we don't have valid urls (maybe a non-existing one was specified) declarer succes and return
        if urlCount ==0:
            logging.info("no valid urls to parse")
            return RETURN_SUCCESS
        #So far, so good. Now loop thru the urls
        for url in urls:
            loopUrlid = url.id #this is for sanity, as urlid is an input parameter
            #create a list of parse criteria objects
            criteriaObjects = []
            if searchid: #i.e., if searchid in arguments get the criterion for a single search
                criteriaObjects = [dbUtils.getParseCriteriaForSearch(searchid)]
            elif urlid: #i.e., if urlid in arguments, get criteria for each search that uses the url
                criteriaObjects = dbUtils.getParseCriteriaForUrl(urlid)
            else: #otherwise, we'll grab the criteria for the url in this loop
                logging.debug("getting parse criteria for url %i" %loopUrlid)
                criteriaObjects = dbUtils.getParseCriteriaForUrl(loopUrlid)
            #find the html for this url
            htmlObj = dbUtils.getHtmlForUrl(loopUrlid)
            html = None
            if htmlObj.count() > 0:
                html = htmlObj[0].html
            #if the html is any good, parse away
            if html:
                if len(html)> 0 and not url.deleteMe:
                    #Go thru our criteria objects (note each is associated with a *search*, not a url - that's why 'loopSearchid'.
                    for critDict in criteriaObjects: #
                        criteriaSelectObj = critDict['criteria']
                        loopSearchid = critDict['searchid']
                        #since a search can have a bunch of criteria, we'll parse separately for each
                        for critObj in criteriaSelectObj:
                            try:
                                #parse according to the include/exclude criteria
                                capture= captureUtils.capture_main(name=html, streamType='o', include= critObj.include, exclude = critObj.exclude,
                                                                  minLength=10, andOr = critObj.andOr, socket_timeout=2)
                                #...and add the content, new urls found to the database
                                ret = dbUtils.addCatchFromUrlVisit(urlid = loopUrlid, searchid = loopSearchid, catchDict = capture,
                                                                   urlAddDepth= urlAddDepth, criteriaid = critObj.id)
                                '''if len(capture['polishedCont'])>0:
                                    logging.debug("For search %i, criteria %i added %s"%(loopSearchid, critObj.id, capture['polishedCont']))
                                else:
                                    a=1
                                '''
                            except HTMLParseError, e:
                                #if we can't parse this html, flag it for deletion
                                url.deleteMe = True
                                logging.info(e)
                                pass
                            except Exception, e:
                                # Other per-criterion failures: log and keep going.
                                logging.info("Can't parse out url %i %s: %s"%(url.id, url.url, e))
            #when done with parsing this url's html w/ all parse criteria delete the html text
            if nukeHtmlFlag:
                htmlObj = dbUtils.getHtmlForUrl(loopUrlid)
                htmlObj.html = None
                pass
class SearchAndAddUrls():
    '''Routines to run the search engines and add the take to the database
    '''

    def __init__(self, searcherClassList):
        # searcherClassList: iterable of searcher instances, each exposing
        # getMaxResults(include, exclude) and an ``engineName`` attribute.
        self._searcherList = searcherClassList

    def addUrlsAllSearches(self):
        # Run every search stored in the database through addUrlsForSearch().
        searchObj = dbUtils.getSearchObj()
        searches = searchObj.select()
        for s in searches:
            self.addUrlsForSearch(s.id)

    def addUrlsForSearch(self, searchid = None, deleteMe = False, include = None, exclude = None, doOrder = False):
        '''Runs all the search engines specified at instansiation and adds the urls found to the database.

        A searchid is required; the include/exclude criteria are loaded from
        the database record for that search (the include/exclude parameters
        are overwritten).  Raises WrapperError when no searchid is given or
        the search specs cannot be gathered.
        '''
        if searchid:
            dbResults = dbUtils.gatherSearchSpecs(searchid)
            searchDbObj=dbResults[0]
            if not isinstance(searchDbObj, Exception):
                include = searchDbObj.include
                exclude = searchDbObj.exclude
                for searcher in self._searcherList:
                    results = searcher.getMaxResults(include = include, exclude = exclude)
                    logging.info("found %i results from %s" %(len(results), searcher.engineName))
                    retCode = dbUtils.addUrlsFromList(results, deleteMe = deleteMe, searchId = searchid, source = searcher.engineName, order = doOrder)
                    if retCode == RETURN_FAIL:
                        msg = "%s Failed to get results from %s"%(retCode, searcher.engineName)
                        logging.info(msg)
                        # NOTE(review): this *returns* a WrapperError instance
                        # while the branches below *raise* — confirm intent.
                        return WrapperError(msg)
            else:
                msg = "Failed to gather search specs for search %i"%searchid
                logging.info(msg)
                raise WrapperError(msg)
        else:
            msg="No search id provided to addUrlsForSearch"
            raise WrapperError(msg)

    def _set_logger(self):
        #this sets up the logging parameters. The log will appear at ./logs/master.log (or whatever is in the settings
        # at the top of this module).
        LOGDIR = os.path.join(os.path.dirname(__file__), 'logs').replace('\\','/')
        log_filename = LOGDIR + '/' + LOG_NAME
        logging.basicConfig(level=LOG_LEVEL,
                            format='%(module)s %(funcName)s %(lineno)d %(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename=log_filename,
                            filemode='w')
class WrapperError(Exception):
    """Generic error for url-related failures in this module.

    Carries the triggering value in ``parameter``; str() yields its repr.
    """
    def __init__(self, value):
        # Chain into Exception so .args, pickling, and generic handlers
        # behave like a normal exception (the original skipped this).
        super(WrapperError, self).__init__(value)
        self.parameter = value
    def __str__(self):
        return repr(self.parameter)
class MainCls():
'''
Parses the options and serves as the ringmaster, dispatching tasks requested by said options.
NOTE(review): leading indentation was lost in this dump; nesting is a best-effort
reading -- confirm against the original source.
'''
def __init__(self):
pass
def _set_logger(self):
#this sets up the logging parameters. The log will appear at ./logs/master.log (or whatever is in the settings
# at the top of this module).
LOGDIR = os.path.join(os.path.dirname(__file__), 'logs').replace('\\','/')
log_filename = LOGDIR + '/' + LOG_NAME
logging.basicConfig(level=LOG_LEVEL,
format='%(module)s %(funcName)s %(lineno)d %(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_filename,
filemode='w')
def main(self, testArgs=None):
# testArgs is a stand-in for sys.argv (the command line arguments) and used for testing
parser = OptionParser()
# Identify the options recognized
parser.add_option("-e", dest = "engines", action="store_true", help = "Only run search engines")
parser.add_option("-g", dest = "engineName", help = "Run only named search google|bing")
parser.add_option("-v", dest= "visit", action="store_true", help = "Only visit urls and collect html")
parser.add_option("-p", dest= "parse", action="store_true", help = "Only parse html")
parser.add_option("-u", dest = "url", help = "Specify a url by its ID")
parser.add_option("-s", dest = "search", help = "Specify a search by its ID")
parser.add_option("-l", dest = "limit", help = "Maximum number of items to process")
parser.add_option("-t", dest = "tests", action="store_true", help = "Run tests")
parser.add_option("-m", dest = "more", action="store_true", help = "Tell me more")
parser.add_option("-o", dest = "setVisInt",action = "store_true", help = "(with visit) Override default visit interval")
parser.add_option("-n", dest = "nukeHtml",action = "store_true", help = "(with parse) Destroys captured html after parsing")
parser.add_option("-d", dest = "depth", help = "(with parse) Set maximum depth storing links")
parser.add_option("-k", dest = "score", action="store_true", help = "Score the content. Use -k -f to force overwrite of existing scores.")
parser.add_option("-f", dest = "forceScore", action="store_true", help = "(with -k) forces overwrite of scores.")
#testArgs are applied from __main__ and used for debugging (otherwise, we use the ones supplied at the command-line)
if testArgs:
(options, args) = parser.parse_args(testArgs)
else:
(options, args) = parser.parse_args()
#gather the common flags; defaults are all None until overridden below
urlid = None; searchid = None; visitInterval=None; limit = None; nukeHtml=None; engineName = None; urlAddDepth = None
if options.url:
urlid = int(options.url)
if options.search:
searchid = int(options.search)
if options.setVisInt:
visitInterval = .0001 #minimum time (seconds) between visits to a url - this allows immediate revisit
if options.limit:
limit = int(options.limit) #number of items to process
if options.nukeHtml:
nukeHtml = True
if options.engineName:
engineName = options.engineName
if options.depth:
urlAddDepth = int(options.depth)
# scoresOverride is only bound when -k is given; it is only read under
# "if options.score" below, so no UnboundLocalError can occur.
if options.score:
if options.forceScore:
scoresOverride = True
else:
scoresOverride = False
#Perform the indicated task (tasks are not mutually exclusive; several can run in one call)
#...run search engines only
if options.engines:
if options.search:
runEnginesFor(searchid = int(options.search), engineName = engineName)
else:
runEnginesAllSearches(engineName = engineName)
#...visit the urls in the database only
if options.visit:
visitCls = Visitor()
visitCls.visitUrls(searchid = searchid, urlid = urlid, limit = limit, visitInterval =visitInterval)
#...parse the stored html only
if options.parse: #invoke html parsing routines
visitCls = Visitor()
visitCls.parseHtmlInDb(nukeHtmlFlag = nukeHtml, urlid = urlid, searchid = searchid, limit = limit)
#...score the captured content
if options.score:
scorer.countAndScoreAll(overwrite = scoresOverride)
#run the tests (except for the test that runs this class)
if options.tests: #run the tests (except for the test that runs this class)
runTests()
#print the verbose version of help
if options.more:
msg = []
msg.append('The bot does three basic things: gets urls from search engines; visits the urls; capturing ' )
msg.append('the html; and applies screening criteria to find bits of content of interest. When running ' )
msg.append('routines, you can operate at a fairly granular level. You can: ')
msg.append('')
msg.append(' Run all the search engines for all the searches: -e')
msg.append(' Run one of the search engines: -e -g google ')
msg.append(' Run one of the search engines for a single search: -e -g google -s <search id')
msg.append('')
msg.append(' Visit all the urls in the database not visited today: -v ')
msg.append(' Visit all the urls regardless of when last visited: -v -o ')
msg.append(' Visit urls associated with a search: -v -s <search id> ')
msg.append(' Visit urls associated with a search, limiting the # of urls visited: -v s <search id> -l <number visited>')
msg.append('')
msg.append(' Parse all the html in the database, storing content bits, metadata, and links: -p')
msg.append(' Parse html for all urls for a search: -p -s <search id> ')
msg.append(' Parse html for all urls for a search, up to a maximum number: -p -s <search id> -l <url limit> ')
msg.append(' Parse html for a single url (it may be used associated w/ different searches): -p -u <url id> ')
msg.append(' Parse html for a single url, getting rid of the raw html after parsing: -p -u <url id> -n ')
msg.append(' Parse html for a single url, setting max. "degrees of separation" (links to links ...) from search engine return: -p -u <url id> -d 2')
msg.append(' To do everything, set separate instances as -e, -v, -p, -k (engines, visit, parse, score)')
for m in msg:
print m
###
###*************************************** method wrappers triggered by command line options*********************************************
###
#wrappers: visitUrl visits urls; runEnginesAllSearches/runEnginesFor run the search engines and add the urls found to the database
def visitUrl(searchid = None, urlid = None, limit = None, visitInterval = None):
    # Thin wrapper: delegate a single url-visiting run to a fresh Visitor instance.
    visitor = Visitor()
    visitor.visitUrls(searchid, urlid, limit, visitInterval)
def runEnginesAllSearches(engineName = None):
    """Run the search engines over every search in the database.

    engineName: optional engine key ('google'|'bing'); when given, only that
    engine is run, otherwise every engine registered in namedSearchers runs.
    """
    #If a search engine name is provided, override the 'searchers' list with a single entity; otherwise run all available
    if engineName:
        # Narrowed from a bare except: only an unknown engine name is expected here.
        try:
            searchers = [namedSearchers[engineName]]
        except KeyError:
            print("Sorry, we don't use a search engine called %s" % str(engineName))
            return
    #otherwise do them all
    else:
        searchers = [s for s in namedSearchers.itervalues()]
    clsObj = SearchAndAddUrls(searchers)
    ret = clsObj.addUrlsAllSearches()
def runEnginesFor(searchid, engineName = None):
    """Run the search engines for a single search id.

    engineName: optional engine key ('google'|'bing'); when given, only that
    engine is run, otherwise every engine registered in namedSearchers runs.
    """
    #If a search engine name is provided, override the 'searchers' list (see top of this file) with a single entity; otherwise run all available
    if engineName:
        # Narrowed from a bare except: only an unknown engine name is expected here.
        try:
            searchers = [namedSearchers[engineName]]
        except KeyError:
            print("Sorry, we don't use a search engine called %s" % str(engineName))
            return
    #otherwise do them all
    else:
        searchers = [s for s in namedSearchers.itervalues()]
    clsObj = SearchAndAddUrls(searchers)
    ret = clsObj.addUrlsForSearch(searchid = searchid)
# Parse the html stored in the database and record the extracted content
# (several searches might share the same html).
def parseHtml(nukeHtmlFlag = False, urlAddDepth =None, urlid = None, searchid = None, limit = None):
    # Thin wrapper: delegate the parsing run to a fresh Visitor instance.
    worker = Visitor()
    worker.parseHtmlInDb(nukeHtmlFlag=nukeHtmlFlag, urlAddDepth=urlAddDepth, urlid=urlid, searchid=searchid, limit=limit)
def runTests():
    """Placeholder test-harness hook; intentionally does nothing yet."""
    pass
###
###*************************************** Tests*********************************************
###
def testVisitUrls():
    """Exercise several argument permutations of Visitor.visitUrls.

    Success only means the visitor did not crash; it does not guarantee
    that any content was actually fetched.
    """
    db_fixture = dbRoutines.testDbRoutines()
    visitor = Visitor()
    # Install a fake url/search pair to visit.
    fake = db_fixture.installSomeFakeRecords()
    urlid = fake['url']
    searchid = fake['srch']
    visit_interval = .000001  # seconds; small enough to force a revisit
    # url only
    assert visitor.visitUrls(urlid=urlid, visitInterval=visit_interval) == RETURN_SUCCESS
    # searchid only
    assert visitor.visitUrls(searchid=searchid, visitInterval=visit_interval) == RETURN_SUCCESS
    # searchid again (original repeated this permutation)
    assert visitor.visitUrls(searchid=searchid, visitInterval=visit_interval) == RETURN_SUCCESS
    # and once more
    assert visitor.visitUrls(searchid=searchid, visitInterval=visit_interval) == RETURN_SUCCESS
def testOpts():
    """Placeholder for option-parsing tests; not implemented yet."""
    pass
# Entry point: with command-line arguments, hand off to the option parser;
# with none, run a canned debugging sequence against search id 2.
# NOTE(review): leading indentation was lost in this dump; nesting is a
# best-effort reading -- confirm against the original source.
if __name__ == '__main__':
if len(sys.argv) >1: #run from command line
clsObj = MainCls()
clsObj.main()
if len(sys.argv) ==1: #run from wing for debugging
#makes some test runs on search 2
searchid = 2
clsObj=MainCls()
clsObj.main(testArgs = "-k ".split())
visitCls = Visitor()
visitCls.visitUrls(searchid = None, urlid = None, limit = None, visitInterval = None)
clsObj = MainCls() #instansiates class containing the option parser and main routing commands
#these do everything
'''
clsObj.main(testArgs = "-e -s 2 ".split()) #runs search on all engines
clsObj.main(testArgs = '-v -s 2 -o'.split()) #visits all urls
clsObj.main(testArgs = '-p -s 2 '.split()) #parses all content per content-level criteria
'''
#these do a quick and dirty run thru w/ only google, then 10 visit/parse operations
clsObj.main(testArgs = "-e -s 2 -g google ".split()) #runs search on google
clsObj.main(testArgs = '-v -s 2 -l 10 -o '.split()) #visits urls found, to a limit of 10 (even if visited recently)
clsObj.main(testArgs = '-p -s 2 -l 10 '.split()) #parses the urls to a limit of 10
cont = dbUtils.getContentForSearch(searchid = searchid)
urls = dbUtils.getUrlsForSearchWithGoodHtml(searchid = searchid)
a=1
''' #a few utilities
dbUtils.cleanUpOrphanedContent()
dbUtils.deleteUrlsForSearch(9)
cont = dbUtils.getContentForSearch(searchid = searchid)
urls = dbUtils.getUrlsForSearch(searchid = searchid)
urls = dbUtils.getUrlsForSearchWithGoodHtml(searchid = searchid)
a=1
'''
| pbarton666/buzz_bot | djangoproj/PES_web_site/trunk/djangoproj/djangoapp/crawler/b_taskWrappers.py | Python | mit | 21,979 | [
"VisIt"
] | 5c022c0a22fe74f54baf5974d20681d99fa3870bbcdc1724becf96f2ee512a55 |
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example test script for the concentric circle arc renderer
# Will attempt to open an image viewer to display the simulated plot.
# Comment out p.setShowDrawing(True) & p.setPlotting(False) to physically plot this
# Use p.setSaveDrawing(True) with p.setPlotting(False) to save the simulated plot.
import sys
import traceback
from vPiP import *
from vPiP.renderers.spiralArcRenderer import renderSpiralArc
from vPiP.renderers.conCircleArcRenderer import renderConcentricCircleArc
Vpip = vPiP.Vpip

# Input image to render; swap in one of the alternatives below to plot another.
filename = "../testImages/Vulcan.jpg"
# filename = "../testImages/TyneBridge.jpg"
# filename = "../testImages/SydneyOpera.jpg"
# filename = "../testImages/SydneyOperaNight.jpg"
# filename = "../testImages/HamptonCourt.jpg"
with Vpip() as p:
    p.setShowDrawing(True)
    #p.setSaveDrawing(True)
    p.setPlotting(False)
    try:
        # Render four concentric-circle-arc passes at different origins/sizes.
        renderConcentricCircleArc(filename, 300, 200, 600, 10, p)
        renderConcentricCircleArc(filename, 200, 1000, 800, 15, p)
        renderConcentricCircleArc(filename, 0, 1950, 1200, 20, p)
        renderConcentricCircleArc(filename, 1250, 50, 3700, 25, p)
        p.goHome()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("test1 main thread exception : %s" % exc_type)
        traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
| brianinnes/pycupi | python/test6.py | Python | apache-2.0 | 1,892 | [
"Brian"
] | 224866a0bb7ee9621d020bc121109ba49c370c321bebff974ae8e7e13c7a4325 |
# Copyright (c) 2016, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#----------------------------------------------------------------------------
# Base directories.
#----------------------------------------------------------------------------
# Hostname and user.
# - Used for reporting, as well as specializing paths on a per-host basis.
import os, socket, getpass
# Hostname/user are used for reporting and per-host path specialization.
host = socket.gethostname().lower()
user = getpass.getuser()

# Base directory for input data; overridable via the TEMPENS_DATA_DIR env var.
data_dir = os.environ.get('TEMPENS_DATA_DIR', 'data')

# Directory for storing the results of individual training runs;
# overridable via the TEMPENS_RESULT_DIR env var.
result_dir = os.environ.get('TEMPENS_RESULT_DIR', 'r:/')
#----------------------------------------------------------------------------
# Theano configuration.
#----------------------------------------------------------------------------
# THEANO_FLAGS string: GPU device, float32, deterministic cuDNN conv algorithms,
# no garbage collection between ops, fast-math nvcc.
theano_flags = "device=gpu,floatX=float32,assert_no_cpu_op=warn,allow_gc=False,nvcc.fastmath=True,dnn.conv.algo_fwd=small,dnn.conv.algo_bwd_filter=deterministic,dnn.conv.algo_bwd_data=deterministic,print_active_device=0"
#----------------------------------------------------------------------------
# Training defaults.
#----------------------------------------------------------------------------
# NOTE(review): OrderedDict appears unused in this file -- possibly consumed by
# modules that import this config; confirm before removing.
from collections import OrderedDict

run_desc = 'baseline' # Name the results directory to be created for current run.
network_type = 'pi' # Valid values: 'pi', 'tempens'.
dataset = 'data_128' # Valid values: 'cifar-10', 'svhn'.
image_size = 128 #image dimension (square images assumed by the edit below)
whiten_inputs = None # EDITED Valid values: None, 'norm', 'zca'.
augment_noise_stddev = 0 # Controls the Gaussian noise added inside network during training.
augment_mirror = True # Enable horizontal flip augmentation.
augment_translation = 0 # Maximum translation distance for augmentation. Must be an integer.
augment_blur = True #Blur image
num_labels = 'all' # EDITED Total number of labeled inputs (1/10th of this per class). Value 'all' uses all labels.
corruption_percentage = 0 # How big percentage of input labels to corrupt.
num_epochs = 300 # Number of epochs to train.
max_unlabeled_per_epoch = None # Set this to use at most n unlabeled inputs per epoch.
minibatch_size = 100 # Samples per minibatch.
batch_normalization_momentum = 0.999 # Mean-only batch normalization momentum.
learning_rate_max = 0.003 # Maximum learning rate.
rampup_length = 80 # Ramp learning rate and unsupervised loss weight up during first n epochs.
rampdown_length = 50 # Ramp learning rate and Adam beta1 down during last n epochs.
rampdown_beta1_target = 0.5 # Target value for Adam beta1 for rampdown.
adam_beta1 = 0.9 # Default value.
adam_beta2 = 0.999 # Default value.
adam_epsilon = 1e-8 # Default value.
prediction_decay = 0.6 # Ensemble prediction decay constant (\alpha in paper).
unsup_weight_max = 100.0 # Unsupervised loss maximum (w_max in paper). Set to 0.0 -> supervised loss only.
load_network_filename = None # Set to load a previously saved network.
start_epoch = 0 # Which epoch to start training from. For continuing a previously trained network.
cuda_device_number = 0 # Which GPU to use.
random_seed = 1000 # Randomization seed.
#----------------------------------------------------------------------------
# Individual run customizations.
#----------------------------------------------------------------------------
# SVHN: Pi.
#run_desc = 'run-pi'
#network_type = 'pi'
#dataset = 'svhn'
#whiten_inputs = 'norm'
#augment_mirror = False
#augment_translation = 2
#num_labels = 500
#learning_rate_max = 0.003
#unsup_weight_max = 100.0
# SVHN: Temporal ensembling.
#run_desc = 'run-tempens'
#network_type = 'tempens'
#dataset = 'svhn'
#whiten_inputs = 'norm'
#augment_mirror = False
#augment_translation = 2
#num_labels = 500
#learning_rate_max = 0.001
#unsup_weight_max = 30.0
# CIFAR-10: Pi.
#run_desc = 'run-pi'
#network_type = 'pi'
#dataset = 'cifar-10'
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 4000
#learning_rate_max = 0.003
#unsup_weight_max = 100.0
# CIFAR-10: Temporal ensembling.
#run_desc = 'run-tempens'
#network_type = 'tempens'
#dataset = 'cifar-10'
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 4000
#learning_rate_max = 0.003
#unsup_weight_max = 30.0
# CIFAR-100: Pi.
#run_desc = 'run-pi'
#network_type = 'pi'
#dataset = 'cifar-100'
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 10000
#learning_rate_max = 0.003
#unsup_weight_max = 100.0
# CIFAR-100: Temporal ensembling.
#run_desc = 'run-tempens'
#network_type = 'tempens'
#dataset = 'cifar-100'
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 10000
#learning_rate_max = 0.003
#unsup_weight_max = 100.0
# CIFAR-100 plus Tiny Images: Pi.
#run_desc = 'run-pi'
#network_type = 'pi'
#dataset = 'cifar-100'
#aux_tinyimg = 500000
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 'all'
#learning_rate_max = 0.003
#unsup_weight_max = 300.0
#max_unlabeled_per_epoch = 50000
# CIFAR-100 plus Tiny Images: Temporal ensembling.
#run_desc = 'run-tempens'
#network_type = 'tempens'
#dataset = 'cifar-100'
#aux_tinyimg = 500000
#whiten_inputs = 'zca'
#augment_mirror = True
#augment_translation = 2
#num_labels = 'all'
#learning_rate_max = 0.003
#unsup_weight_max = 1000.0
#max_unlabeled_per_epoch = 50000
# SVHN with label corruption: Temporal ensembling.
#run_desc = 'run-tempens'
#network_type = 'tempens'
#dataset = 'svhn'
#whiten_inputs = 'norm'
#augment_mirror = False
#augment_translation = 2
#num_labels = 'all'
#learning_rate_max = 0.001
#corruption_percentage = 20
#unsup_weight_max = 300.0 if (corruption_percentage < 50) else 3000.0
#----------------------------------------------------------------------------
# Disable mirror and translation augmentation.
#----------------------------------------------------------------------------
#if True:
# augment_mirror = False
# augment_translation = 0
# run_desc = run_desc + '_noaug'
#----------------------------------------------------------------------------
# Automatically append dataset, label count, and random seed to run_desc.
#----------------------------------------------------------------------------
# BUGFIX: aux_tinyimg is only assigned inside the commented-out run
# customizations above, so referencing it below raised NameError on a
# default configuration. Default it to None when it was never defined.
try:
    aux_tinyimg
except NameError:
    aux_tinyimg = None

if corruption_percentage != 0:
    run_desc += '-corrupt%d' % corruption_percentage

# Tag runs that use the auxiliary tiny-images data.
if aux_tinyimg == 'c100':
    run_desc += '-auxcif'
elif aux_tinyimg == 500000:
    run_desc += '-aux500k'
else:
    assert(aux_tinyimg is None)

# Short textual form of the label count for the run name.
if num_labels == 'all':
    num_labels_str = 'all'
elif (num_labels % 1000) == 0:
    num_labels_str = '%dk' % (num_labels // 1000)
else:
    num_labels_str = '%d' % num_labels

# Short textual form of the dataset name.
if dataset == 'cifar-10':
    dataset_str = 'cifar'
elif dataset == 'cifar-100':
    dataset_str = 'cifar100'
else:
    dataset_str = dataset

run_desc = run_desc + ('_%s%s_%04d' % (dataset_str, num_labels_str, random_seed))
#----------------------------------------------------------------------------
| annafeit/web_labeling | config.py | Python | bsd-3-clause | 11,252 | [
"Gaussian"
] | c858ab497513ba906cdd8bb4c7ac50797a1bfad50fffd9cc650089f3f55f62f0 |
from numpy import sqrt, array, ones, zeros, dot, sign, float64, hstack, vstack, random, std
from numpy.random import uniform as unf
from numpy.random import random_integers, rand, randint, shuffle
from numpy.linalg import norm
import sys
from collections import defaultdict, Counter
from experiments import *
from classrooms import *
from active_learners import *
from kernels import *
from itertools import *
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import KDTree
from math import *
class Create_Noisy_Labels(object):
'''Inject label noise into a +/-1 label array and track the overall noise rate.
NOTE(review): leading indentation was lost in this dump; nesting is a best-effort
reading -- confirm against the original source.
'''
def __init__(self):
# Running counters across all calls, consumed by noiserate().
self.noisy = 0
self.total = 0
def randomclassnoise(self, alpha, y):
# receives a parameter alpha and an array of labels, which it randomly flips with probability alpha
temp_y = zeros(y.shape[0])
self.total += y.shape[0]
for i in range(y.shape[0]):
if rand() < alpha:
self.noisy += 1
temp_y[i] = -y[i]
else:
temp_y[i] = y[i]
return temp_y
def linearclassnoise(self, intercept, slope, w_star, x, y):
# receives intercept (noise at the separator), slope (how noise changes with distance from the separator)
# w_star (optimal separator) datapoints x and array of labels y
# NOTE(review): flip probability is slope/|w_star.x| + intercept, i.e. noise
# DECREASES with distance from the separator -- confirm that is the intent.
temp_y = zeros(y.shape[0])
self.total += y.shape[0]
for i in range(y.shape[0]):
if rand() < slope * pow(abs(dot(w_star, x[i])),-1) + intercept:
self.noisy += 1
temp_y[i] = -y[i]
else:
temp_y[i] = y[i]
return temp_y
def randomballnoise(self, beta, y, dict_index):
# Flip whole neighborhoods (balls) at random until roughly beta*total labels
# are flipped; untouched entries keep their original labels.
# NOTE(review): the loop can overshoot beta*total by up to one ball, and a
# point covered by two chosen balls is double-counted -- confirm acceptable.
temp_y = zeros(y.shape[0])
noisy = 0
total = y.shape[0]
while noisy <= beta*total:
k = randint(0,y.shape[0])
for v in iter(dict_index[k]):
temp_y[v] = -y[v]
noisy += len(dict_index[k])
for j in range(y.shape[0]):
if temp_y[j] == 0:
temp_y[j] = y[j]
return temp_y
def noiserate(self):
# Fraction of all processed labels that were flipped (0.0 when nothing processed).
if self.total == 0:
return 0.0
else:
return self.noisy / float(self.total)
class Consensus_Dynamics(object):
'''Label-consensus update rules over a neighborhood graph.
opt_update == 1 runs synchronous sweeps over all nodes; otherwise nodes are
updated one at a time at random. opt_resp_iter is the number of iterations,
opt_num_points the number of nodes.
NOTE(review): leading indentation was lost in this dump; nesting is a
best-effort reading -- confirm against the original source.
'''
def __init__(self, opt_resp_iter, opt_update, opt_num_points):
self.opt_update = opt_update
self.opt_num_points = opt_num_points
self.opt_resp_iter = opt_resp_iter
def run_majority(self, inf_y, dict_index):
# run a majority algorithm: each node takes the sign of the sum of its
# neighbors' labels (ties resolve to -1 since sign 0 is not > 0)
temp_inf_y = zeros(self.opt_num_points)
if self.opt_update == 1:
for i in range(self.opt_resp_iter):
for k in dict_index.iterkeys():
sign = 0
for v in iter(dict_index[k]):
sign += inf_y[v]
if sign > 0:
temp_inf_y[k] = 1
else:
temp_inf_y[k] = -1
# sanity check: a node should never end up unlabeled
if temp_inf_y[k] == 0:
print "error"
print sign, temp_inf_y[k]
inf_y = temp_inf_y
else:
# asynchronous: update one random node per iteration
for i in range(self.opt_resp_iter):
k = randint(0,self.opt_num_points)
sign = 0
for v in iter(dict_index[k]):
sign += inf_y[v]
if sign > 0:
inf_y[k] = 1
else:
inf_y[k] = -1
return inf_y
def run_weighted_majority(self, inf_y, dict_index, dict_weight):
# run a weighted majority algorithm (based on distance)
# the distance represents the weights that it reads from
# NOTE(review): weights are raw distances, so FARTHER neighbors weigh MORE;
# confirm whether inverse-distance weighting was intended.
temp_inf_y = zeros(self.opt_num_points)
if self.opt_update == 1:
for i in range(self.opt_resp_iter):
for k in dict_index.iterkeys():
sign = 0
for v in izip(iter(dict_index[k]), iter(dict_weight[k])):
sign += inf_y[v[0]] * v[1]
if sum(dict_weight[k]) != 0:
sign /= sum(dict_weight[k])
if sign > 0:
temp_inf_y[k] = 1
else:
temp_inf_y[k] = -1
inf_y = temp_inf_y
else:
for i in range(self.opt_resp_iter):
k = randint(0, self.opt_num_points)
sign = 0
for v in izip(iter(dict_index[k]), iter(dict_weight[k])):
sign += inf_y[v[0]] * v[1]
if sum(dict_weight[k]) != 0:
sign /= sum(dict_weight[k])
if sign > 0:
inf_y[k] = 1
else:
inf_y[k] = -1
return inf_y
def run_prob_knn(self, inf_y, dict_index):
# run a probabilistic version of k-NN
# look at neighbors, calculate ratio of maximum class and probabilistically pick it up
# NOTE(review): under Python 2, count_most/len(...) is INTEGER division, so
# flip_prob is 0 unless the neighborhood is unanimous -- confirm intent.
temp_inf_y = zeros(self.opt_num_points)
if self.opt_update == 1:
for i in range(self.opt_resp_iter):
for k in dict_index.iterkeys():
temp = []
for v in iter(dict_index[k]):
temp.append(inf_y[v])
c = Counter(temp)
temp_label, count_most = c.most_common(1)[0]
flip_prob = count_most/len(dict_index[k])
# with flip prob choose the inf_label
if flip_prob < rand(1):
temp_inf_y[k] = temp_label
inf_y = temp_inf_y
else:
for i in range(self.opt_resp_iter):
k = randint(0, self.opt_num_points)
temp = []
for v in iter(dict_index[k]):
temp.append(inf_y[v])
c = Counter(temp)
temp_label, count_most = c.most_common(1)[0]
flip_prob = count_most/len(dict_index[k])
# with flip prob choose the inf_label
if flip_prob < rand(1):
inf_y[k] = temp_label
return inf_y
class create_nn_graph(object):
'''
Parameterize how sensors/points communicate their information to each other, including the associated cost.
The consensus game has a payout matrix where disagreement is costly and agreement is cheap --
basically a 2x2 matrix at the start that pushes for similar labels with some probability;
the inclusion of probability adds to the noise of the dataset.
Populates dict_index (node -> neighbor indices) and dict_weight (node -> distances).
NOTE(review): leading indentation was lost in this dump; nesting is a
best-effort reading -- confirm against the original source.
'''
def __init__(self, dist, opt_num_points):
# dist: (n, d) array of point coordinates; opt_num_points: number of points n
self.opt_num_points = opt_num_points
self.dict_index = defaultdict(list)
self.dict_weight = defaultdict(list)
self.dist = dist
def radius_nn_graph(self, radius):
# Case 1 - Sensors communicate their info only within a particular radius of themselves
tree_obj = KDTree(self.dist)
pair_points = tree_obj.query_pairs(radius,p=2.0,eps=0)
pair_points_list = list(pair_points)
for i in range(len(pair_points)):
pta, ptb = pair_points_list[i]
# Euclidean distance stored as the (symmetric) edge weight
dist_pts = norm(self.dist[pta] - self.dist[ptb],2)
self.dict_index[pta].append(ptb)
self.dict_index[ptb].append(pta)
self.dict_weight[pta].append(dist_pts)
self.dict_weight[ptb].append(dist_pts)
def knn_nn_graph(self, des_nbrs):
# Case 3 - K - nearest neighbors instead of distance
# NOTE(review): kneighbors includes each point as its own first neighbor.
nbrs = NearestNeighbors(n_neighbors=des_nbrs, algorithm='auto').fit(self.dist)
distances, indices = nbrs.kneighbors(self.dist)
distances = distances.tolist()
indices = indices.tolist()
for i in range(self.opt_num_points):
for j in range(des_nbrs):
self.dict_index[i].append(indices[i][j])
self.dict_weight[i].append(distances[i][j])
def gaussian_nn_graph(self, radius):
# Case 4 - A gaussian communication based on distance, (probabilistic):
# within the radius, an edge is kept only with probability 1 - distance.
tree_obj = KDTree(self.dist)
pair_points = tree_obj.query_pairs(radius,p=2.0,eps=0)
pair_points_list = list(pair_points)
for i in range(len(pair_points)):
pta, ptb = pair_points_list[i]
dist_pts = norm(self.dist[pta] - self.dist[ptb],2)
if dist_pts < rand(1):
self.dict_index[pta].append(ptb)
self.dict_index[ptb].append(pta)
self.dict_weight[pta].append(dist_pts)
self.dict_weight[ptb].append(dist_pts)
def train_classifiers_gen_error(dist, inf_y, opt_w, label_budget, internal_iters =6, num_trials=50):
'''
Learn based on the active learning algorithm and report angle-to-optimum errors.
dist is the distribution on which learning will take place
d is the number of dimensions (read from module scope)
inf_y is the current label set for the distribution
opt_w is the optimal separator for calculating the generalization error
label_budget is a list of label budgets to sweep
internal_iters is the number of iterations the active algorithm will take
num_trials is the number of times the distribution will be shuffled and algorithm tested
Returns (avg_algo, avg_svm, avg_sm, svm_std, algo_std, sm_std) arrays, one entry per budget.
NOTE(review): reads module globals y and d below; y.shape[0] was probably
meant to be inf_y.shape[0] -- confirm. Indentation was lost in this dump.
'''
num_points = y.shape[0]
dist_pre_consensus = UniformTestDataSet(d, dist, inf_y, num_points)
avg_svm_angle = zeros(len(label_budget))
avg_algo_angle = zeros(len(label_budget))
avg_sm_angle = zeros(len(label_budget))
svm_angle_std = zeros(len(label_budget))
algo_angle_std = zeros(len(label_budget))
sm_angle_std = zeros(len(label_budget))
learners = []
learners.append(lambda: MarginBasedTheoreticalParams(d,internal_iters,1,1,1))
#learners.append(lambda: SimpleMarginSoftSVMBatch(d, 10))
for r in range(len(label_budget)):
accuracy = libsvm_angle_compare_test(learners, dist_pre_consensus, num_trials, num_points, label_budget[r], opt_w, display = False)
# Get final direction
# Compute the generalization error and print
for i in range(len(accuracy)):
avg_svm_angle[r] += accuracy[i][0]
avg_algo_angle[r] += accuracy[i][1]
#avg_sm_angle[r] += accuracy[i][2]
# NOTE(review): accuracy[:][0] is just accuracy[0] (accuracy is a list);
# a per-column std was probably intended -- confirm.
svm_angle_std[r] = std(accuracy[:][0])
algo_angle_std[r] = std(accuracy[:][1])
sm_angle_std[r] = std(accuracy[:][2])
avg_svm_angle[r] /= len(accuracy)
avg_algo_angle[r] /= len(accuracy)
#avg_sm_angle[r] /= len(accuracy)
# Check accuracy or distance from optimal separator based on angle
# print "For Label budget of " +str(label_budget[p])
#print avg_algo_angle, avg_svm_angle
#print "Error Labels are "
#print algo_angle_std, svm_angle_std, sm_angle_std
return avg_algo_angle, avg_svm_angle, avg_sm_angle, svm_angle_std, algo_angle_std, sm_angle_std
def train_classifiers_class_error(dist, inf_y, label_budget, internal_iters =6, num_trials=50):
'''
This differs from the previous method and calculates the classification error in a non-realizable case.
Learn based on the active learning algorithm
dist is the distribution on which learning will take place
d is the number of dimensions (read from module scope)
inf_y is the current label set for the distribution
label_budget is a list of label budgets to sweep
internal_iters is the number of iterations the active algorithm will take
num_trials is the number of times the distribution will be shuffled and algorithm tested
Returns (avg_algo, avg_svm, avg_sm) arrays, one entry per budget (avg_sm stays zero).
NOTE(review): reads module globals y and d below; y.shape[0] was probably
meant to be inf_y.shape[0] -- confirm. Indentation was lost in this dump.
'''
num_points = y.shape[0]
dist_used = UniformTestDataSet(d, dist, inf_y, num_points)
# Hold out a fixed-size test set; the rest is the unlabeled pool.
test_size = 1000
unlabeled_points = num_points - test_size
avg_svm_angle = zeros(len(label_budget))
avg_algo_angle = zeros(len(label_budget))
avg_sm_angle = zeros(len(label_budget))
p = GaussianKernel(0.1)
learners = []
learners.append(lambda: KernelMarginBasedTheoreticalParams(d, p, internal_iters))
for r in range(len(label_budget)):
accuracy = libsvm_compare_learners(learners, dist_used, num_trials, unlabeled_points, label_budget[r], test_size, display = False)
# Get final direction
# Compute the classification error
# get algo error and then svm error
avg_algo_angle[r] = accuracy[0][0]
avg_svm_angle[r] = accuracy[0][1]
#print avg_algo_angle[r], avg_svm_angle[r]
return avg_algo_angle, avg_svm_angle, avg_sm_angle
def calculate_noisy(y, inf_y):
    """Compare true labels y against inferred labels inf_y.

    Returns (mismatch_count, mismatch_percentage) after the best-response
    consensus dynamics have run.
    """
    num_points = y.shape[0]
    mismatches = sum(1 for truth, inferred in zip(y, inf_y) if truth != inferred)
    noise_rate = (float(mismatches) / num_points) * 100
    return mismatches, noise_rate
if __name__ == '__main__':
    # Get variables for options
    # CLI: <init_label_method> <comm> <dynamic> <update> <num_points>
    opt_init_label_method = int(sys.argv[1])
    opt_comm = int(sys.argv[2])
    opt_dynamic = int(sys.argv[3])
    opt_update = int(sys.argv[4])
    opt_num_points = int(sys.argv[5])
    # So many parameters
    # At what radius do you want your nearest neighbors to be ?
    radius = 0.1
    # How many iterations do you want to do consensus updates
    opt_resp_iter = 100
    # how many nearest_neighbors do you want if you choose the nearest neighbors for connectivity
    des_nbrs = 20
    # how many random variations dataset, w_star combinations that you want to try ?
    super_iter = 50
    # How many dimensions ?
    d = 2
    # random noise rate
    alpha = 0.35
    # linear noise rate
    intercept = 0.06
    slope = 0.07
    # ball noise rate
    beta = 0.15
    # Label budget you want to test
    label_budget = [30, 40, 50, 60, 70, 80, 90, 100]
    #label_budget = [40]
    inf_y = zeros(opt_num_points)
    '''
    # prefix noisy points
    dist_noisy = []
    num_noisy = int(opt_num_points*alpha)
    while len(dist_noisy) < num_noisy:
        temp = unf(-1,1,(1,d))
        if norm(temp,2) <=1:
            dist_noisy.append(temp[0])
    '''
    # The outer loop runs exactly once; it looks like a placeholder for
    # sweeping an extra parameter later -- confirm before removing.
    for x in range(1):
        # list to calculate noise per super iter and thus calculate average noise
        iter_final_count_noisy = []
        iter_init_count_noisy = []
        # accumulators: one row per super iteration, one column per label budget
        pre_denoise_iter_algo_error = zeros((super_iter, len(label_budget)))
        pre_denoise_iter_svm_error = zeros((super_iter, len(label_budget)))
        pre_denoise_iter_sm_error = zeros((super_iter, len(label_budget)))
        post_denoise_iter_algo_error = zeros((super_iter, len(label_budget)))
        post_denoise_iter_svm_error = zeros((super_iter, len(label_budget)))
        post_denoise_iter_sm_error = zeros((super_iter, len(label_budget)))
        pre_denoise_iter_algo_std = zeros((super_iter, len(label_budget)))
        pre_denoise_iter_svm_std = zeros((super_iter, len(label_budget)))
        pre_denoise_iter_sm_std = zeros((super_iter, len(label_budget)))
        post_denoise_iter_algo_std = zeros((super_iter, len(label_budget)))
        post_denoise_iter_svm_std = zeros((super_iter, len(label_budget)))
        post_denoise_iter_sm_std = zeros((super_iter, len(label_budget)))
        for p in range(super_iter):
            print "In Iteration # " + str(p)
            '''
            Setup a two dimensional environment with points, which should act like sensors
            Topology of points can vary
            Creating a random uniform distribution first
            '''
            # Choose randomly an optimal separator
            w_star = ones(2)
            w_star /= norm(w_star,2)
            #print w_star
            # generating a uniform ball instead of a square
            dist = []
            # change based on pre selecting noisy points or not
            #num_clean = opt_num_points - num_noisy
            # rejection-sample points from the unit ball
            while len(dist) < opt_num_points:
                temp = unf(-1,1,(1,d))
                if norm(temp,2) <= 1:
                    dist.append(temp[0])
            # Select or unselect based on fixing the noisy points in advance
            #for i in range(num_noisy):
            #    dist.append(dist_noisy[i])
            dist = array(dist)
            # Change based on realizable or non-realizable case
            y = zeros(opt_num_points)
            for i in range(opt_num_points):
                # Realizable case
                y[i] = sign(dot(w_star, dist[i]))
                # Non- Realizable case with a sine curve
                #if dist[i,1] > sin(dist[i,0]*pi)/3:
                #    y[i] = 1
                #else:
                #    y[i] = -1
            dict_index = defaultdict(list)
            dict_weight = defaultdict(list)
            nn_object = create_nn_graph(dist,opt_num_points)
            # Accumulate information
            # create the Neighborhood dictionary
            # NOTE(review): branches 1 and 2 are currently identical (both
            # radius based) -- presumably one was meant to differ; confirm.
            if opt_comm == 1:
                nn_object.radius_nn_graph(radius)
                dict_index = nn_object.dict_index
                dict_weight = nn_object.dict_weight
            elif opt_comm == 2:
                nn_object.radius_nn_graph(radius)
                dict_index = nn_object.dict_index
                dict_weight = nn_object.dict_weight
            else:
                nn_object.knn_nn_graph(des_nbrs)
                dict_index = nn_object.dict_index
                dict_weight = nn_object.dict_weight
            noisy = Create_Noisy_Labels()
            # Introducing noise
            # Random
            # Distance based
            # Pocket
            count_noisy = 0
            if opt_init_label_method == 1:
                # random noise
                inf_y = noisy.randomclassnoise(alpha, y)
                '''
                # Scheme for pre-choosing noisy points
                for i in range(num_clean):
                    inf_y[i] = y[i]
                for j in range(num_noisy):
                    inf_y[j+num_clean] = -y[j + num_clean]
                # creating own shuffling scheme
                z = hstack((dist, inf_y.reshape(opt_num_points,1)))
                u = hstack((z, y.reshape(opt_num_points,1)))
                shuffle(u)
                dist = u[:,0:2]
                inf_y = u[:,2]
                y = u[:,3]
                '''
            elif opt_init_label_method == 2:
                # choose based on w_star and distance (a random line in the unit ball)
                inf_y = noisy.linearclassnoise(intercept,slope,w_star,dist,y)
            else:
                # create random balls of noise
                inf_y = noisy.randomballnoise(beta, y, dict_index)
            temp1, temp2 = calculate_noisy(y, inf_y)
            iter_init_count_noisy.append(temp1)
            '''
            Learn based on the active learning algorithm
            Switch between train_classifier_gen_error
            and train_classifier_class_error as necessary
            '''
            # Running before denoising
            temp3, temp4, temp5, temp6, temp7, temp8 = train_classifiers_gen_error(dist, inf_y, w_star, label_budget)
            pre_denoise_iter_algo_error[p] = temp3
            pre_denoise_iter_svm_error[p] = temp4
            pre_denoise_iter_sm_error[p] = temp5
            pre_denoise_iter_algo_std[p] = temp6
            pre_denoise_iter_svm_std[p] = temp7
            pre_denoise_iter_sm_std[p] = temp8
            #print iter_algo_error[p], iter_svm_error[p]
            #print "\n"
            '''
            run best response dynamics for desired number of iterations
            '''
            consensus_obj = Consensus_Dynamics(opt_resp_iter,opt_update, opt_num_points)
            if opt_dynamic == 1:
                # run a majority algorithm
                inf_y = consensus_obj.run_majority(inf_y, dict_index)
            elif opt_dynamic == 2:
                # run a weighted majority algorithm (based on distance)
                # the distance represents the weights that it reads from
                inf_y = consensus_obj.run_weighted_majority(inf_y, dict_index, dict_weight)
            else:
                # run a probabilistic version of k-NN
                # look at neighbors, calculate ratio of maximum class and probabilistically pick it up
                inf_y = consensus_obj.run_prob_knn(inf_y, dict_index)
            # temp6/temp7 are reused here, clobbering the std values unpacked
            # above (they were already stored, so this is harmless)
            temp6, temp7 = calculate_noisy(y, inf_y)
            # print "In iteration " + str(p) + " noise rate is " + str(temp7)
            iter_final_count_noisy.append(temp6)
            '''
            Learn based on the active learning algorithm
            Query appropriate points to determine optimal direction
            Remember you are querying the inferred labels rather than actual labels
            Theoretical margin based method with parameters for the uniform distribution
            Calculate cost of inferring results
            '''
            temp8, temp9, temp10, temp11, temp12, temp13 = train_classifiers_gen_error(dist, inf_y, w_star, label_budget)
            post_denoise_iter_algo_error[p] = temp8
            post_denoise_iter_svm_error[p] = temp9
            post_denoise_iter_sm_error[p] = temp10
            post_denoise_iter_algo_std[p] = temp11
            post_denoise_iter_svm_std[p] = temp12
            post_denoise_iter_sm_std[p] = temp13
            #print iter_algo_error[p], iter_svm_error[p]
            #print "\n"
        avg_init_noise_rate = sum(iter_init_count_noisy)/float(opt_num_points*super_iter)
        print "Average initial noise rate over " + str(super_iter) + " is " + str(avg_init_noise_rate)
        for q in range(len(label_budget)):
            avg_algo_error = sum(pre_denoise_iter_algo_error[:,q])/float(super_iter)
            avg_svm_error = sum(pre_denoise_iter_svm_error[:,q])/float(super_iter)
            avg_sm_error = sum(pre_denoise_iter_sm_error[:,q])/float(super_iter)
            # NOTE(review): this takes the std *across* the per-iteration std
            # values rather than averaging them -- confirm intent.
            avg_algo_std = std(pre_denoise_iter_algo_std[:,q])
            avg_svm_std = std(pre_denoise_iter_svm_std[:,q])
            avg_sm_std = std(pre_denoise_iter_sm_std[:,q])
            print "Averaged generalization error pre denoising for " + str(label_budget[q])
            print "Algo: SVM: SimpleMargin: " + str(avg_algo_error) + " " + str(avg_svm_error) + " " + str(avg_sm_error)
            print "Algo: SVM: SimpleMargin: " + str(avg_algo_std) + " " + str(avg_svm_std) + " " + str(avg_sm_std)
            print "\n"
        avg_final_noise_rate = sum(iter_final_count_noisy)/float(opt_num_points*super_iter)
        print "Average final noise rate over " + str(super_iter) + " is " + str(avg_final_noise_rate)
        print "\n"
        for q in range(len(label_budget)):
            avg_algo_error = sum(post_denoise_iter_algo_error[:,q])/float(super_iter)
            avg_svm_error = sum(post_denoise_iter_svm_error[:,q])/float(super_iter)
            avg_sm_error = sum(post_denoise_iter_sm_error[:,q])/float(super_iter)
            avg_algo_std = std(post_denoise_iter_algo_std[:,q])
            avg_svm_std = std(post_denoise_iter_svm_std[:,q])
            avg_sm_std = std(post_denoise_iter_sm_std[:,q])
            print "Averaged generalization error post denoising for " + str(label_budget[q])
            print "Algo: SVM: SimpleMargin: " + str(avg_algo_error) + " " + str(avg_svm_error) + " " + str(avg_sm_error)
            print "Algo: SVM: SimpleMargin: " + str(avg_algo_std) + " " + str(avg_svm_std) + " " + str(avg_sm_std)
            print "\n"
| Kaushikpatnaik/Active-Learning-and-Best-Response-Dynamics | game_theory.py | Python | mit | 23,574 | [
"Gaussian"
] | 3c79d61fc75090dd6fd36d42115b3f11c201158330c014dd55f7fc559e7bc630 |
import h5py
import numpy as np
from mayavi import mlab
def plot_whole_mesh_mayavi(hdf5_fname, vsc=1.0):
    """
    Plot the top-of-slab mesh and the first in-slab mesh stored in an
    HDF5 file.

    :param str hdf5_fname:
        Path to an HDF5 file with a 'slab/top' dataset and an 'inslab'
        group of meshes
    :param float vsc:
        Vertical scaling factor applied to depths
    """
    # context manager guarantees the file is closed even if plotting
    # raises (the original open()/close() pair leaked on exception)
    with h5py.File(hdf5_fname, 'r') as f5:
        # plot upper mesh
        plot_mesh_mayavi(f5['slab/top'][:], vsc)
        # plot only the first in-slab mesh; the rest are deliberately skipped
        for idx, key in enumerate(f5['inslab']):
            if idx > 0:
                continue
            plot_mesh_mayavi(f5['inslab'][key][:], vsc)
        mlab.show()
def plot_mesh_mayavi(msh, vsc, lw=2, color=(1, 0, 0)):
    """
    Draw every quadrilateral cell of a structured mesh as a closed
    polyline.

    :param numpy.ndarray msh:
        Grid of shape (n, m, 3); the last axis holds x, y, depth
    :param float vsc:
        Vertical scaling applied to the depth component
    :param int lw:
        Line width passed to mayavi
    :param tuple color:
        RGB triple used for the cell outlines
    """
    nrow = msh.shape[0]
    ncol = msh.shape[1]
    for i in range(nrow - 1):
        for j in range(ncol - 1):
            # corner indices of this cell, walked as a closed loop
            corners = [(i, j), (i + 1, j), (i + 1, j + 1), (i, j + 1), (i, j)]
            xt = [msh[a, b, 0] for a, b in corners]
            yt = [msh[a, b, 1] for a, b in corners]
            zt = [msh[a, b, 2] * vsc for a, b in corners]
            # skip cells with non-finite vertices (only the x coordinates
            # are checked, matching the established behavior)
            if all(np.isfinite(xt)):
                mlab.plot3d(xt, yt, zt, color=color, line_width=lw)
| GEMScienceTools/oq-subduction | openquake/sub/slab/utils_plot.py | Python | agpl-3.0 | 1,182 | [
"Mayavi"
] | f89d1b9c1ffe8c6301fbd64398966e949e9c1b7761aa89d36c3230edfd7e8b83 |
## Copyright (C) 2016
## Written by Travis Thieme <tthieme@hawaii.edu>
## University of Hawai`i at Hilo
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# -*- coding: utf-8 -*-
##########################
## Load required modules##
##########################
import numpy as np
import scipy as sp
#import sympy as sy
#import statsmodels as stats
#import astropy as astro
#import math as m
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mayavi.mlab as ml
#import vpython as visual
#import pyqtgraph as pyqt
#import wx as wxp
#import pandas
#import random
import time
#import pdb
## ###################################
##
## Author: Travis Thieme
## Created on: July 5th, 2016
##
## Application Name: star_model.py
## Programming Language: Python
##
## Description: This program
##
## ###################################
# Project Info
# Module-level author/date/time stamps; figure_info() prints them on
# every figure.  strftime is evaluated once, at import time.
name = 'Travis Thieme'
date = time.strftime("%d/%m/%Y")
clock = time.strftime("%H:%M:%S %Z")
def project_info(project = ''):
    """
    Print author, date, current time and the project name for the
    project you are working on.

    :param project: project name to display (default empty string)
    """
    # local rebindings shadow the module-level constants; the date format
    # here additionally includes the weekday
    name = 'Travis Thieme'
    date = time.strftime("%A, %d/%m/%Y")
    clock = time.strftime("%H:%M:%S %Z")
    project_name = project
    print name
    print date
    print clock
    print project_name
def magnetic_field(v_scale = 1, h_scale = 1, R = 3.8918, k1 = 0.9549, k2 = 0.4608, k3 = 0.6320, h = 0.3257, B_0 = 3.3118):
    """
    This function plots various magnetic field visualization graphs.

    The user is prompted on stdin for which plot to display (1-9, 'all'
    or 'info'); each generated figure is also saved to a .png file.

    :param v_scale: NOTE(review): currently unused -- confirm before removing
    :param h_scale: scaling applied to the Bessel roots in lambda_m_calc
    :param R: cloud radius of the model
    :param k1, k2, k3: series coefficients of the first three field modes
    :param h: disk scale height
    :param B_0: uniform background field added to Bz
    """
    # select which plot to show
    # 1 for PLOT 1, 2 for PLOT 2, etc.
    # all for all plots
    # info for plot info
    print 'Which plot would you like to display?\n'
    for i in range(1,10): # number of plots total
        print 'For plot ', i, ', please type ', i
    print 'For all plots, please type all'
    print 'For info on all the plots, please type info'
    print_which = str(raw_input("Plot #: "))
    # model parameters
    k = [] # hold calculated k components
    k.append(k1) # k_1, k[0]
    k.append(k2) # k_2, k[1]
    k.append(k3) # k_3, k[2]
    lambda_m = []
    lambda_m = lambda_m_calc(h_scale, R)
    ##############################################################
    ### PLOT 1 ###################################################
    ##############################################################
    if print_which == '1' or print_which == 'all':
        ############################################################
        ## These two contour plots show the representations of
        ## Br and Bz in terms of r and z components.
        ############################################################
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = mesh_grid(r,z,R)
        Br, Bz = calc_Br_Bz(x,y,k,lambda_m,h,B_0)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1,k2,k3,R,h,B_0)
        figure_info(1, title='Magnetic Field Plot 1', subtitle=subtitle)
        plt.subplot(1,2,1)
        plot_contour(x,y,Br,title='\n\n$B_r$',xlab='$r$',ylab='$z$')
        plt.subplot(1,2,2)
        plot_contour(x,y,Bz,title='\n\n$B_z$',xlab='$r$',ylab='$z$')
        plt.tight_layout()
        plt.savefig('mag_field_plots_1.png', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 2 ###################################################
    ##############################################################
    if print_which == '2' or print_which == 'all':
        ############################################################
        ## These two contour plots show the representations of
        ## arctan(Bz/Br) and sqrt(Br^2+Bz^2) in terms of r and
        ## z components.
        ############################################################
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = mesh_grid(r,z,R)
        Br, Bz = calc_Br_Bz(x,y,k,lambda_m,h,B_0)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1,k2,k3,R,h,B_0)
        figure_info(2, title='Magnetic Field Plot 2',subtitle=subtitle)
        plt.subplot(1,2,1)
        plot_contour(x,y,np.arctan(Bz/Br),title='\n\n$tan^{-1}(B_z/B_r)$',xlab='$r$',ylab='$z$')
        plt.subplot(1,2,2)
        plot_contour(x,y,np.sqrt((Bz**2)+(Br**2)),title='\n\n$\sqrt{Bz^{2}+Br^{2}}$',xlab='$r$',ylab='$z$')
        plt.tight_layout()
        plt.savefig('mag_field_plots_2.png', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 3 ###################################################
    ##############################################################
    if print_which == '3' or print_which == 'all':
        ############################################################
        ## These line plots show the features of Br and Bz by using
        ## modified z components to get a 2D line graph.
        ############################################################
        ### PLOT
        r = np.linspace(0, 1, 100)*R
        Br0, Bz0 = calc_Br_Bz_mod(k,lambda_m,h,B_0,R,r)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1,k2,k3,R,h,B_0)
        figure_info(3, title='Magnetic Field Plot 3',subtitle=subtitle)
        plt.subplot(1,2,1)
        plot(r,Bz0,title='\n\n$z-component$ $at$ $z=0$',xlab = '$r$',ylab='$B_z$')
        plt.subplot(1,2,2)
        plot(r,Br0,title='\n\n$r-component$ $at$ $z=0.26R$',xlab = '$r$',ylab='$B_r$')
        plt.tight_layout()
        plt.savefig('mag_field_plots_3.png', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 4 ###################################################
    ##############################################################
    if print_which == '4' or print_which == 'all':
        ############################################################
        ## This plots a representation of the magnetic field based
        ## on the r, z, Br, and Bz components so we can look at the
        ## actual lines creatd by the formulas.
        ############################################################
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = mesh_grid(r,z,R)
        Br, Bz = calc_Br_Bz(x,y,k,lambda_m,h,B_0)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1,k2,k3,R,h,B_0)
        figure_info(4, title='Magnetic Field Plot 4',size = (8,8),subtitle=subtitle,subxpos=0.5, subypos=0.945)
        plot_streamplot(r,z,Br,Bz,title='\n\n$Model$ $of$ $Magnetic$ $Field$ $Lines$',xlab = '$r$',ylab='$z$')
        plt.tight_layout()
        plt.savefig('mag_field_plots_4.png', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 5 ###################################################
    ##############################################################
    if print_which == '5' or print_which == 'all':
        ############################################################
        ## This is a plot of stokes parameters Q and U found from
        ## the Bx, By and Bz vector components.
        ############################################################
        ## WARNING
        ## If you change the number of points in r and z, you must
        ## change the corresponding points in the mgrid for yy and
        ## zz. As well as the values in the for loop for y and z
        ## when you calculate Q and U and the array size for
        ## Sq and Su.
        ############################################################
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = np.meshgrid(r,z)
        xx, yy, zz = np.mgrid[-0.5:0.5:10j, -0.5:0.5:100j, -0.5:0.5:100j]*R
        rr = np.sqrt(xx*xx+yy*yy)
        Br, Bz = calc_Br_Bz(rr,zz,k,lambda_m,h,B_0)
        Bx, By= calc_BxBy(Br,xx,yy)
        # weight each field sample by the local gas density
        Bx *= density(xx, yy, zz, R, h)
        By *= density(xx, yy, zz, R, h)
        Bz *= density(xx, yy, zz, R, h)
        tx = int(raw_input("Rotation in x plane (degrees): "))
        ty = int(raw_input("Rotation in y plane (degrees): "))
        Q, U = calc_Sq_Su(Bx, By, Bz, tx, ty)
        subtitle=(r'$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}, \theta_x = {6}, \theta_y = {7}$').format(k1, k2, k3, R, h, B_0, tx, ty)
        figure_info(5, title='Stokes Parameters with Magnetic Fields: Q and U',subtitle=subtitle)
        plt.subplot(1,2,1)
        plot_contour(x,y,Q,title='\n\n$Q$',xlab='$r$',ylab='$z$')
        plt.subplot(1,2,2)
        plot_contour(x,y,U,title='\n\n$U$',xlab='$r$',ylab='$z$')
        plt.tight_layout()
        plt.savefig('stokes_param.png', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 6 ###################################################
    ##############################################################
    if print_which == '6' or print_which == 'all':
        ############################################################
        ## This is a plot of stokes parameters Q and U found from
        ## the Bx, By and Bz vector components for multiple thetas.
        ## Prints 5 different plots
        ############################################################
        ## WARNING
        ## If you change the number of points in r and z, you must
        ## change the corresponding points in the mgrid for yy and
        ## zz. As well as the values in the for loop for y and z
        ## when you calculate Q and U and the array size for
        ## Sq and Su.
        ############################################################
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = np.meshgrid(r,z)
        xx, yy, zz = np.mgrid[-1:1:100j, -1:1:100j, -0.5:0.5:100j]*R
        rr = np.sqrt(xx*xx+yy*yy)
        Br, Bz = calc_Br_Bz(rr,zz,k,lambda_m,h,B_0)
        Bx, By= calc_BxBy(Br,xx,yy)
        #Bx *= density3(xx, yy, zz, R, h)
        #By *= density3(xx, yy, zz, R, h)
        #Bz *= density3(xx, yy, zz, R, h)
        #tx = 0
        #ty = 0
        # one full figure (contours, images, quiver) per rotation pair
        for tx in [0, 45]:
            for ty in [0]:
                Q, U = calc_Sq_Su(Bx, By, Bz, tx, ty)
                subtitle=(r'$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}, \theta_x = {6}, \theta_y = {7}$').format(k1, k2, k3, R, h, B_0, tx, ty)
                figure_info(6, title='Stokes Parameters of Magnetic Fields: Q and U',subtitle=subtitle,size=(8,12),subxpos=0.5,subypos=0.96)
                plt.subplot(3,2,1)
                plot_contour(x,y,Q,title='\n\n\n\n\nContour Plot: $Q$',xlab='$r$',ylab='$z$')
                plt.subplot(3,2,2)
                plot_contour(x,y,U,title='\n\n\n\n\nContour Plot: $U$',xlab='$r$',ylab='$z$')
                plt.subplot(3,2,3)
                plot_image(Q,title='\n\nImage Plot: $Q$',xlab='$r$',ylab='$z$')
                plt.subplot(3,2,4)
                plot_image(U,title='\n\nImage Plot: $U$',xlab='$r$',ylab='$z$')
                plt.subplot(3,1,3)
                plot_quiver(x,y,np.cos((np.arctan2(U,Q)/2)),np.sin((np.arctan2(U,Q)/2)),title=('\n\n'r'Quiver: $\arctan(\frac{U}{Q})$'),xlab='$r$',ylab='$z$')
                plt.tight_layout()
                plt.savefig('stokes_param_'+str(tx)+'_'+str(ty)+'.png', dpi = 800)
                plt.show()
    ##############################################################
    ### PLOT 7 ###################################################
    ##############################################################
    if print_which == '7' or print_which == 'all':
        ############################################################
        ## This is the mayavi3d version of the magnetic field
        ## in plot 4. This opens a seperate window where you
        ## can see a 3D visualization and rotate it about
        ## any axis.
        ############################################################
        ## import mayavi as maya
        ## Br = sqrt(Bx*Bx+By*By)
        ## theta = arctan(y/x)
        ## Bx = Br*cos(theta)
        ## By = Br*sin(theta)
        ############################################################
        ## mayavi.mlab.quiver3d(x, y, z, u, v, w, ...)
        ##
        ## If 6 arrays, (x, y, z, u, v, w) are passed, the
        ## 3 first arrays give the position of the arrows, and
        ## the 3 last the components. They can be of any shape.
        ############################################################
        ### PLOT
        xx, yy, zz = np.mgrid[-0.5:0.5:10j, -0.5:0.5:10j, -0.5:0.5:100j]*R
        rr = np.sqrt(xx*xx+yy*yy)
        Br, Bz = calc_Br_Bz(rr,zz,k,lambda_m,h,B_0)
        Bx, By= calc_BxBy(Br,xx,yy)
        Bx *= density(xx, yy, zz, R, h)
        By *= density(xx, yy, zz, R, h)
        Bz *= density(xx, yy, zz, R, h)
        ml.figure(fgcolor=(0, 0, 0),bgcolor=(0,0,0))
        ml.quiver3d(xx,yy,zz,Bx,By,Bz, colormap='spectral', mode='2ddash')
        ml.savefig('mag_field_mayavi.png')
        ml.show()
    ##############################################################
    ### PLOT 8 ###################################################
    ##############################################################
    if print_which == '8' or print_which == 'all':
        ############################################################
        ## This graph is the mplot3d version of the mayavi3d graph.
        ## You can set the rotation of the xy and z axis by
        ## entering an integer (in degrees) between -360 and 360
        ## for either axis.
        ############################################################
        print '\n'
        print 'Please enter the rotation parameters for plot 7.'
        print 'You can rotate this graph in the xy direction and \nthe z direction.'
        print 'The xy direction is set at 0 degrees to be looking \nat the x-axis.'
        print 'The z direction is set at 0 degrees to be vertically \noriented.'
        print 'Please enter an integer from -360 to 360'
        # NOTE(review): a failed parse only prints an error; rotate_xy/rotate_z
        # stay undefined and the plotting below raises NameError -- confirm.
        try:
            rotate_xy = int(raw_input("Rotation amount in xy direction (degrees): "))
        except:
            ###ERROR###
            print 'Error! Try again.'
            print 'Please enter an integer (in degrees) from -360 to 360'
        try:
            rotate_z = int(raw_input("Rotation amount in z direction (degrees): "))
        except:
            ###ERROR###
            print 'Error! Try again.'
            print 'Please enter an integer (in degrees) from -360 to 360'
        ### PLOT
        xx, yy, zz = np.mgrid[-0.5:0.5:10j, -0.5:0.5:10j, -0.5:0.5:100j]*R
        rr = np.sqrt(xx*xx+yy*yy)
        Br, Bz = calc_Br_Bz(rr,zz,k,lambda_m,h,B_0)
        Bx, By= calc_BxBy(Br,xx,yy)
        Bx *= density2(xx, yy, zz)
        By *= density2(xx, yy, zz)
        Bz *= density2(xx, yy, zz)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1,k2,k3,R,h,B_0)
        fig = figure_info(8, title='3D Magnetic Field Plot (6) using mplot3d',size = (8,8),subtitle=subtitle,subxpos=0.5, subypos=0.945)
        plot_3d_quiver(fig,xx,yy,zz,Bx,By,Bz,rotate_xy,rotate_z)
        plt.tight_layout()
        plt.savefig('3D_mag_field_plots', dpi = 800)
        plt.show()
    ##############################################################
    ### PLOT 9 ###################################################
    ##############################################################
    if print_which == '9' or print_which == 'all':
        ############################################################
        ##
        ############################################################
        print '\n'
        print 'Please enter the rotation parameters for plot 8.'
        print 'You can rotate this graph in the xy direction and \nthe z direction.'
        print 'The xy direction is set at 0 degrees to be looking \nat the x-axis.'
        print 'The z direction is set at 0 degrees to be vertically \noriented.'
        print 'Please enter an integer from -360 to 360'
        try:
            rotate_xy = int(raw_input("Rotation amount in xy direction (degrees): "))
        except:
            ###ERROR###
            print 'Error! Try again.'
            print 'Please enter an integer (in degrees) from -360 to 360'
        try:
            rotate_z = int(raw_input("Rotation amount in z direction (degrees): "))
        except:
            ###ERROR###
            print 'Error! Try again.'
            print 'Please enter an integer (in degrees) from -360 to 360'
        ### PLOT
        r = np.linspace(-1, 1, 100)*R
        z = np.linspace(-0.5, 0.5, 100)*R
        x, y = np.meshgrid(r,z)
        xx, yy, zz = np.mgrid[-0.5:0.5:10j, -0.5:0.5:100j, -0.5:0.5:100j]*R
        rr = np.sqrt(xx*xx+yy*yy)
        Br, Bz = calc_Br_Bz(rr,zz,k,lambda_m,h,B_0)
        Bx, By= calc_BxBy(Br,xx,yy)
        Bx *= density(xx, yy, zz, R, h)
        By *= density(xx, yy, zz, R, h)
        Bz *= density(xx, yy, zz, R, h)
        # NOTE(review): calc_Sq_Su's parameters are named theta_x/theta_y, not
        # tx/ty -- this keyword call raises TypeError as written; confirm fix.
        Q, U = calc_Sq_Su(Bx, By, Bz, tx = 0, ty = 0)
        subtitle='$k_1 = {0}, k_2 = {1}, k_3 = {2}, R = {3}, h = {4}, B_0 = {5}$'.format(k1, k2, k3, R, h, B_0)
        fig = figure_info(9, title='Stokes Parameters: Q and U',subtitle=subtitle)
        plt.subplot(1,2,1)
        plot_3d_contour(fig,x,y,Q,rotate_xy,rotate_z)
        plt.subplot(1,2,2)
        plot_3d_contour(fig,x,y,U,rotate_xy,rotate_z)
        plt.tight_layout()
        plt.savefig('3D_stokes_param', dpi = 800)
        plt.show()
    ##############################################################
    ### INFO #####################################################
    ##############################################################
    if print_which == 'info':
        ###DISPLAY GRAPH INFO###
        print graph_info()
    ##############################################################
    ### ERROR ####################################################
    ##############################################################
    if print_which not in ('1','2','3','4','5','6','7','8','9','all','info'):
        ###ERROR###
        print 'Error!'
        print 'Input', str(print_which), 'not recognized.'
        print 'Please input the graph number, all or info.'
def plot_quiver(x,y,u,v,title='',xlab = '',ylab=''):
    """
    Draw headless line segments (polarization 'sticks') for every 16th
    grid point of the vector field (u, v).

    :param x, y: coordinate grids
    :param u, v: vector components at each grid point
    :param title, xlab, ylab: figure annotations
    """
    # headlength/headwidth/headaxislength of 0 render plain sticks
    # instead of arrows; pivot='mid' centers each stick on its point
    plt.quiver(x[::16],y[::16],u[::16],v[::16],headlength=0,headwidth=0,headaxislength=0,angles='xy',scale_units='xy',scale=10,pivot='mid')
    #plt.axis([-1,1,-0.5,0.5])
    plt.title(title, fontsize=8)
    plt.xlabel(xlab, fontsize=8)
    plt.ylabel(ylab, fontsize=8)
    plt.xticks(fontsize=5)
    plt.yticks(fontsize=5)
    #plt.clabel(cp, inline = True, fontsize = 3)
    #plt.colorbar(ticks=tick)
    #cb = plt.colorbar()
    #cb.ax.tick_params(labelsize = 5) # change number size on colorbar
def plot_image(x,title='',xlab = '',ylab=''):
    """
    Render a 2-D array as an image with a colorbar.

    :param x: 2-D array of values
    :param title, xlab, ylab: figure annotations
    """
    plt.imshow(x,extent=[-1,1,-0.5,0.5],aspect='auto')
    plt.title(title, fontsize=8)
    plt.xlabel(xlab, fontsize=8)
    plt.ylabel(ylab, fontsize=8)
    plt.xticks(fontsize=5)
    plt.yticks(fontsize=5)
    # imshow places the origin at the top-left; flip so the orientation
    # matches the contour plots
    plt.gca().invert_yaxis()
    #plt.clabel(cp, inline = True, fontsize = 3)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize = 5) # change number size on colorbar
def graph_info():
    """
    Display info for what each plot is.

    NOTE(review): unimplemented stub -- it returns None, so the
    'print graph_info()' call in magnetic_field() currently prints
    'None'.  Fill in the per-plot descriptions or return a string.
    """
def plot_3d_contour(fig,x,y,z,rotate_xy,rotate_z):
    """
    Draw a contour plot on a 3-D axis of the given figure and set the
    viewing angles.

    :param fig: matplotlib figure to draw on
    :param x, y, z: contour data
    :param rotate_xy: azimuth rotation in degrees (0 = default view)
    :param rotate_z: elevation rotation in degrees (0 = top-down view)
    """
    ax = fig.gca(projection='3d')
    ax.contour(x,y,z)
    # 90-degree offsets align the user's 0 with the documented base view
    ax.view_init(elev=90+rotate_z,azim=90+rotate_xy)
    ax.set_xlabel('x', fontsize=8)
    ax.set_ylabel('y', fontsize=8)
    ax.set_zlabel('z', fontsize=8)
def plot_3d_quiver(fig,x,y,z,X,Y,Z,rotate_xy,rotate_z):
    """
    Draw a 3-D quiver plot of the vector field (X, Y, Z) sampled at the
    positions (x, y, z) and set the viewing angles.

    :param fig: matplotlib figure to draw on
    :param x, y, z: arrow positions
    :param X, Y, Z: vector components at each position
    :param rotate_xy: azimuth rotation in degrees
    :param rotate_z: elevation rotation in degrees (0 = side view here)
    """
    ax = fig.gca(projection='3d')
    ax.quiver(x,y,z,X,Y,Z,length=0.05)
    # note: unlike plot_3d_contour, elevation here is offset from 0, not 90
    ax.view_init(elev=0+rotate_z,azim=90+rotate_xy)
    ax.set_xlabel('x', fontsize=8)
    ax.set_ylabel('y', fontsize=8)
    ax.set_zlabel('z', fontsize=8)
def calc_Sq_Su(Bx, By, Bz, theta_x, theta_y):
    """
    Calculate the Stokes parameters Q and U by accumulating the
    plane-of-sky polarization angle along the first (depth) axis.

    The field is rotated first around the y axis by ``theta_y`` and then
    around the x axis by ``theta_x``; for every (j, k) column the
    polarization contribution is summed over the i index.

    :param Bx, By, Bz: 3-D field-component arrays of identical shape
    :param theta_x: rotation about the x axis, in degrees
    :param theta_y: rotation about the y axis, in degrees
    :return: tuple (Sq, Su) of 2-D arrays with shape ``Bx.shape[1:]``
    """
    # derive the grid sizes from the input instead of hard-coding 100
    # (the old fixed sizes forced every caller to use 100-point grids)
    ni, nj, nk = np.shape(Bx)
    Sq = np.zeros((nj, nk))
    Su = np.zeros((nj, nk))
    # degrees -> radians
    tx = theta_x * (np.pi/180)
    ty = theta_y * (np.pi/180)
    for k in range(nk):
        for j in range(nj):
            for i in range(ni):
                Bxx = Bx[i][j][k]
                Byy = By[i][j][k]
                Bzz = Bz[i][j][k]
                # rotation around y axis (only x feeds into z here)
                y_rot = Byy
                z_rot = -Bxx*np.sin(ty) + Bzz*np.cos(ty)
                # rotation around x axis
                yy_rot = y_rot*np.cos(tx) - z_rot*np.sin(tx)
                zz_rot = y_rot*np.sin(tx) + z_rot*np.cos(tx)
                # polarization angle is perpendicular to the field angle
                theta_s = np.arctan2(zz_rot, yy_rot)
                theta_p = (np.pi/2) + theta_s
                # accumulate Q and U for the stokes parameters
                Sq[j][k] += np.cos(2*theta_p)
                Su[j][k] += np.sin(2*theta_p)
    return Sq, Su
def calc_BxBy(Br, xx, yy):
    """
    Resolve the radial field magnitude into Cartesian x and y
    components for the 3-D plots.

    :param Br: radial magnetic field magnitude
    :param xx, yy: Cartesian coordinates of the sample points
    :return: tuple (Bx, By)
    """
    # arctan2 picks the correct quadrant for the azimuthal angle
    azimuth = np.arctan2(yy, xx)
    Bx = Br * np.cos(azimuth)
    By = Br * np.sin(azimuth)
    return Bx, By
def plot_streamplot(r,z,Br,Bz,title='',xlab = '',ylab=''):
    """
    Create a streamplot of the (Br, Bz) field over the (r, z) grid —
    used for the field-line visualization (plot 4).

    :param r, z: 1-D coordinate vectors
    :param Br, Bz: 2-D field components on the (r, z) grid
    :param title, xlab, ylab: figure annotations
    """
    plt.streamplot(r, z, Br, Bz)
    plt.title(title, fontsize=16)
    plt.xlabel(xlab, fontsize=14)
    plt.ylabel(ylab, fontsize=14)
    plt.xticks(fontsize=8)
    plt.yticks(fontsize=8)
    # NOTE(review): nothing here sets a label, so this legend is empty
    plt.legend()
def lambda_m_calc(h_scale, R, num_roots=3):
    """
    Calculate the eigenvalues lambda_m used by the Br and Bz series.

    The m-th eigenvalue is (a_m / R)**2, where a_m is the m-th zero of
    the Bessel function J1 divided by ``h_scale``.

    :param h_scale: scaling applied to the Bessel roots
    :param R: cloud radius
    :param num_roots: number of series terms to return (default 3,
        matching the three k coefficients used by the callers)
    :return: list of ``num_roots`` eigenvalues
    """
    # zeros of J1, scaled
    a_m = sp.special.jn_zeros(1, num_roots) / h_scale
    return [(a / R)**2 for a in a_m]
def mesh_grid(r, z, R):
    """
    Build 2-D coordinate grids from the r and z sample vectors.

    :param r: radial sample positions
    :param z: vertical sample positions
    :param R: unused; kept so existing call sites keep working
    :return: tuple (x, y) of 2-D coordinate arrays
    """
    return np.meshgrid(r, z)
def calc_Br_Bz(x, y, k, lambda_m, h, B_0):
    """
    Sum the first three series terms of the Br and Bz field components.

    :param x, y: coordinate grids (r and z)
    :param k: list with at least three series coefficients k_m
    :param lambda_m: list with at least three eigenvalues
    :param h: disk scale height
    :param B_0: uniform background field (added inside each Bz term)
    :return: tuple (Br, Bz)
    """
    Br = sum(B_r_calc(x, y, k[m], lambda_m[m], h) for m in range(3))
    Bz = sum(B_z_calc(x, y, k[m], lambda_m[m], h, B_0) for m in range(3))
    return Br, Bz
def calc_Br_Bz_mod(k, lambda_m, h, B_0, R, r):
    """
    Evaluate the normalized field components along fixed heights for
    the 2-D line plots: Bz at z = 0 (mid-plane) and Br at z = 0.26*R.

    :param k: list with at least three series coefficients
    :param lambda_m: list with at least three eigenvalues
    :param h: disk scale height
    :param B_0: background field; both curves are normalized by it
    :param R: cloud radius
    :param r: radial sample positions
    :return: tuple (Br0, Bz0)
    """
    z_for_bz = 0          # mid-plane height used for the Bz curve
    z_for_br = 0.26 * R   # height at which the Br curve is sampled
    Bz0 = sum(B_z_calc(r, z_for_bz, k[m], lambda_m[m], h, B_0) / B_0 for m in range(3))
    Br0 = sum(B_r_calc(r, z_for_br, k[m], lambda_m[m], h) / B_0 for m in range(3))
    return Br0, Bz0
def figure_info(fignum,title='',size=(8,4), subtitle='', subxpos=0.5, subypos=0.92):
    """
    Set the info for a given figure: number, title, size, subtitle and
    an author/date/time stamp in the top-left corner.

    :param fignum: matplotlib figure number
    :param title: bold suptitle for the whole figure
    :param size: figure size in inches
    :param subtitle: small parameter string placed under the title
    :param subxpos, subypos: figure coordinates of the subtitle
    :return: the matplotlib figure object
    """
    fig = plt.figure(fignum, size)
    plt.suptitle(title, fontsize=12, fontweight='bold') # Title for whole figure
    plt.figtext(subxpos,subypos,subtitle, size=6,horizontalalignment='center',verticalalignment='center')
    # the module-level name/date/clock constants are stamped on every figure
    plt.figtext(0.01,0.97,'Created by: ' + name, size=5) # Add created by to top left corner
    plt.figtext(0.01,0.95, 'Todays Date: ' + date, size=5) # Add date to top left corner
    plt.figtext(0.01,0.93,'Time: ' + clock, size=5) # Add clock time to top left corner
    return fig
def plot_contour(x,y,z,title='',xlab = '',ylab=''):
    """
    Make a labelled contour plot with a colorbar on the current axes.

    :param x, y: coordinate grids
    :param z: values to contour
    :param title, xlab, ylab: figure annotations
    """
    cp = plt.contour(x, y, z)
    plt.title(title, fontsize=8)
    plt.xlabel(xlab, fontsize=8)
    plt.ylabel(ylab, fontsize=8)
    plt.xticks(fontsize=5)
    plt.yticks(fontsize=5)
    # inline level labels on the contour lines themselves
    plt.clabel(cp, inline = True, fontsize = 3)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize = 5) # change number size on colorbar
def plot(x,y,title='',xlab = '',ylab=''):
    """
    Make a simple 2-D line plot on the current axes.

    NOTE(review): this shadows any name 'plot' imported elsewhere in the
    module namespace -- intentional here, but worth knowing.

    :param x, y: data to plot
    :param title, xlab, ylab: figure annotations
    """
    plt.plot(x, y)
    plt.title(title, fontsize=8)
    plt.xlabel(xlab, fontsize=8)
    plt.ylabel(ylab, fontsize=8)
    plt.xticks(fontsize=5)
    plt.yticks(fontsize=5)
    # NOTE(review): no label is set, so this legend is empty
    plt.legend()
def linspace(a, b, c):
    """
    Thin wrapper around numpy.linspace: return ``c`` evenly spaced
    samples covering the interval [a, b].

    Typical ranges used in this module:
    r = (-1, 1), z = (-0.5, 0.5)
    """
    return np.linspace(a, b, c)
def B_r_calc(r, z, k_m, lambda_m, h):
    """
    Radial magnetic-field contribution of a single series mode.

    :param r: radial coordinate(s)
    :param z: vertical coordinate(s)
    :param k_m: series coefficient of this mode
    :param lambda_m: eigenvalue of this mode (b = sqrt(lambda_m))
    :param h: disk scale height
    :return: Br contribution of this mode
    """
    b = np.sqrt(lambda_m)
    half_bh = (b*h)/2
    # two exponentially damped complementary-error-function branches
    upper = sp.special.erfc(half_bh - (z/h))*np.exp((-b*z))
    lower = sp.special.erfc(half_bh + (z/h))*np.exp((b*z))
    return k_m*b*sp.special.j1(b*r)*(upper - lower)
def B_z_calc(r, z, k_m, lambda_m, h, B_0):
    """
    Vertical magnetic-field contribution of a single series mode,
    including the uniform background field ``B_0``.

    :param r: radial coordinate(s)
    :param z: vertical coordinate(s)
    :param k_m: series coefficient of this mode
    :param lambda_m: eigenvalue of this mode (b = sqrt(lambda_m))
    :param h: disk scale height
    :param B_0: uniform background field added to the result
    :return: Bz contribution of this mode
    """
    b = np.sqrt(lambda_m)
    half_bh = (b*h)/2
    # the vertical component sums (rather than subtracts) the branches
    upper = sp.special.erfc(half_bh + (z/h))*np.exp((b*z))
    lower = sp.special.erfc(half_bh - (z/h))*np.exp((-b*z))
    return k_m*b*sp.special.j0(b*r)*(upper + lower)+B_0
def density(x, y, z, R, h):
    """
    Gaussian density profile with radial scale R and vertical scale h:
    rho = rho_0 * exp(-a*(x^2+y^2)/R^2) * exp(-b*z^2/h^2)

    :param x, y, z: Cartesian coordinates (scalars or arrays)
    :param R: radial e-folding scale
    :param h: vertical e-folding scale
    :return: density value(s)
    """
    # parameters
    row_0 = 10**7   # central density
    a = 1           # radial falloff strength
    b = 1           # vertical falloff strength
    ### turnover at: a=68, b=68
    ### At 68, Q changes dramatically but U looks similar still
    # FIX: the original wrote '/R*R' and '/h*h', which divides and then
    # multiplies by the scale (cancelling it); the scale belongs squared
    # in the denominator.
    return row_0*np.exp(-a*(x*x+y*y)/(R*R))*np.exp(-b*(z*z)/(h*h))
def density2(x, y, z):
    """
    Gaussian density profile with fixed radial scale A and vertical
    scale B: rho = rho_0 * exp(-(x^2+y^2)/A^2) * exp(-z^2/B^2)

    :param x, y, z: Cartesian coordinates (scalars or arrays)
    :return: density value(s)
    """
    # parameters
    row_0 = 10**7   # central density
    A = 0.3         # radial e-folding scale
    B = 0.3         # vertical e-folding scale
    # FIX: '/A*A' and '/B*B' cancelled the scales entirely; they belong
    # squared in the denominator.
    return row_0*np.exp(-(x*x+y*y)/(A*A))*np.exp(-(z*z)/(B*B))
def density3(x, y, z, R, h):
    """
    Thresholded Gaussian density profile: same form as density(), but
    values at or below 1 are cut to 0.

    :param x, y, z: Cartesian coordinates (scalars or arrays)
    :param R: radial e-folding scale
    :param h: vertical e-folding scale
    :return: array of density values, with sub-threshold cells zeroed
    """
    # parameters
    row_0 = 10**7   # central density
    a = 1           # radial falloff strength
    b = 1           # vertical falloff strength
    ### turnover at: a=68, b=68
    ### At 68, Q changes dramatically but U looks similar still
    # FIX: '/R*R' and '/h*h' cancelled the scales; squared denominators.
    dens = row_0*np.exp(-a*(x*x+y*y)/(R*R))*np.exp(-b*(z*z)/(h*h))
    # FIX: the original 'if np.any(dens <= 1): return 0' zeroed the whole
    # array whenever a single element fell below the threshold; zero out
    # only the sub-threshold cells instead.
    return np.where(dens <= 1, 0, dens)
"Mayavi"
] | 1a12f801495706010ed798d4190bb7c5906e8b954ae6c13f7cf8c5959b58d4b4 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Rollback Config back to Lenovo Switches
#
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_rollback
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Roll back the running or startup configuration from a remote
server on devices running Lenovo CNOS
description:
- This module allows you to work with switch configurations. It provides a
way to roll back configurations of a switch from a remote server. This is
achieved by using startup or running configurations of the target device
that were previously backed up to a remote server using FTP, SFTP, TFTP,
or SCP. The first step is to create a directory from where the remote
server can be reached. The next step is to provide the full file path of
     the backup configuration's location. Authentication details required by the
remote server must be provided as well.
By default, this method overwrites the switch's configuration file with
the newly downloaded file. This module uses SSH to manage network device
configuration. The results of the operation will be placed in a directory
named 'results' that must be created by the user in their local directory
to where the playbook is run. For more information about this module from
     Lenovo and customizing its usage for your use cases, please visit
U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_rollback.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
configType:
description:
- This refers to the type of configuration which will be used for
the rolling back process. The choices are the running or startup
configurations. There is no default value, so it will result
in an error if the input is incorrect.
required: Yes
default: Null
choices: [running-config, startup-config]
protocol:
description:
- This refers to the protocol used by the network device to
interact with the remote server from where to download the backup
configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
protocols will result in error. If this parameter is not
specified, there is no default value to be used.
required: Yes
default: Null
choices: [SFTP, SCP, FTP, TFTP]
rcserverip:
description:
- This specifies the IP Address of the remote server from where the
backup configuration will be downloaded.
required: Yes
default: Null
rcpath:
description:
- This specifies the full file path of the configuration file
located on the remote server. In case the relative path is used as
the variable value, the root folder for the user of the server
needs to be specified.
required: Yes
default: Null
serverusername:
description:
- Specify username for the server relating to the protocol used.
required: Yes
default: Null
serverpassword:
description:
- Specify password for the server relating to the protocol used.
required: Yes
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_rollback.
These are written in the main.yml file of the tasks directory.
---
- name: Test Rollback of config - Running config
  cnos_rollback:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Startup config
  cnos_rollback:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Running config - TFTP
  cnos_rollback:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Rollback of config - Startup config - TFTP
  cnos_rollback:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_rollback_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Config file tranferred to Device"
'''
import sys
import time
import socket
import array
import json
import time
import re
import os
# Import the vendor helper library; HAS_LIB records availability so the
# module can fail gracefully when run outside a full Ansible install.
try:
    from ansible.module_utils.network.cnos import cnos
    HAS_LIB = True
except ImportError:
    # FIX: catch ImportError instead of a bare `except:`, which also
    # swallowed SystemExit/KeyboardInterrupt and unrelated errors.
    HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
# Utility method to roll back the running config or startup config.
# This method supports only SCP, SFTP, FTP or TFTP.
def doConfigRollBack(module, prompt, answer):
    """Build and run the device `copy` command that restores a config.

    :param module: AnsibleModule carrying connection/server parameters.
    :param prompt: unused (kept for call compatibility).
    :param answer: unused (kept for call compatibility).
    :returns: device CLI output as a string, or "Error-110" for an
        unsupported protocol.
    """
    server = module.params['serverip']
    username = module.params['serverusername']
    password = module.params['serverpassword']
    protocol = module.params['protocol'].lower()
    confPath = module.params['rcpath']
    configType = module.params['configType']
    # FIX: build the URL per protocol.  TFTP has no credentials, and the
    # original concatenated `username` unconditionally, raising TypeError
    # when serverusername (optional in the arg spec) was omitted.
    if protocol == "tftp":
        command = ("copy " + protocol + " " + protocol + "://" +
                   server + "/" + confPath +
                   " " + configType + " vrf management\n")
    else:
        command = ("copy " + protocol + " " + protocol + "://" +
                   username + "@" + server + "/" + confPath +
                   " " + configType + " vrf management\n")
    cnos.debugOutput(command + "\n")
    # cnos.checkForFirstTimeAccess(module, command, 'yes/no', 'yes')
    cmd = []
    if protocol == "scp":
        # SCP asks for a connect timeout before the password prompt.
        cmd.append({'command': command, 'prompt': 'timeout:', 'answer': '0'})
        cmd.append({'command': '\n', 'prompt': 'Password:',
                    'answer': password})
    elif protocol in ("sftp", "ftp"):
        # FIX: sftp and ftp branches were byte-for-byte duplicates.
        cmd.append({'command': command, 'prompt': 'Password:',
                    'answer': password})
    elif protocol == "tftp":
        cmd.append({'command': command, 'prompt': None, 'answer': None})
    else:
        return "Error-110"
    # Overwriting startup-config asks for an extra confirmation.
    if configType == 'startup-config':
        cmd.append({'command': 'y', 'prompt': None, 'answer': None})
    retVal = str(cnos.run_cnos_commands(module, cmd))
    return retVal
# EOM
def main():
    """Entry point: validate arguments, roll back the config, log output.

    Appends the transfer status to `outputfile` (creating parent
    directories as needed) and exits via Ansible's JSON protocol.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            configType=dict(required=True),
            protocol=dict(required=True),
            serverip=dict(required=True),
            rcpath=dict(required=True),
            serverusername=dict(required=False),
            serverpassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    outputfile = module.params['outputfile']
    protocol = module.params['protocol'].lower()
    output = ''
    if protocol in ('tftp', 'ftp', 'sftp', 'scp'):
        transfer_status = doConfigRollBack(module, None, None)
    else:
        transfer_status = 'Invalid Protocol option'
    output = output + "\n Config Transfer status \n" + transfer_status
    # Save the CLI transcript into the requested results file, creating
    # the parent directory if it does not exist yet.
    if '/' in outputfile:
        path = outputfile.rsplit('/', 1)
        if not os.path.exists(path[0]):
            os.makedirs(path[0])
    # FIX: use a context manager (the original left an open handle named
    # `file`, shadowing the builtin and leaking on write errors).
    with open(outputfile, "a") as results_file:
        results_file.write(output)
    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        # NOTE: "tranferred" typo kept -- it matches the documented
        # RETURN sample and may be matched by existing playbooks.
        module.exit_json(changed=True, msg="Config file tranferred to Device")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
    main()
| caphrim007/ansible | lib/ansible/modules/network/cnos/cnos_rollback.py | Python | gpl-3.0 | 11,943 | [
"VisIt"
] | ffea358812c347ac34ad6e826e1ad7fc7e365a2a774e4f81640e6be5742eea6e |
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
# Files already known to import _ explicitly; filled in lazily by
# check_explicit_underscore_import() as lines are scanned.
UNDERSCORE_IMPORT_FILES = []

# LOG.<level>(...) whose message is a bare (untranslated) string.
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
log_translation_LC = re.compile(
    r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
    r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
log_translation_LI = re.compile(
    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
    r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
# LOG calls that wrap the message in a plain _() translation.
# FIX: the continuation literal is now raw; the original plain string
# contained "\(" / "\s" escapes, which are invalid escape sequences
# (DeprecationWarning on Python 3.6+) that re only tolerated by luck.
translated_log = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
    r"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.
    Subclasses implement visit_* methods like any other ast.NodeVisitor
    and call ``self.add_error(offending_node)`` when they find a
    problem; the error position is taken from the node itself.
    Subclasses should also set the class variable CHECK_DESC to the
    human-readable message reported for the check.
    """
    CHECK_DESC = 'No check message specified'

    def __init__(self, tree, filename):
        """Instantiated automatically by pep8.
        :param tree: the AST tree to walk
        :param filename: name of the file being analyzed (unused here)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """pep8 entry point: walk the tree and return collected errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error at *node* in pep8's (line, col, msg, cls) form."""
        self._errors.append(
            (node.lineno, node.col_offset,
             message or self.CHECK_DESC, self.__class__))

    def _check_call_names(self, call_node, names):
        """Return True when *call_node* is a direct call to one of *names*."""
        if (isinstance(call_node, ast.Call)
                and isinstance(call_node.func, ast.Name)
                and call_node.func.id in names):
            return True
        return False
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_(' (rule M319).
    Per the translation policy
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    debug-level logs must not be translated.  Assumes 'LOG' is a
    logger; *filename* is accepted so enforcement can later be limited
    to specific folders.
    """
    offending_prefix = "LOG.debug(_("
    if logical_line.startswith(offending_prefix):
        yield 0, "M319 Don't translate debug level logs"
def validate_log_translations(logical_line, physical_line, filename):
    """Require the _LC/_LE/_LI/_LW translation markers on LOG calls
    (rules M327-M331).  Test and tempest code is exempt.
    """
    # Translations are not required in the test and tempest directories.
    if "manila/tests" in filename or "contrib/tempest" in filename:
        return
    if pep8.noqa(physical_line):
        return
    # Each (pattern, message) pair is evaluated independently, so one
    # line can yield several findings, matching the original behavior.
    checks = (
        (log_translation_LC,
         "M327: LOG.critical messages require translations `_LC()`!"),
        (log_translation_LE,
         "M328: LOG.error and LOG.exception messages require translations "
         "`_LE()`!"),
        (log_translation_LI,
         "M329: LOG.info messages require translations `_LI()`!"),
        (log_translation_LW,
         "M330: LOG.warning messages require translations `_LW()`!"),
        (log_translation,
         "M331: Log messages require translations!"),
    )
    for pattern, msg in checks:
        if pattern.match(logical_line):
            yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function (rule M323).
    Any file that uses _() for translation must import _ explicitly;
    unit tests cannot be trusted to catch a missing import, so we track
    it here, remembering per-file imports in UNDERSCORE_IMPORT_FILES.
    """
    if filename in UNDERSCORE_IMPORT_FILES:
        # Already known to import _; nothing further to check.
        return
    imports_underscore = (underscore_import_check.match(logical_line) or
                          custom_underscore_check.match(logical_line))
    if imports_underscore:
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif (translated_log.match(logical_line) or
          string_translation.match(logical_line)):
        yield (0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrExc(BaseASTChecker):
    """Checks for the use of str() on an exception.
    This currently only handles the case where str() is used in
    the scope of an exception handler. If the exception is passed
    into a function, returned from an assertRaises, or used on an
    exception created in the same scope, this does not catch it.
    """
    CHECK_DESC = ('M325 str() cannot be used on an exception. '
                  'Remove or use six.text_type()')

    def __init__(self, tree, filename):
        super(CheckForStrExc, self).__init__(tree, filename)
        # Stack of exception variable names currently in scope.
        self.name = []
        # Call nodes already reported, to avoid duplicate errors.
        self.already_checked = []

    @staticmethod
    def _handler_name(handler):
        """Return the bound exception name of *handler*, or None.
        On Python 2, ``handler.name`` is an ast.Name node; on Python 3
        it is a plain string (or None when no name is bound).
        """
        name = handler.name
        if name is None:
            return None
        return name if isinstance(name, str) else name.id

    def visit_TryExcept(self, node):
        for handler in node.handlers:
            name = self._handler_name(handler)
            if name:
                self.name.append(name)
                super(CheckForStrExc, self).generic_visit(node)
                self.name = self.name[:-1]
            else:
                super(CheckForStrExc, self).generic_visit(node)

    # FIX: Python 3 renamed ast.TryExcept to ast.Try, so the original
    # visitor never fired on Python 3; reuse the same logic for both.
    visit_Try = visit_TryExcept

    def visit_Call(self, node):
        if self._check_call_names(node, ['str']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                # FIX: guard against a bare str() call with no arguments,
                # which previously raised IndexError inside the checker.
                if node.args and isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.
    Translations should not be concatenated with other strings; the
    extra text belongs inside the translated message so translators see
    the full context.
    """
    CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')
    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        """Flag the first `+` operand that is a translation call."""
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
    """Forbid imports from the deprecated ``oslo.`` namespace (N333)."""
    if pep8.noqa(physical_line):
        return
    if oslo_namespace_imports.match(logical_line):
        corrected = logical_line.replace('oslo.', 'oslo_')
        yield (0, "N333: '%s' must be used instead of '%s'." % (
            corrected, logical_line))
def factory(register):
    """Register every Manila hacking check with the pep8 framework."""
    all_checks = (
        validate_log_translations,
        check_explicit_underscore_import,
        no_translate_debug_logs,
        CheckForStrExc,
        CheckForTransAdd,
        check_oslo_namespace_imports,
    )
    for check in all_checks:
        register(check)
| weiting-chen/manila | manila/hacking/checks.py | Python | apache-2.0 | 8,771 | [
"VisIt"
] | b53ca0916c7e758e6959975d13b9fd77d42e22267e3b26b14d554bf11e368692 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Tests for arXiv workflows."""
from __future__ import absolute_import, division, print_function
import os
import mock
import pkg_resources
import pytest
@pytest.fixture
def record_oai_arxiv_plots():
    """Load the raw OAI arXiv record (with plots) fixture as bytes."""
    fixture = os.path.join('fixtures', 'oai_arxiv_record_with_plots.xml')
    return pkg_resources.resource_string(__name__, fixture)
@pytest.fixture
def record_oai_arxiv_accept():
    """Load the raw OAI arXiv record (to accept) fixture as bytes."""
    fixture = os.path.join('fixtures', 'oai_arxiv_record_to_accept.xml')
    return pkg_resources.resource_string(__name__, fixture)
@pytest.fixture
def some_record():
    """Load the generic record fixture as bytes."""
    fixture = os.path.join('fixtures', 'some_record.xml')
    return pkg_resources.resource_string(__name__, fixture)
@pytest.fixture
def arxiv_tarball():
    """Return the filesystem path of the 1407.7587v1 tarball fixture."""
    fixture = os.path.join('fixtures', '1407.7587v1')
    return pkg_resources.resource_filename(__name__, fixture)
@pytest.fixture
def arxiv_pdf():
    """Return the filesystem path of the 1407.7587v1 PDF fixture."""
    fixture = os.path.join('fixtures', '1407.7587v1.pdf')
    return pkg_resources.resource_filename(__name__, fixture)
@pytest.fixture
def arxiv_tarball_accept():
    """Open the 1511.01097 tarball fixture as a readable stream."""
    fixture = os.path.join('fixtures', '1511.01097')
    return pkg_resources.resource_stream(__name__, fixture)
@pytest.fixture
def arxiv_pdf_accept():
    """Open the 1511.01097v1 PDF fixture as a readable stream."""
    fixture = os.path.join('fixtures', '1511.01097v1.pdf')
    return pkg_resources.resource_stream(__name__, fixture)
def fake_download_file(obj, name, url):
    """Mock of download_file: serve known arXiv URLs from local fixtures.

    Stores the opened fixture stream under ``obj.files[name]`` and
    returns it; any unexpected URL raises, just like the original mock.
    """
    fixtures_by_url = {
        'http://arxiv.org/e-print/1407.7587': '1407.7587v1',
        'http://arxiv.org/pdf/1407.7587': '1407.7587v1.pdf',
    }
    fixture = fixtures_by_url.get(url)
    if fixture is None:
        raise Exception("Download file not mocked!")
    obj.files[name] = pkg_resources.resource_stream(
        __name__,
        os.path.join('fixtures', fixture)
    )
    return obj.files[name]
def fake_beard_api_block_request(dummy):
    """Mock of json_api_request: the block endpoint returns nothing."""
    empty_response = {}
    return empty_response
def fake_beard_api_request(url, data):
    """Mock of json_api_request: canned 'Rejected' relevance prediction."""
    canned_scores = [
        -0.20895982018928272,
        -1.6722188892559084,
        0.8358207729691823,
    ]
    return {'decision': u'Rejected', 'scores': canned_scores}
def fake_magpie_api_request(url, data):
    """Mock of json_api_request: canned Magpie labels per corpus.

    Returns a labels/status_code payload for the 'experiments',
    'categories' and 'keywords' corpora; any other corpus yields None,
    exactly like the original if/elif chain.
    """
    canned_responses = {
        "experiments": [
            ["CMS", 0.75495152473449707],
            ["GEMS", 0.45495152473449707],
            ["ALMA", 0.39597576856613159],
            ["XMM", 0.28373843431472778],
        ],
        "categories": [
            ["Astrophysics", 0.9941025972366333],
            ["Phenomenology-HEP", 0.0034253709018230438],
            ["Instrumentation", 0.0025460966862738132],
            ["Gravitation and Cosmology", 0.0017545684240758419],
        ],
        "keywords": [
            ["galaxy", 0.29424679279327393],
            ["numerical calculations", 0.22625420987606049],
            ["numerical calculations: interpretation of experiments",
             0.031719371676445007],
            ["luminosity", 0.028066780418157578],
            ["experimental results", 0.027784878388047218],
            ["talk", 0.023392116650938988],
        ],
    }
    labels = canned_responses.get(data.get('corpus'))
    if labels is None:
        return None
    return {"labels": labels, "status_code": 200}
@mock.patch('inspirehep.utils.helpers.download_file_to_record',
            side_effect=fake_download_file)
@mock.patch('inspirehep.modules.workflows.tasks.beard.json_api_request',
            side_effect=fake_beard_api_request)
@mock.patch('inspirehep.modules.workflows.tasks.magpie.json_api_request',
            side_effect=fake_magpie_api_request)
@mock.patch('inspirehep.modules.authors.receivers._query_beard_api',
            side_effect=fake_beard_api_block_request)
def test_harvesting_arxiv_workflow_rejected(
        mocked_api_request_beard_block, mocked_api_request_magpie,
        mocked_api_request_beard, mocked_download,
        small_app, record_oai_arxiv_plots):
    """Test a full harvesting workflow.

    Runs the 'article' workflow on a converted OAI arXiv record with all
    external services (file download, Beard, Magpie) mocked, verifies
    the halted object's extracted metadata and predictions, then resolves
    it as rejected and checks the workflow completes.
    """
    from invenio_workflows import (
        start, WorkflowEngine, ObjectStatus, workflow_object_class
    )
    from dojson.contrib.marc21.utils import create_record
    from invenio_db import db
    from inspirehep.dojson.hep import hep
    from inspirehep.modules.converter.xslt import convert
    # Convert to MARCXML, then dict, then HEP JSON
    record_oai_arxiv_plots_marcxml = convert(
        record_oai_arxiv_plots,
        "oaiarXiv2marcxml.xsl"
    )
    record_marc = create_record(record_oai_arxiv_plots_marcxml)
    record_json = hep.do(record_marc)
    extra_config = {
        "BEARD_API_URL": "http://example.com/beard",
        "MAGPIE_API_URL": "http://example.com/magpie",
    }
    workflow_uuid = None
    with small_app.app_context():
        with mock.patch.dict(small_app.config, extra_config):
            workflow_uuid = start('article', [record_json])
        eng = WorkflowEngine.from_uuid(workflow_uuid)
        obj = eng.processed_objects[0]
        # The workflow halts awaiting a human approval action.
        assert obj.status == ObjectStatus.HALTED
        assert obj.data_type == "hep"
        # Files should have been attached (tarball + pdf, and plots)
        assert obj.files["1407.7587.pdf"]
        assert obj.files["1407.7587.tar.gz"]
        assert len(obj.files) > 2
        # A publication note should have been extracted
        pub_info = obj.data.get('publication_info')
        assert pub_info
        assert pub_info[0]
        assert pub_info[0].get('year') == "2014"
        assert pub_info[0].get('journal_title') == "J. Math. Phys."
        # A prediction should have been made
        prediction = obj.extra_data.get("relevance_prediction")
        assert prediction
        assert prediction['decision'] == "Rejected"
        assert prediction['scores']['Rejected'] == 0.8358207729691823
        experiments_prediction = obj.extra_data.get("experiments_prediction")
        assert experiments_prediction
        assert experiments_prediction['experiments'] == [
            ['CMS', 0.7549515247344971]
        ]
        keywords_prediction = obj.extra_data.get("keywords_prediction")
        assert keywords_prediction
        assert {"label": "galaxy", "score": 0.29424679279327393,
                "accept": True} in keywords_prediction['keywords']
        # This record should not have been touched yet
        assert "approved" not in obj.extra_data
        # Now let's resolve it as accepted and continue
        # FIXME Should be accept, but record validation prevents us.
        obj.remove_action()
        obj.extra_data["approved"] = False
        # obj.extra_data["core"] = True
        obj.save()
        db.session.commit()
    with small_app.app_context():
        eng = WorkflowEngine.from_uuid(workflow_uuid)
        obj = eng.processed_objects[0]
        obj_id = obj.id
        obj.continue_workflow()
        obj = workflow_object_class.get(obj_id)
        # It was rejected
        assert obj.status == ObjectStatus.COMPLETED
@pytest.mark.xfail(reason='record updates are busted due to validation issue')
@mock.patch('inspirehep.utils.arxiv.download_file_to_record',
            side_effect=fake_download_file)
def test_harvesting_arxiv_workflow_accepted(
        mocked, small_app, record_oai_arxiv_plots):
    """Test a full harvesting workflow.

    Same flow as the rejected variant, but the halted object is resolved
    as approved/core; currently expected to fail (xfail) because record
    updates are blocked by a validation issue.
    """
    from invenio_workflows import (
        start, WorkflowEngine, ObjectStatus, workflow_object_class
    )
    from dojson.contrib.marc21.utils import create_record
    from invenio_db import db
    from inspirehep.dojson.hep import hep
    from inspirehep.modules.converter.xslt import convert
    # Convert to MARCXML, then dict, then HEP JSON
    record_oai_arxiv_plots_marcxml = convert(
        record_oai_arxiv_plots,
        "oaiarXiv2marcxml.xsl"
    )
    record_marc = create_record(record_oai_arxiv_plots_marcxml)
    record_json = hep.do(record_marc)
    workflow_uuid = None
    with small_app.app_context():
        workflow_uuid = start('article', [record_json])
        eng = WorkflowEngine.from_uuid(workflow_uuid)
        obj = eng.processed_objects[0]
        assert obj.status == ObjectStatus.HALTED
        assert obj.data_type == "hep"
        # Files should have been attached (tarball + pdf)
        assert obj.files["1407.7587.pdf"]
        assert obj.files["1407.7587.tar.gz"]
        # A publication note should have been extracted
        pub_info = obj.data.get('publication_info')
        assert pub_info
        assert pub_info[0]
        assert pub_info[0].get('year') == "2014"
        assert pub_info[0].get('journal_title') == "J. Math. Phys."
        # This record should not have been touched yet
        assert "approved" not in obj.extra_data
        # Now let's resolve it as accepted and continue
        # FIXME Should be accept, but record validation prevents us.
        obj.remove_action()
        obj.extra_data["approved"] = True
        obj.extra_data["core"] = True
        obj.save()
        db.session.commit()
    with small_app.app_context():
        eng = WorkflowEngine.from_uuid(workflow_uuid)
        obj = eng.processed_objects[0]
        obj_id = obj.id
        obj.continue_workflow()
        obj = workflow_object_class.get(obj_id)
        # It was accepted
        assert obj.status == ObjectStatus.COMPLETED
"Galaxy"
] | 6d05581467e904018563443082838bb22d38b86cdcb3f760c6f0377a4a0c5618 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression patent have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depening on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Usefull links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on)
import os, time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
    """Return scipy's cKDTree class, or None when scipy is unavailable."""
    try:
        from scipy.spatial import cKDTree
    except ImportError:
        return None
    return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdatas()[1:] is the image data itself in chuncks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
    """ checkImages(images)
    Validate a sequence of movie frames and normalize intensity ranges.
    PIL images pass through untouched; numpy arrays are converted to
    uint8 (floats are clipped to [0, 1] and scaled to [0, 255]) and must
    be 2D grayscale or 3D with 3 or 4 channels.  Shared by all movie
    formats.
    """
    validated = []
    for frame in images:
        if PIL and isinstance(frame, PIL.Image.Image):
            # PIL images are assumed to be fine as-is.
            validated.append(frame)
        elif np and isinstance(frame, np.ndarray):
            # Normalize dtype to uint8.
            if frame.dtype == np.uint8:
                validated.append(frame)
            elif frame.dtype in [np.float32, np.float64]:
                scaled = frame.copy()
                scaled[scaled < 0] = 0
                scaled[scaled > 1] = 1
                scaled *= 255
                validated.append(scaled.astype(np.uint8))
            else:
                validated.append(frame.astype(np.uint8))
            # Validate dimensionality: 2D, or 3D with 3/4 channels.
            if frame.ndim == 2:
                pass  # grayscale is fine
            elif frame.ndim == 3:
                if frame.shape[2] not in [3, 4]:
                    raise ValueError('This array can not represent an image.')
            else:
                raise ValueError('This array can not represent an image.')
        else:
            raise ValueError('Invalid image type: ' + str(type(frame)))
    return validated
def intToBin(i):
    """ Encode an integer as two bytes, little endian. """
    low = i % 256            # least significant byte
    high = int(i / 256)      # most significant byte
    return chr(low) + chr(high)
class GifWriter:
    """ GifWriter()
    Class that contains methods for helping write the animated GIF file.

    NOTE(review): byte sequences are assembled as ``str`` objects built
    with chr() -- this matches Python 2 semantics. Under Python 3 the
    file (opened in 'wb' mode by writeGif) would reject these str
    writes; confirm the intended Python version.
    """

    def getheaderAnim(self, im):
        """ getheaderAnim(im)
        Get animation header. To replace PILs getheader()[0]
        """
        # GIF89a signature, logical screen size (little endian), then a
        # packed field: \x87 = global color table present, 8 bits/pixel,
        # 256 entries; background color index 0; no pixel aspect ratio.
        bb = "GIF89a"
        bb += intToBin(im.size[0])
        bb += intToBin(im.size[1])
        bb += "\x87\x00\x00"
        return bb

    def getImageDescriptor(self, im, xy=None):
        """ getImageDescriptor(im, xy=None)
        Used for the local color table properties per image.
        Otherwise global color table applies to all frames irrespective of
        whether additional colors comes in play that require a redefined
        palette. Still a maximum of 256 color per frame, obviously.
        Written by Ant1 on 2010-08-22
        Modified by Alex Robinson in Janurari 2011 to implement subrectangles.
        """
        # Default: use full image and place at upper left
        if xy is None:
            xy = (0, 0)

        # Image separator,
        bb = '\x2C'

        # Image position and size
        bb += intToBin(xy[0])  # Left position
        bb += intToBin(xy[1])  # Top position
        bb += intToBin(im.size[0])  # image width
        bb += intToBin(im.size[1])  # image height

        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7+1)=256.
        bb += '\x87'

        # LZW minimum size code now comes later, begining of [image data] blocks
        return bb

    def getAppExt(self, loops=float('inf')):
        """ getAppExt(loops=float('inf'))
        Application extention. This part specifies the amount of loops.
        If loops is 0 or inf, it goes on infinitely.
        """
        if loops == 0 or loops == float('inf'):
            # 2**16-1 repeats is the closest representable approximation
            # of "forever" in the NETSCAPE2.0 loop counter.
            loops = 2 ** 16 - 1
            # bb = ""  # application extension should not be used
            #          # (the extension interprets zero loops
            #          # to mean an infinite number of loops)
            #          # Mmm, does not seem to work
        if True:
            bb = "\x21\xFF\x0B"  # application extension
            bb += "NETSCAPE2.0"
            bb += "\x03\x01"
            bb += intToBin(loops)
            bb += '\x00'  # end
        return bb

    def getGraphicsControlExt(self, duration=0.1, dispose=2):
        """ getGraphicsControlExt(duration=0.1, dispose=2)
        Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparancy.
        Dispose
        -------
        * 0 - No disposal specified.
        * 1 - Do not dispose. The graphic is to be left in place.
        * 2 - Restore to background color. The area used by the graphic
          must be restored to the background color.
        * 3 - Restore to previous. The decoder is required to restore the
          area overwritten by the graphic with what was there prior to
          rendering the graphic.
        * 4-7 - To be defined.
        """
        bb = '\x21\xF9\x04'
        bb += chr((dispose & 3) << 2)  # low bit 1 == transparency,
        # 2nd bit 1 == user input , next 3 bits, the low two of which are used,
        # are dispose.
        bb += intToBin(int(duration * 100))  # in 100th of seconds
        bb += '\x00'  # no transparant color
        bb += '\x00'  # end
        return bb

    def handleSubRectangles(self, images, subRectangles):
        """ handleSubRectangles(images)
        Handle the sub-rectangle stuff. If the rectangles are given by the
        user, the values are checked. Otherwise the subrectangles are
        calculated automatically.
        """
        if isinstance(subRectangles, (tuple, list)):
            # xy given directly

            # Check xy
            xy = subRectangles
            if xy is None:
                xy = (0, 0)
            if hasattr(xy, '__len__'):
                if len(xy) == len(images):
                    xy = [xxyy for xxyy in xy]
                else:
                    raise ValueError("len(xy) doesn't match amount of images.")
            else:
                # one position given: replicate it for every frame
                xy = [xy for im in images]
            # first frame always starts at the origin
            xy[0] = (0, 0)

        else:
            # Calculate xy using some basic image processing

            # Check Numpy
            if np is None:
                raise RuntimeError("Need Numpy to use auto-subRectangles.")

            # First make numpy arrays if required
            for i in range(len(images)):
                im = images[i]
                if isinstance(im, Image.Image):
                    tmp = im.convert()  # Make without palette
                    a = np.asarray(tmp)
                    if len(a.shape) == 0:
                        raise MemoryError("Too little memory to convert PIL image to array")
                    images[i] = a

            # Determine the sub rectangles
            images, xy = self.getSubRectangles(images)

        # Done
        return images, xy

    def getSubRectangles(self, ims):
        """ getSubRectangles(ims)
        Calculate the minimal rectangles that need updating each frame.
        Returns a two-element tuple containing the cropped images and a
        list of x-y positions.
        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes were reduced, the actual writing of the GIF
        goes faster. In some cases applying this method produces a GIF faster.
        """
        # Check image count
        if len(ims) < 2:
            return ims, [(0, 0) for i in ims]

        # We need numpy
        if np is None:
            raise RuntimeError("Need Numpy to calculate sub-rectangles. ")

        # Prepare
        ims2 = [ims[0]]
        xy = [(0, 0)]
        t0 = time.time()

        # Iterate over images
        prev = ims[0]
        for im in ims[1:]:
            # Get difference, sum over colors
            # NOTE(review): for uint8 frames the subtraction wraps around
            # before np.abs is applied -- confirm intended dtype here.
            diff = np.abs(im - prev)
            if diff.ndim == 3:
                diff = diff.sum(2)
            # Get begin and end for both dimensions
            X = np.argwhere(diff.sum(0))
            Y = np.argwhere(diff.sum(1))
            # Get rect coordinates
            if X.size and Y.size:
                x0, x1 = X[0], X[-1] + 1
                y0, y1 = Y[0], Y[-1] + 1
            else:  # No change ... make it minimal
                x0, x1 = 0, 2
                y0, y1 = 0, 2

            # Cut out and store
            im2 = im[y0:y1, x0:x1]
            prev = im
            ims2.append(im2)
            xy.append((x0, y0))

        # Done
        # print('%1.2f seconds to determine subrectangles of %i images' %
        #     (time.time()-t0, len(ims2)) )
        return ims2, xy

    def convertImagesToPIL(self, images, dither, nq=0):
        """ convertImagesToPIL(images, nq=0)
        Convert images to Paletted PIL images, which can then be
        written to a single animaged GIF.
        """
        # Convert to PIL images
        images2 = []
        for im in images:
            if isinstance(im, Image.Image):
                images2.append(im)
            elif np and isinstance(im, np.ndarray):
                if im.ndim == 3 and im.shape[2] == 3:
                    im = Image.fromarray(im, 'RGB')
                elif im.ndim == 3 and im.shape[2] == 4:
                    # drop the alpha channel
                    im = Image.fromarray(im[:, :, :3], 'RGB')
                elif im.ndim == 2:
                    im = Image.fromarray(im, 'L')
                images2.append(im)

        # Convert to paletted PIL images
        images, images2 = images2, []
        if nq >= 1:
            # NeuQuant algorithm
            for im in images:
                im = im.convert("RGBA")  # NQ assumes RGBA
                nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
                if dither:
                    im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
                else:
                    im = nqInstance.quantize(im)  # Use to quantize the image itself
                images2.append(im)
        else:
            # Adaptive PIL algorithm
            AD = Image.ADAPTIVE
            for im in images:
                im = im.convert('P', palette=AD, dither=dither)
                images2.append(im)

        # Done
        return images2

    def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
        """ writeGifToFile(fp, images, durations, loops, xys, disposes)
        Given a set of images writes the bytes to the specified stream.
        Returns the number of frames written.
        """
        # Obtain palette for all images and count each occurance
        palettes, occur = [], []
        for im in images:
            palettes.append(getheader(im)[1])
        for palette in palettes:
            occur.append(palettes.count(palette))

        # Select most-used palette as the global one (or first in case no max)
        globalPalette = palettes[occur.index(max(occur))]

        # Init
        frames = 0
        firstFrame = True

        for im, palette in zip(images, palettes):

            if firstFrame:
                # Write header

                # Gather info
                header = self.getheaderAnim(im)
                appext = self.getAppExt(loops)

                # Write
                fp.write(header)
                fp.write(globalPalette)
                fp.write(appext)

                # Next frame is not the first
                firstFrame = False

            if True:
                # Write palette and image data

                # Gather info
                data = getdata(im)
                imdes, data = data[0], data[1:]
                graphext = self.getGraphicsControlExt(durations[frames],
                                                      disposes[frames])
                # Make image descriptor suitable for using 256 local color palette
                lid = self.getImageDescriptor(im, xys[frames])

                # Write local header
                if (palette != globalPalette) or (disposes[frames] != 2):
                    # Use local color palette
                    fp.write(graphext)
                    fp.write(lid)  # write suitable image descriptor
                    fp.write(palette)  # write local color table
                    fp.write('\x08')  # LZW minimum size code
                else:
                    # Use global color palette
                    fp.write(graphext)
                    fp.write(imdes)  # write suitable image descriptor

                # Write image data
                for d in data:
                    fp.write(d)

            # Prepare for next round
            frames = frames + 1

        fp.write(";")  # end gif
        return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
             nq=0, subRectangles=True, dispose=None):
    """ writeGif(filename, images, duration=0.1, repeat=True, dither=False,
                 nq=0, subRectangles=True, dispose=None)
    Write an animated gif from the specified images.
    Parameters
    ----------
    filename : string
        The name of the file to write the image to.
    images : list
        Should be a list consisting of PIL images or numpy arrays.
        The latter should be between 0 and 255 for integer types, and
        between 0 and 1 for float types.
    duration : scalar or list of scalars
        The duration for all frames, or (if a list) for each frame.
    repeat : bool or integer
        The amount of loops. If True, loops infinitetely.
    dither : bool
        Whether to apply dithering
    nq : integer
        If nonzero, applies the NeuQuant quantization algorithm to create
        the color palette. This algorithm is superior, but slower than
        the standard PIL algorithm. The value of nq is the quality
        parameter. 1 represents the best quality. 10 is in general a
        good tradeoff between quality and speed. When using this option,
        better results are usually obtained when subRectangles is False.
    subRectangles : False, True, or a list of 2-element tuples
        Whether to use sub-rectangles. If True, the minimal rectangle that
        is required to update each frame is automatically detected. This
        can give significant reductions in file size, particularly if only
        a part of the image changes. One can also give a list of x-y
        coordinates if you want to do the cropping yourself. The default
        is True.
    dispose : int
        How to dispose each frame. 1 means that each frame is to be left
        in place. 2 means the background color should be restored after
        each frame. 3 means the decoder should restore the previous frame.
        If subRectangles==False, the default is 2, otherwise it is 1.

    Raises
    ------
    RuntimeError
        If PIL is not available.
    ValueError
        If a per-frame list (duration or dispose) does not match the
        number of images, or an image has an invalid type/shape.
    """

    # Check PIL
    if PIL is None:
        raise RuntimeError("Need PIL to write animated gif files.")

    # Check images
    images = checkImages(images)

    # Instantiate writer object
    gifWriter = GifWriter()

    # Check loops
    if repeat is False:
        loops = 1
    elif repeat is True:
        loops = 0  # zero means infinite
    else:
        loops = int(repeat)

    # Check duration: expand a scalar to one entry per frame
    if hasattr(duration, '__len__'):
        if len(duration) == len(images):
            duration = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        duration = [duration for im in images]

    # Check subrectangles
    if subRectangles:
        images, xy = gifWriter.handleSubRectangles(images, subRectangles)
        defaultDispose = 1  # Leave image in place
    else:
        # Normal mode
        xy = [(0, 0) for im in images]
        defaultDispose = 2  # Restore to background color.

    # Check dispose: expand a scalar to one entry per frame
    if dispose is None:
        dispose = defaultDispose
    if hasattr(dispose, '__len__'):
        if len(dispose) != len(images):
            # Fixed: the error message previously referred to len(xy).
            raise ValueError("len(dispose) doesn't match amount of images.")
    else:
        dispose = [dispose for im in images]

    # Make images in a format that we can write easy
    images = gifWriter.convertImagesToPIL(images, dither, nq)

    # Write; always close the file, even if writing fails halfway
    fp = open(filename, 'wb')
    try:
        gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
    finally:
        fp.close()
def readGif(filename, asNumpy=True):
    """ readGif(filename, asNumpy=True)
    Read images from an animated GIF file. Returns a list of numpy
    arrays, or, if asNumpy is false, a list if PIL images.

    Raises RuntimeError when PIL or numpy is missing, IOError when the
    file does not exist.
    """
    # Check PIL
    if PIL is None:
        raise RuntimeError("Need PIL to read animated gif files.")

    # Check Numpy
    if np is None:
        raise RuntimeError("Need Numpy to read animated gif files.")

    # Check whether it exists
    if not os.path.isfile(filename):
        raise IOError('File not found: ' + str(filename))

    # Load file using PIL
    pilIm = PIL.Image.open(filename)
    pilIm.seek(0)

    # Read all images inside; PIL signals the end of the sequence
    # by raising EOFError from seek().
    images = []
    try:
        while True:
            # Get image as numpy array
            tmp = pilIm.convert()  # Make without palette
            a = np.asarray(tmp)
            if len(a.shape) == 0:
                raise MemoryError("Too little memory to convert PIL image to array")
            # Store, and next
            images.append(a)
            pilIm.seek(pilIm.tell() + 1)
    except EOFError:
        pass

    # Convert to normal PIL images if needed
    if not asNumpy:
        images2 = images
        images = []
        for im in images2:
            images.append(PIL.Image.fromarray(im))

    # Done
    return images
class NeuQuant:
    """ NeuQuant(image, samplefac=10, colors=256)
    samplefac should be an integer number of 1 or higher, 1
    being the highest quality, but the slowest performance.
    With avalue of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeof between speed
    and quality.
    colors is the amount of colors to reduce the image to. This
    should best be a power of two.
    See also:
    http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
    License of the NeuQuant Neural-Net Quantization Algorithm
    ---------------------------------------------------------
    Copyright (c) 1994 Anthony Dekker
    Ported to python by Marius van Voorden in 2010
    NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
    See "Kohonen neural networks for optimal colour quantization"
    in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
    for a discussion of the algorithm.
    See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
    Any party obtaining a copy of these files from the author, directly or
    indirectly, is granted, free of charge, a full and unrestricted irrevocable,
    world-wide, paid up, royalty-free, nonexclusive right and license to deal
    in this software and documentation files (the "Software"), including without
    limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons who receive
    copies from any such party to do so, with the only requirement being
    that this copyright notice remain intact.

    NOTE(review): this port uses Python 2 integer-division semantics in
    several places (see setconstants, geta, learn); under Python 3 some
    of these become floats and break ">>" shifts and array indexing --
    confirm the targeted Python version.
    """

    # All attributes are declared as class-level placeholders and
    # filled in by setconstants().
    NCYCLES = None  # Number of learning cycles
    NETSIZE = None  # Number of colours used
    SPECIALS = None  # Number of reserved colours used
    BGCOLOR = None  # Reserved background colour
    CUTNETSIZE = None
    MAXNETPOS = None

    INITRAD = None  # For 256 colours, radius starts at 32
    RADIUSBIASSHIFT = None
    RADIUSBIAS = None
    INITBIASRADIUS = None
    RADIUSDEC = None  # Factor of 1/30 each cycle

    ALPHABIASSHIFT = None
    INITALPHA = None  # biased by 10 bits

    GAMMA = None
    BETA = None
    BETAGAMMA = None

    network = None  # The network itself
    colormap = None  # The network itself
    netindex = None  # For network lookup - really 256
    bias = None  # Bias and freq arrays for learning
    freq = None
    pimage = None

    # Four primes near 500 - assume no image has a length so large
    # that it is divisible by all four primes
    PRIME1 = 499
    PRIME2 = 491
    PRIME3 = 487
    PRIME4 = 503
    MAXPRIME = PRIME4

    pixels = None
    samplefac = None

    a_s = None

    def setconstants(self, samplefac, colors):
        # Learning parameters of the Kohonen network.
        self.NCYCLES = 100  # Number of learning cycles
        self.NETSIZE = colors  # Number of colours used
        self.SPECIALS = 3  # Number of reserved colours used
        self.BGCOLOR = self.SPECIALS - 1  # Reserved background colour
        self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
        self.MAXNETPOS = self.NETSIZE - 1

        # NOTE(review): true division yields a float under Python 3,
        # which later breaks ">>" in learn() -- confirm intent.
        self.INITRAD = self.NETSIZE / 8  # For 256 colours, radius starts at 32
        self.RADIUSBIASSHIFT = 6
        self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
        self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
        self.RADIUSDEC = 30  # Factor of 1/30 each cycle

        self.ALPHABIASSHIFT = 10  # Alpha starts at 1
        self.INITALPHA = 1 << self.ALPHABIASSHIFT  # biased by 10 bits

        self.GAMMA = 1024.0
        self.BETA = 1.0 / 1024.0
        self.BETAGAMMA = self.BETA * self.GAMMA

        # Working arrays.
        self.network = np.empty((self.NETSIZE, 3), dtype='float64')  # The network itself
        self.colormap = np.empty((self.NETSIZE, 4), dtype='int32')  # The network itself
        self.netindex = np.empty(256, dtype='int32')  # For network lookup - really 256
        self.bias = np.empty(self.NETSIZE, dtype='float64')  # Bias and freq arrays for learning
        self.freq = np.empty(self.NETSIZE, dtype='float64')
        self.pixels = None
        self.samplefac = samplefac

        self.a_s = {}

    def __init__(self, image, samplefac=10, colors=256):
        # Check Numpy
        if np is None:
            raise RuntimeError("Need Numpy for the NeuQuant algorithm.")

        # Check image: must be larger than the largest sampling prime,
        # and carry an alpha channel (pixels are read as uint32 RGBA).
        if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
            raise IOError("Image is too small")
        if image.mode != "RGBA":
            raise IOError("Image mode should be RGBA.")

        # Initialize
        self.setconstants(samplefac, colors)
        # NOTE(review): np.fromstring and Image.tostring are removed in
        # modern NumPy/Pillow (frombuffer/tobytes) -- confirm versions.
        self.pixels = np.fromstring(image.tostring(), np.uint32)
        self.setUpArrays()

        self.learn()
        self.fix()
        self.inxbuild()

    def writeColourMap(self, rgb, outstream):
        """Write the colour map to outstream; rgb selects channel order."""
        for i in range(self.NETSIZE):
            bb = self.colormap[i, 0];
            gg = self.colormap[i, 1];
            rr = self.colormap[i, 2];
            outstream.write(rr if rgb else bb)
            outstream.write(gg)
            outstream.write(bb if rgb else rr)
        return self.NETSIZE

    def setUpArrays(self):
        """Initialise special colours and evenly spread network neurons."""
        self.network[0, 0] = 0.0  # Black
        self.network[0, 1] = 0.0
        self.network[0, 2] = 0.0

        self.network[1, 0] = 255.0  # White
        self.network[1, 1] = 255.0
        self.network[1, 2] = 255.0

        # RESERVED self.BGCOLOR # Background

        for i in range(self.SPECIALS):
            self.freq[i] = 1.0 / self.NETSIZE
            self.bias[i] = 0.0

        # Remaining neurons start as a uniform gray ramp.
        for i in range(self.SPECIALS, self.NETSIZE):
            p = self.network[i]
            p[:] = (255.0 * (i - self.SPECIALS)) / self.CUTNETSIZE

            self.freq[i] = 1.0 / self.NETSIZE
            self.bias[i] = 0.0

    # Omitted: setPixels

    def altersingle(self, alpha, i, b, g, r):
        """Move neuron i towards biased (b,g,r) by factor alpha"""
        n = self.network[i]  # Alter hit neuron
        n[0] -= (alpha * (n[0] - b))
        n[1] -= (alpha * (n[1] - g))
        n[2] -= (alpha * (n[2] - r))

    def geta(self, alpha, rad):
        """Return the (cached) neighbourhood alpha profile for radius rad."""
        try:
            return self.a_s[(alpha, rad)]
        except KeyError:
            length = rad * 2 - 1
            # NOTE(review): float under Python 3, making range() and
            # a[mid] below raise -- originally integer division.
            mid = length / 2
            q = np.array(list(range(mid - 1, -1, -1)) + list(range(-1, mid)))
            # Quadratic falloff, zero at the centre (the hit neuron is
            # updated separately by altersingle).
            a = alpha * (rad * rad - q * q) / (rad * rad)
            a[mid] = 0
            self.a_s[(alpha, rad)] = a
            return a

    def alterneigh(self, alpha, rad, i, b, g, r):
        """Pull neurons in the neighbourhood of i towards (b,g,r)."""
        # Clip the neighbourhood to the learnable part of the network.
        if i - rad >= self.SPECIALS - 1:
            lo = i - rad
            start = 0
        else:
            lo = self.SPECIALS - 1
            start = (self.SPECIALS - 1 - (i - rad))
        if i + rad <= self.NETSIZE:
            hi = i + rad
            end = rad * 2 - 1
        else:
            hi = self.NETSIZE
            end = (self.NETSIZE - (i + rad))
        a = self.geta(alpha, rad)[start:end]

        p = self.network[lo + 1:hi]
        p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)

    #def contest(self, b, g, r):
    #    """ Search for biased BGR values
    #    Finds closest neuron (min dist) and updates self.freq
    #    finds best neuron (min dist-self.bias) and returns position
    #    for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
    #    self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
    #
    #    i, j = self.SPECIALS, self.NETSIZE
    #    dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
    #    bestpos = i + np.argmin(dists)
    #    biasdists = dists - self.bias[i:j]
    #    bestbiaspos = i + np.argmin(biasdists)
    #    self.freq[i:j] -= self.BETA * self.freq[i:j]
    #    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    #    self.freq[bestpos] += self.BETA
    #    self.bias[bestpos] -= self.BETAGAMMA
    #    return bestbiaspos

    def contest(self, b, g, r):
        """ Search for biased BGR values
        Finds closest neuron (min dist) and updates self.freq
        finds best neuron (min dist-self.bias) and returns position
        for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
        self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
        i, j = self.SPECIALS, self.NETSIZE
        dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
        bestpos = i + np.argmin(dists)
        biasdists = dists - self.bias[i:j]
        bestbiaspos = i + np.argmin(biasdists)
        # Decay all frequencies/biases, then boost the winner.
        self.freq[i:j] *= (1 - self.BETA)
        self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
        self.freq[bestpos] += self.BETA
        self.bias[bestpos] -= self.BETAGAMMA
        return bestbiaspos

    def specialFind(self, b, g, r):
        """Return the index of a reserved colour matching (b,g,r), or -1."""
        for i in range(self.SPECIALS):
            n = self.network[i]
            if n[0] == b and n[1] == g and n[2] == r:
                return i
        return -1

    def learn(self):
        """Main Kohonen learning loop over a sample of the pixels."""
        biasRadius = self.INITBIASRADIUS
        alphadec = 30 + ((self.samplefac - 1) / 3)
        lengthcount = self.pixels.size
        samplepixels = lengthcount / self.samplefac
        delta = samplepixels / self.NCYCLES
        alpha = self.INITALPHA

        i = 0;
        # NOTE(review): biasRadius is float under Python 3 (INITRAD is
        # float), so this ">>" raises -- confirm Python 2 target.
        rad = biasRadius >> self.RADIUSBIASSHIFT
        if rad <= 1:
            rad = 0

        print("Beginning 1D learning: samplepixels = %1.2f  rad = %i" %
              (samplepixels, rad))
        step = 0
        pos = 0
        # Pick a prime step that does not divide the pixel count so the
        # sampled positions sweep the whole image.
        if lengthcount % NeuQuant.PRIME1 != 0:
            step = NeuQuant.PRIME1
        elif lengthcount % NeuQuant.PRIME2 != 0:
            step = NeuQuant.PRIME2
        elif lengthcount % NeuQuant.PRIME3 != 0:
            step = NeuQuant.PRIME3
        else:
            step = NeuQuant.PRIME4

        i = 0
        printed_string = ''
        while i < samplepixels:
            if i % 100 == 99:
                tmp = '\b' * len(printed_string)
                printed_string = str((i + 1) * 100 / samplepixels) + "%\n"
                print(tmp + printed_string)
            # Unpack one RGBA pixel (stored as uint32).
            p = self.pixels[pos]
            r = (p >> 16) & 0xff
            g = (p >> 8) & 0xff
            b = (p) & 0xff

            if i == 0:  # Remember background colour
                self.network[self.BGCOLOR] = [b, g, r]

            j = self.specialFind(b, g, r)
            if j < 0:
                j = self.contest(b, g, r)

            if j >= self.SPECIALS:  # Don't learn for specials
                a = (1.0 * alpha) / self.INITALPHA
                self.altersingle(a, j, b, g, r)
                if rad > 0:
                    self.alterneigh(a, rad, j, b, g, r)

            pos = (pos + step) % lengthcount

            i += 1
            # Decay alpha and the neighbourhood radius every cycle.
            if i % delta == 0:
                alpha -= alpha / alphadec
                biasRadius -= biasRadius / self.RADIUSDEC
                rad = biasRadius >> self.RADIUSBIASSHIFT
                if rad <= 1:
                    rad = 0

        finalAlpha = (1.0 * alpha) / self.INITALPHA
        print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)

    def fix(self):
        """Round the learned network into the integer colormap."""
        for i in range(self.NETSIZE):
            for j in range(3):
                x = int(0.5 + self.network[i, j])
                x = max(0, x)
                x = min(255, x)
                self.colormap[i, j] = x
            # Fourth column remembers the original neuron index.
            self.colormap[i, 3] = i

    def inxbuild(self):
        """Sort the colormap on green and build the netindex lookup."""
        previouscol = 0
        startpos = 0
        for i in range(self.NETSIZE):
            p = self.colormap[i]
            q = None
            smallpos = i
            smallval = p[1]  # Index on g
            # Find smallest in i..self.NETSIZE-1
            for j in range(i + 1, self.NETSIZE):
                q = self.colormap[j]
                if q[1] < smallval:  # Index on g
                    smallpos = j
                    smallval = q[1]  # Index on g
            q = self.colormap[smallpos]
            # Swap p (i) and q (smallpos) entries
            if i != smallpos:
                p[:], q[:] = q, p.copy()

            # smallval entry is now in position i
            if smallval != previouscol:
                self.netindex[previouscol] = (startpos + i) >> 1
                for j in range(previouscol + 1, smallval):
                    self.netindex[j] = i
                previouscol = smallval
                startpos = i
        self.netindex[previouscol] = (startpos + self.MAXNETPOS) >> 1
        for j in range(previouscol + 1, 256):  # Really 256
            self.netindex[j] = self.MAXNETPOS

    def paletteImage(self):
        """ PIL weird interface for making a paletted image: create an image which
        already has the palette, and use that in Image.quantize. This function
        returns this palette image. """
        if self.pimage is None:
            palette = []
            for i in range(self.NETSIZE):
                palette.extend(self.colormap[i][:3])

            palette.extend([0] * (256 - self.NETSIZE) * 3)

            # a palette image to use for quant
            self.pimage = Image.new("P", (1, 1), 0)
            self.pimage.putpalette(palette)
        return self.pimage

    def quantize(self, image):
        """ Use a kdtree to quickly find the closest palette colors for the pixels """
        if get_cKDTree():
            return self.quantize_with_scipy(image)
        else:
            print('Scipy not available, falling back to slower version.')
            return self.quantize_without_scipy(image)

    def quantize_with_scipy(self, image):
        """Vectorized quantization: nearest palette colour via cKDTree."""
        w, h = image.size
        px = np.asarray(image).copy()
        px2 = px[:, :, :3].reshape((w * h, 3))

        cKDTree = get_cKDTree()
        kdtree = cKDTree(self.colormap[:, :3], leafsize=10)
        result = kdtree.query(px2)
        colorindex = result[1]
        print("Distance: %1.2f" % (result[0].sum() / (w * h)))
        px2[:] = self.colormap[colorindex, :3]

        return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())

    def quantize_without_scipy(self, image):
        """" This function can be used if no scipy is availabe.
        It's 7 times slower though.
        """
        w, h = image.size
        px = np.asarray(image).copy()
        # Memoize per distinct (r,g,b) so each colour is searched once.
        memo = {}
        for j in range(w):
            for i in range(h):
                key = (px[i, j, 0], px[i, j, 1], px[i, j, 2])
                try:
                    val = memo[key]
                except KeyError:
                    val = self.convert(*key)
                    memo[key] = val
                px[i, j, 0], px[i, j, 1], px[i, j, 2] = val
        return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())

    def convert(self, *color):
        """Return the palette RGB triple closest to the given colour."""
        i = self.inxsearch(*color)
        return self.colormap[i, :3]

    def inxsearch(self, r, g, b):
        """Search for BGR values 0..255 and return colour index"""
        dists = (self.colormap[:, :3] - np.array([r, g, b]))
        a = np.argmin((dists * dists).sum(1))
        return a
if __name__ == '__main__':
    # Small self-test: write a 5-frame animated GIF of static bars that
    # fade to black over the sequence.
    im = np.zeros((200, 200), dtype=np.uint8)
    im[10:30, :] = 100
    im[:, 80:120] = 255
    im[-50:-40, :] = 50

    images = [im * 1.0, im * 0.8, im * 0.6, im * 0.4, im * 0]
    writeGif('lala3.gif', images, duration=0.5, dither=0)
| PennyQ/stero_3D_dust_map | 360-sphere-photo/make_anaglyph/images2gif.py | Python | gpl-3.0 | 36,970 | [
"NEURON"
] | 6e1edcfa3392230ee5c1dec4b5a8721db2c474d4101d73590b73c42469b8468e |
from copy import deepcopy
import numpy as np
from mpi4py import MPI
from scipy import special
from scipy.constants import physical_constants
from sirius.coefficient_array import CoefficientArray, diag, inner, threaded
# Boltzmann constant in Hartree atomic units (Ha/K), obtained by
# converting the CODATA value in eV/K with the Hartree energy in eV.
kb = (
    physical_constants["Boltzmann constant in eV/K"][0]
    / physical_constants["Hartree energy in eV"][0]
)
def find_chemical_potential(fun, mu0, tol=1e-10):
    """
    Find mu such that fun(mu) is (approximately) zero, using an
    adaptive-step direction search: the step grows by 1.25x while the
    sign of fun does not change and shrinks by 4x on a sign change.

    Keyword Arguments:
    fun -- ne - fn(mu)
    mu0 -- initial guess
    tol -- convergence tolerance on |fun(mu)|

    Returns:
    mu  -- chemical potential

    Raises:
    RuntimeError -- if |fun(mu)| < tol is not reached within the
                    iteration limit
    """
    mu = mu0
    de = 0.1
    s = 1
    nmax = 1000
    counter = 0
    while np.abs(fun(mu)) > tol and counter < nmax:
        sp = s
        s = 1 if fun(mu) > 0 else -1
        if s == sp:
            # same direction: accelerate
            de *= 1.25
        else:
            # decrease step size if we change direction
            de *= 0.25
        mu += s * de
        counter += 1
    # Fixed: previously asserted against a hard-coded 1e-10 instead of
    # honoring `tol`, and non-convergence surfaced as a bare assert.
    if not np.abs(fun(mu)) < tol:
        raise RuntimeError(
            "chemical potential search did not converge within %d steps" % nmax
        )
    return mu
@threaded
def df_fermi_entropy_reg(fn, dd):
    """
    Derivative of the regularized Fermi-Dirac entropy with respect to
    the occupation numbers.

    Keyword Arguments:
    fn -- occupation numbers
    dd -- regularization parameter
    """
    f = np.array(fn).flatten()
    fc = 1 - f
    reg_f = f + dd * fc    # regularized occupation
    reg_fc = fc + dd * f   # regularized complement
    return (
        f * (1 - dd) / reg_f
        - fc * (1 - dd) / reg_fc
        + np.log(reg_f)
        - np.log(reg_fc)
    )
@threaded
def fermi_entropy_reg(fn, dd):
    """
    Regularized Fermi-Dirac entropy
    sum(f*log(f + dd*(1-f)) + (1-f)*log(1-f + dd*f)).

    Keyword Arguments:
    fn -- occupation numbers
    dd -- regularization parameter
    """
    f = np.array(fn).flatten()
    fc = 1 - f
    return np.sum(f * np.log(f + dd * fc) + fc * np.log(fc + dd * f))
@threaded
def fermi_entropy(fn):
    """
    Fermi-Dirac entropy sum(f*log(f) + (1-f)*log(1-f)); occupations
    that are numerically 0 or 1 contribute nothing and are skipped to
    avoid log(0).
    """
    interior = ~(np.isclose(fn, 0, atol=1e-20) | np.isclose(fn, 1, atol=1e-15))
    f = fn[interior]
    return np.sum(f * np.log(f) + (1 - f) * np.log(1 - f))
@threaded
def fermi_dirac(x):
    """
    Fermi-Dirac function 1/(1+exp(x)); the exponential is evaluated
    only where it neither saturates to 1 nor underflows to 0.
    """
    out = np.zeros_like(x)
    saturated_one = x < -50   # exp(x) negligible -> f = 1
    saturated_zero = x > 40   # exp(x) overflows  -> f = 0 (already zero)
    out[saturated_one] = 1
    mid = ~(saturated_one | saturated_zero)
    out[mid] = 1 / (1 + np.exp(x[mid]))
    return out
@threaded
def inverse_fermi_dirac(f):
    """
    Invert the Fermi-Dirac function: given occupations f in [0, 1],
    return x = log(1/f - 1), i.e. (e - mu)/kT.

    Occupations numerically at 0 (1) map to large positive (negative)
    values, with an increasing offset per entry so the resulting band
    energies are distinct.
    """
    en = np.zeros_like(f, dtype=np.double)
    is_zero = np.isclose(f, 0, atol=1e-20)
    is_one = np.isclose(f, 1, rtol=1e-15)
    en[is_zero] = 50 + np.arange(0, np.sum(is_zero))
    # make sure we do not get degenerate band energies
    en[is_one] = -50 - np.arange(0, np.sum(is_one))
    ii = np.logical_not((np.logical_or(is_zero, is_one)))
    en[ii] = np.log(1 / f[ii] - 1)
    return en
@threaded
def fermi_dirac_reg(x, dd):
    """
    Occupations of the regularized Fermi-Dirac distribution, obtained
    by solving dS_reg/df + x = 0 for f numerically.

    x = (ε-μ) / kT

    Keyword Arguments:
    x  -- scaled band energies
    dd -- regularization parameter

    Returns:
    f -- occupation numbers

    Raises:
    AssertionError -- if the root search does not succeed
    """
    # Fixed: import the subpackage explicitly; a bare `import scipy`
    # does not guarantee scipy.optimize is loaded as an attribute.
    import scipy.optimize

    def fguess(x):
        # plain (unregularized) Fermi-Dirac occupation as starting point
        return 1 / (1 + np.exp(x))

    rootf = lambda f: df_fermi_entropy_reg(f, dd=dd) + x
    res = scipy.optimize.root(rootf, x0=fguess(x))
    assert res["success"]
    return res["x"]
def chemical_potential(ek, T, nel, fermi_function, kw, comm):
    """
    find mu for Fermi-Dirac smearing

    Solves sum_k w_k f((e_k - mu)/kT) = nel for mu over all k-points
    (gathered across the communicator when ek is a CoefficientArray).

    Keyword Arguments:
    ek             -- band energies
    T              -- temperature in Kelvin
    nel            -- number of electrons
    fermi_function -- f((e-mu)/kT)
    kw             -- k-point weights
    comm           -- communicator

    Returns:
    mu -- chemical potential
    """
    kT = kb * T
    if isinstance(ek, CoefficientArray):
        # gather band energies from all ranks into one flat array
        vek = np.hstack(comm.allgather(ek.to_array()))
        # build a matching array of k-point weights, one per band energy
        vkw = deepcopy(ek)
        for k in vkw._data.keys():
            vkw[k] = np.ones_like(vkw[k]) * kw[k]
        vkw = np.hstack(comm.allgather(vkw.to_array()))
    else:
        vek = ek
        vkw = kw

    # print('fermi_function.shape', fermi_function(vek).shape)
    # update occupation numbers
    mu = find_chemical_potential(
        lambda mu: nel - np.sum(vkw * fermi_function((vek - mu) / kT)), mu0=0
    )

    return mu
@threaded
def inverse_efermi_spline(fn):
    """
    inverts f(x)
    where x is (epsilon-mu) / kT

    Occupations beyond what x in [lb, ub] can represent are clamped to
    those fixed boundary values.

    Returns:
    x
    """
    ifu = fn > 0.5
    xi = np.zeros_like(fn)
    # remove numerical noise
    fn = np.where(fn < 0, 0, fn)
    ub = 8.0
    lb = -5.0
    # occupations outside the invertible range of the spline
    if0 = efermi_spline(np.array(ub)) > fn
    if1 = efermi_spline(np.array(lb)) < fn
    ifb = np.logical_or(if0, if1)
    # and ~ifb => and is not at the boundary
    iifu = np.logical_and(ifu, ~ifb)
    iifl = np.logical_and(~ifu, ~ifb)
    # analytic inverse of the two Gaussian branches of efermi_spline
    xi[iifu] = (1 - np.sqrt(1 - 2 * np.log(2 - 2 * fn[iifu]))) / np.sqrt(2)
    xi[iifl] = (-1 + np.sqrt(1 - 2 * np.log(2 * fn[iifl]))) / np.sqrt(2)
    xi[if0] = ub
    xi[if1] = lb
    return xi
@threaded
def efermi_spline(x):
    """
    Smearing function built from two Gaussian branches spliced at x=0:
    f(x) = 1 - exp(1/2 - (x - 1/sqrt(2))^2)/2  for x < 0,
    f(x) =     exp(1/2 - (x + 1/sqrt(2))^2)/2  for x >= 0.
    """
    x = np.array(x, copy=False)
    out = np.empty_like(x)
    lower = x < 0
    upper = ~lower
    shift = 1 / np.sqrt(2)
    out[lower] = 1 - 1 / 2 * np.exp(1 / 2 - (x[lower] - shift) ** 2)
    out[upper] = 1 / 2 * np.exp(1 / 2 - (x[upper] + shift) ** 2)
    return out
@threaded
def gaussian_spline_entropy(fn):
    """
    Entropy of the Gaussian-spline smearing expressed directly in
    terms of the occupation numbers; the two branches correspond to
    f > 1/2 and f <= 1/2 of efermi_spline. A small dd guards the logs
    against f exactly at 1 (0).
    """
    from scipy.special import erfc

    S = np.empty_like(fn)
    ifu = fn > 0.5
    ifl = ~ifu
    dd = 1e-10

    S[ifu] = (
        np.sqrt(np.e * np.pi) * erfc(np.sqrt(0.5 - np.log(2 - 2 * fn[ifu] + dd)))
        - 2
        * np.sqrt(2)
        * (-1 + fn[ifu])
        * (-1 + np.sqrt(1 - 2 * np.log(2 - 2 * fn[ifu] + dd)))
    ) / 4.0
    S[ifl] = (
        np.sqrt(np.e * np.pi) * erfc(np.sqrt(0.5 - np.log(2 * fn[ifl] + dd)))
    ) / 4.0 + (fn[ifl] * (-1 + np.sqrt(1 - 2 * np.log(2 * fn[ifl] + dd)))) / np.sqrt(2)

    # feq0 = np.isclose(fn, 0, atol=1e-20)
    # feq1 = np.isclose(fn, 1, rtol=1e-14)
    # S[feq0] = 0
    # S[feq1] = 0

    return S
@threaded
def gaussian_spline_entropy_df(fn):
    """
    Derivative of gaussian_spline_entropy with respect to the
    occupation numbers, with matching branch split at f = 1/2 and the
    same dd log-guard.
    """
    dSdf = np.empty_like(fn)
    ifu = fn > 0.5
    ifl = ~ifu
    dd = 1e-10

    dSdf[ifu] = (
        1
        + (-1 - dd / (2 + dd - 2 * fn[ifu]) + 2 * np.log(2 + dd - 2 * fn[ifu]))
        / np.sqrt(1 - 2 * np.log(2 + dd - 2 * fn[ifu]))
    ) / np.sqrt(2)
    dSdf[ifl] = (
        -1
        + (1 + dd / (dd + 2 * fn[ifl]) - 2 * np.log(dd + 2 * fn[ifl]))
        / np.sqrt(1 - 2 * np.log(dd + 2 * fn[ifl]))
    ) / np.sqrt(2)

    return dSdf
@threaded
def gaussian_spline_entropy_x(x):
    """
    Entropy of the Gaussian-spline smearing as a function of
    x = (e - mu)/kT; symmetric in x, so only |x| enters.
    """
    z = np.abs(x)
    gauss_term = 2 * np.exp(-z * (np.sqrt(2) + z)) * z
    erfc_term = np.sqrt(np.e * np.pi) * special.erfc(1 / np.sqrt(2) + z)
    return 0.25 * (gauss_term + erfc_term)
class Smearing:
    """
    Base class for occupation-number smearing schemes; stores the
    temperature, electron count, spin setting, k-point weights and the
    k-point communicator shared by all concrete smearings.
    """

    def __init__(self, T, nel, nspin, kw, comm=MPI.COMM_SELF):
        """
        Keyword Arguments:
        T     -- Temperature in Kelvin
        nel   -- number of electrons
        nspin -- number of spin channels (1 or 2)
        kw    -- k-point weights
        comm  -- k-point communicator
        """
        self.T = T
        self.kT = kb * T
        self.nel = nel
        self.nspin = nspin
        self.kw = kw
        self.comm = comm

    @property
    def mo(self):
        """
        Returns:
        maximal occupancy, either 1 or 2
        """
        return {1: 2, 2: 1}[self.nspin]
class GaussianSplineSmearingReg(Smearing):
    """
    Gaussian spline smearing (regularized variant).
    """

    def __init__(self, T, nel, nspin, kw, comm=MPI.COMM_SELF):
        """
        Keyword Arguments:
        T   -- Temperature in Kelvin
        nel -- number of electrons
        kw  -- k-point weights
        """
        super().__init__(T=T, nel=nel, nspin=nspin, kw=kw, comm=comm)

    def entropy(self, fn):
        """Entropy -kT * sum_k w_k S(f_k), evaluated from occupations."""
        # x = inverse_efermi_spline(fn)
        return -self.kT * np.sum(self.kw * gaussian_spline_entropy(fn))

    def ek(self, fn):
        """Band energies (relative to mu) recovered from occupations."""
        # Fixed: inverse_efermi_spline takes only fn; the previous call
        # passed a keyword `a=10` that is not in its signature and
        # raised TypeError.
        x = inverse_efermi_spline(fn) * self.kT
        return x

    def fn(self, ek):
        """
        Occupation numbers from band energies.

        Keyword Arguments:
        ek -- band energies

        NOTE(review): unlike GaussianSplineSmearing.fn this neither
        divides nel by the spin factor nor returns mu -- confirm
        against callers.
        """
        mu = chemical_potential(
            ek,
            T=self.T,
            nel=self.nel,
            fermi_function=efermi_spline,
            kw=self.kw,
            comm=self.comm,
        )
        occ = efermi_spline((ek - mu) / self.kT)
        return occ * self.mo

    def dSdf(self, fn):
        """Gradient of the entropy contribution with respect to fn."""
        return -self.kT * gaussian_spline_entropy_df(fn)
class GaussianSplineSmearing(Smearing):
    """
    Gaussian spline smearing
    """

    def __init__(self, T, nel, nspin, kw, comm=MPI.COMM_SELF):
        """
        Keyword Arguments:
        T   -- Temperature in Kelvin
        nel -- number of electrons
        kw  -- k-point weights
        """
        super().__init__(T=T, nel=nel, nspin=nspin, kw=kw, comm=comm)
        # redundant: the base class constructor already stores T
        self.T = T

    def entropy(self, fn):
        # -kT * sum_k w_k S(x_k), with x recovered from the occupations
        x = inverse_efermi_spline(fn)
        return -self.kT * np.sum(self.kw * gaussian_spline_entropy_x(x))

    def ek(self, fn):
        # band energies (relative to mu) from occupations
        x = inverse_efermi_spline(fn) * self.kT
        return x

    def fn(self, ek):
        """
        Keyword Arguments:
        ek -- band energies

        Returns:
        (occupation numbers scaled by max occupancy, chemical potential)
        """
        factor = {1: 2, 2: 1}
        mu = chemical_potential(
            ek,
            T=self.T,
            nel=self.nel / factor[self.nspin],
            fermi_function=efermi_spline,
            kw=self.kw,
            comm=self.comm)
        occ = efermi_spline((ek - mu) / self.kT)
        return occ * factor[self.nspin], mu

    def dSdf(self, fn):
        """
        Gradient of the entropy wrt fn; via the inverse spline this
        reduces to -kT * x.
        """
        x = inverse_efermi_spline(fn)
        return -self.kT * x
class FermiDiracSmearing(Smearing):
    """
    Fermi-Dirac smearing
    """

    def __init__(self, T, nel, nspin, kw, comm=MPI.COMM_SELF):
        """
        Keyword Arguments:
        T    -- Temperature in Kelvin
        nel  -- number of electrons
        kw   -- k-point weights
        comm -- k-point communicator
        """
        super().__init__(T, nel, nspin, kw, comm)

    def entropy(self, fn):
        # kT * sum_k w_k S(f_k / max_occ); occupations are rescaled to
        # [0, 1] before evaluating the entropy
        factor = {1: 2, 2: 1}
        S = self.kT * np.sum(self.kw * fermi_entropy(fn / factor[self.nspin]))
        return S

    def fn(self, ek):
        """
        Keyword Arguments:
        ek -- band energies

        Returns:
        (occupation numbers scaled by max occupancy, chemical potential)
        """
        factor = {1: 2, 2: 1}
        mu = chemical_potential(
            ek,
            T=self.T,
            nel=self.nel / factor[self.nspin],
            fermi_function=fermi_dirac,
            kw=self.kw,
            comm=self.comm,
        )
        occ = fermi_dirac((ek - mu) / self.kT)
        return occ * factor[self.nspin], mu

    def ek(self, fn):
        """
        Keyword Arguments:
        fn -- occupation numbers

        Returns:
        band energies relative to the chemical potential
        """
        fn_loc = fn / self.mo
        x = inverse_fermi_dirac(fn_loc)
        return x * self.kT
class FermiDiracSmearingReg(Smearing):
    """
    Fermi-Dirac smearing with regularization
    """

    def __init__(self, T, nel, nspin, kw, comm=MPI.COMM_SELF, dd=1e-10):
        """
        Keyword Arguments:
        T     -- Temperature in Kelvin
        nel   -- number of electrons
        nspin -- number of spins: 1 or 2
        kw    -- k-point weights
        comm  -- k-point communicator
        dd    -- regularization parameter
        """
        super().__init__(T=T, nel=nel, nspin=nspin, kw=kw, comm=comm)
        self.dd = dd
        # scale maps occupations into the [0, 1] range of one spin channel.
        if nspin == 1:
            self.scale = 0.5
        elif nspin == 2:
            self.scale = 1
        else:
            # BUG FIX: message previously read "npsin must be 1 or 2".
            raise ValueError("nspin must be 1 or 2")

    def fn(self, ek):
        """
        occupation numbers
        TODO: this is not correct
        Keyword Arguments:
        ek -- band energies
        Returns:
        fn -- occupation numbers
        mu -- chemical potential
        """
        factor = {1: 2, 2: 1}
        mu = chemical_potential(
            ek,
            T=self.T,
            nel=self.nel / factor[self.nspin],
            kw=self.kw,
            comm=self.comm,
            fermi_function=lambda x: fermi_dirac(x),
        )
        # NOTE(review): multiplies by self.nspin here, not factor[self.nspin]
        # as the sibling classes do -- likely related to the TODO above.
        fn = self.nspin * fermi_dirac((ek - mu) / self.kT)
        return fn, mu

    def entropy(self, fn):
        """
        computes the entropy
        Keyword Arguments:
        fn -- occupation numbers
        """
        S = fermi_entropy_reg(self.scale * fn, dd=self.dd)
        # Local (per-rank) contribution; kw * S is a distributed container
        # whose local entries live in ._data.
        entropy_loc = self.kT * np.sum(np.array(list((self.kw * S)._data.values())))
        loc = np.array(entropy_loc, dtype=np.float64)
        entropy = np.array(0.0, dtype=np.float64)
        MPI.COMM_WORLD.Allreduce([loc, MPI.DOUBLE], [entropy, MPI.DOUBLE], op=MPI.SUM)
        # FIX: np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
        # documented, backward-compatible replacement.
        return entropy.item()

    def dSdf(self, fn):
        """
        computes the gradient wrt fn of the entropy
        Keyword Arguments:
        fn -- occupation numbers
        """
        from ..baarman.direct_minimization import df_fermi_entropy
        df = self.scale * df_fermi_entropy(self.scale * fn, dd=self.dd)
        return df
def make_gaussian_spline_smearing(T, ctx, kset):
    """
    Build a GaussianSplineSmearing from a simulation context and k-point set.

    Keyword Arguments:
    T    -- Temperature in Kelvin
    ctx  -- simulation context (provides unit cell and max occupancy)
    kset -- k-point set (provides weights and communicator)
    """
    nel = ctx.unit_cell().num_valence_electrons
    # max occupancy 1 -> spin-polarized-like factor 2; 2 -> factor 1
    mo_to_nspin = {1: 2, 2: 1}
    spin = mo_to_nspin[ctx.max_occupancy()]
    return GaussianSplineSmearing(T=T,
                                  nel=nel,
                                  nspin=spin,
                                  kw=kset.w,
                                  comm=kset.ctx().comm_k())
def make_fermi_dirac_smearing(T, ctx, kset):
    """
    Build a FermiDiracSmearing from a simulation context and k-point set.

    Keyword Arguments:
    T    -- Temperature in Kelvin
    ctx  -- simulation context (provides unit cell and max occupancy)
    kset -- k-point set (provides weights and communicator)
    """
    nel = ctx.unit_cell().num_valence_electrons
    # max occupancy 1 -> spin-polarized-like factor 2; 2 -> factor 1
    mo_to_nspin = {1: 2, 2: 1}
    spin = mo_to_nspin[ctx.max_occupancy()]
    return FermiDiracSmearing(T=T,
                              nel=nel,
                              nspin=spin,
                              kw=kset.w,
                              comm=kset.ctx().comm_k())
| electronic-structure/sirius | python_module/sirius/edft/smearing.py | Python | bsd-2-clause | 13,660 | [
"DIRAC",
"Gaussian"
] | 3d36daa0b41ce7444b303d0087495d7efe5b74e49e47c35b5eaac9eb2654f4fa |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
import unittest
from pymatgen.analysis.eos import EOS, NumericalEOS
from pymatgen.util.testing import PymatgenTest
class EOSTest(PymatgenTest):
    """Tests for the equation-of-state (EOS) fitting models."""

    def setUp(self):
        # Si data from Cormac
        self.volumes = [25.987454833, 26.9045702104, 27.8430241908,
                        28.8029649591, 29.7848370694, 30.7887887064,
                        31.814968055, 32.8638196693, 33.9353435494,
                        35.0299842495, 36.1477417695, 37.2892088485,
                        38.4543854865, 39.6437162376, 40.857201102,
                        42.095136449, 43.3579668329, 44.6456922537,
                        45.9587572656, 47.2973100535, 48.6614988019,
                        50.0517680652, 51.4682660281, 52.9112890601,
                        54.3808371612, 55.8775030703, 57.4014349722,
                        58.9526328669]
        self.energies = [-7.63622156576, -8.16831294894, -8.63871612686,
                         -9.05181213218, -9.41170988374, -9.72238224345,
                         -9.98744832526, -10.210309552, -10.3943401353,
                         -10.5427238068, -10.6584266073, -10.7442240979,
                         -10.8027285713, -10.8363890521, -10.8474912964,
                         -10.838157792, -10.8103477586, -10.7659387815,
                         -10.7066179666, -10.6339907853, -10.5495538639,
                         -10.4546677714, -10.3506386542, -10.2386366017,
                         -10.1197772808, -9.99504030111, -9.86535084973,
                         -9.73155247952]
        num_eos = EOS(eos_name="numerical_eos")
        self.num_eos_fit = num_eos.fit(self.volumes, self.energies)

    def test_run_all_models(self):
        """Fit every registered EOS model and compare against reference output."""
        # these have been checked for plausibility,
        # but are not benchmarked against independently known values
        test_output = {
            'birch': {'b0': 0.5369258244952931,
                      'b1': 4.178644231838501,
                      'e0': -10.8428039082307,
                      'v0': 40.98926572870838},
            'birch_murnaghan': {'b0': 0.5369258245417454,
                                'b1': 4.178644235500821,
                                'e0': -10.842803908240892,
                                'v0': 40.98926572528106},
            'deltafactor': {'b0': 0.5369258245611414,
                            'b1': 4.178644231924639,
                            'e0': -10.842803908299294,
                            'v0': 40.989265727927936},
            'murnaghan': {'b0': 0.5144967693786603,
                          'b1': 3.9123862262572264,
                          'e0': -10.836794514626673,
                          'v0': 41.13757930387086},
            'numerical_eos': {'b0': 0.5557257614101998,
                              'b1': 4.344039148405489,
                              'e0': -10.847490826530702,
                              'v0': 40.857200064982536},
            'pourier_tarantola': {'b0': 0.5667729960804602,
                                  'b1': 4.331688936974368,
                                  'e0': -10.851486685041658,
                                  'v0': 40.86770643373908},
            'vinet': {'b0': 0.5493839425156859,
                      'b1': 4.3051929654936885,
                      'e0': -10.846160810560756,
                      'v0': 40.916875663779784}
        }
        for eos_name in EOS.MODELS:
            eos = EOS(eos_name=eos_name)
            _ = eos.fit(self.volumes, self.energies)
            # BUG FIX: the tuple previously contained 'b0' twice and never
            # checked 'v0', although v0 reference values are tabulated above.
            for param in ('b0', 'b1', 'e0', 'v0'):
                # TODO: solutions only stable to 2 decimal places
                # between different machines, this seems far too low?
                self.assertArrayAlmostEqual(_.results[param],
                                            test_output[eos_name][param],
                                            decimal=1)

    def test_fitting(self):
        """Check Vinet fits against independently known values for Mg, Si, Ti."""
        # courtesy of @katherinelatimer2013
        # known correct values for Vinet
        # Mg
        mp153_volumes = [
            16.69182365,
            17.25441763,
            17.82951915,
            30.47573817,
            18.41725977,
            29.65211363,
            28.84346369,
            19.01777055,
            28.04965916,
            19.63120886,
            27.27053682,
            26.5059864,
            20.25769112,
            25.75586879,
            20.89736201,
            25.02003097,
            21.55035204,
            24.29834347,
            22.21681221,
            23.59066888,
            22.89687316
        ]
        mp153_energies = [
            -1.269884575,
            -1.339411225,
            -1.39879471,
            -1.424480995,
            -1.44884184,
            -1.45297499,
            -1.4796246,
            -1.49033594,
            -1.504198485,
            -1.52397006,
            -1.5264432,
            -1.54609291,
            -1.550269435,
            -1.56284009,
            -1.569937375,
            -1.576420935,
            -1.583470925,
            -1.58647189,
            -1.591436505,
            -1.592563495,
            -1.594347355
        ]
        mp153_known_energies_vinet = [
            -1.270038831,
            -1.339366487,
            -1.398683238,
            -1.424556061,
            -1.448746649,
            -1.453000456,
            -1.479614511,
            -1.490266797,
            -1.504163502,
            -1.523910268,
            -1.526395734,
            -1.546038792,
            -1.550298657,
            -1.562800797,
            -1.570015274,
            -1.576368392,
            -1.583605186,
            -1.586404575,
            -1.591578378,
            -1.592547954,
            -1.594410995
        ]
        # C: 4.590843262
        # B: 2.031381599
        mp153_known_e0_vinet = -1.594429229
        mp153_known_v0_vinet = 22.95764159
        eos = EOS(eos_name='vinet')
        fit = eos.fit(mp153_volumes, mp153_energies)
        np.testing.assert_array_almost_equal(fit.func(mp153_volumes),
                                             mp153_known_energies_vinet,
                                             decimal=5)
        self.assertAlmostEqual(mp153_known_e0_vinet, fit.e0, places=4)
        self.assertAlmostEqual(mp153_known_v0_vinet, fit.v0, places=4)
        # expt. value 35.5, known fit 36.16
        self.assertAlmostEqual(fit.b0_GPa, 36.16258657649159)
        # Si
        mp149_volumes = [
            15.40611854,
            14.90378698,
            16.44439516,
            21.0636307,
            17.52829835,
            16.98058208,
            18.08767363,
            18.65882487,
            19.83693435,
            15.91961152,
            22.33987173,
            21.69548924,
            22.99688883,
            23.66666322,
            20.44414922,
            25.75374305,
            19.24187473,
            24.34931029,
            25.04496106,
            27.21116571,
            26.4757653
        ]
        mp149_energies = [
            -4.866909695,
            -4.7120965,
            -5.10921253,
            -5.42036228,
            -5.27448405,
            -5.200810795,
            -5.331915665,
            -5.3744186,
            -5.420058145,
            -4.99862686,
            -5.3836163,
            -5.40610838,
            -5.353700425,
            -5.31714654,
            -5.425263555,
            -5.174988295,
            -5.403353105,
            -5.27481447,
            -5.227210275,
            -5.058992615,
            -5.118805775
        ]
        mp149_known_energies_vinet = [
            -4.866834585,
            -4.711786499,
            -5.109642598,
            -5.420093739,
            -5.274605844,
            -5.201025714,
            -5.331899365,
            -5.374315789,
            -5.419671568,
            -4.998827503,
            -5.383703409,
            -5.406038887,
            -5.353926272,
            -5.317484252,
            -5.424963418,
            -5.175090887,
            -5.403166824,
            -5.275096644,
            -5.227427635,
            -5.058639193,
            -5.118654229
        ]
        # C: 4.986513158
        # B: 4.964976215
        mp149_known_e0_vinet = -5.424963506
        mp149_known_v0_vinet = 20.44670279
        eos = EOS(eos_name='vinet')
        fit = eos.fit(mp149_volumes, mp149_energies)
        np.testing.assert_array_almost_equal(fit.func(mp149_volumes),
                                             mp149_known_energies_vinet,
                                             decimal=5)
        self.assertAlmostEqual(mp149_known_e0_vinet, fit.e0, places=4)
        self.assertAlmostEqual(mp149_known_v0_vinet, fit.v0, places=4)
        # expt. value 97.9, known fit 88.39
        self.assertAlmostEqual(fit.b0_GPa, 88.38629264585195)
        # Ti
        mp72_volumes = [
            12.49233296,
            12.91339188,
            13.34380224,
            22.80836212,
            22.19195533,
            13.78367177,
            21.58675559,
            14.23310328,
            20.99266009,
            20.4095592,
            14.69220297,
            19.83736385,
            15.16106697,
            19.2759643,
            15.63980711,
            18.72525771,
            16.12851491,
            18.18514127,
            16.62729878,
            17.65550599,
            17.13626153
        ]
        mp72_energies = [
            -7.189983803,
            -7.33985647,
            -7.468745423,
            -7.47892835,
            -7.54945107,
            -7.578012237,
            -7.61513166,
            -7.66891898,
            -7.67549721,
            -7.73000681,
            -7.74290386,
            -7.77803379,
            -7.801246383,
            -7.818964483,
            -7.84488189,
            -7.85211192,
            -7.87486651,
            -7.876767777,
            -7.892161533,
            -7.892199957,
            -7.897605303
        ]
        mp72_known_energies_vinet = [
            -7.189911138,
            -7.339810181,
            -7.468716095,
            -7.478678021,
            -7.549402394,
            -7.578034391,
            -7.615240977,
            -7.669091347,
            -7.675683891,
            -7.730188653,
            -7.74314028,
            -7.778175824,
            -7.801363213,
            -7.819030923,
            -7.844878053,
            -7.852099741,
            -7.874737806,
            -7.876686864,
            -7.891937429,
            -7.892053535,
            -7.897414664
        ]
        # C: 3.958192998
        # B: 6.326790098
        mp72_known_e0_vinet = -7.897414997
        mp72_known_v0_vinet = 17.13223229
        eos = EOS(eos_name='vinet')
        fit = eos.fit(mp72_volumes, mp72_energies)
        np.testing.assert_array_almost_equal(fit.func(mp72_volumes),
                                             mp72_known_energies_vinet,
                                             decimal=5)
        self.assertAlmostEqual(mp72_known_e0_vinet, fit.e0, places=4)
        self.assertAlmostEqual(mp72_known_v0_vinet, fit.v0, places=4)
        # expt. value 107.3, known fit 112.63
        self.assertAlmostEqual(fit.b0_GPa, 112.62927094503254)

    def test_numerical_eoswrapper(self):
        """Direct NumericalEOS use must agree with the EOS wrapper."""
        # using numerical eos directly vs via EOS wrapper
        numerical_eos = NumericalEOS(self.volumes, self.energies)
        numerical_eos.fit()
        self.assertGreater(len(numerical_eos.eos_params), 3)
        self.assertAlmostEqual(float(numerical_eos.e0), self.num_eos_fit.e0, 3)
        self.assertAlmostEqual(float(numerical_eos.v0), self.num_eos_fit.v0, 3)
        self.assertAlmostEqual(float(numerical_eos.b0), self.num_eos_fit.b0, 3)
        self.assertAlmostEqual(float(numerical_eos.b1), self.num_eos_fit.b1, 3)
        self.assertArrayAlmostEqual(numerical_eos.eos_params, self.num_eos_fit.eos_params)

    def test_numerical_eos_values(self):
        """Spot-check numerical EOS fitted constants."""
        np.testing.assert_almost_equal(self.num_eos_fit.e0, -10.84749, decimal=3)
        np.testing.assert_almost_equal(self.num_eos_fit.v0, 40.857201, decimal=1)
        np.testing.assert_almost_equal(self.num_eos_fit.b0, 0.55, decimal=2)
        # TODO: why were these tests commented out?
        # np.testing.assert_almost_equal(self.num_eos_fit.b0_GPa, 89.0370727, decimal=1)
        # np.testing.assert_almost_equal(self.num_eos_fit.b1, 4.344039, decimal=2)

    def test_eos_func(self):
        """The fitted callable must be consistent across input types and APIs."""
        # list vs np.array arguments
        np.testing.assert_almost_equal(self.num_eos_fit.func([0, 1, 2]),
                                       self.num_eos_fit.func(np.array([0, 1, 2])),
                                       decimal=10)
        # func vs _func
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit._func(
                                           0., self.num_eos_fit.eos_params),
                                       decimal=10)
        # test the eos function: energy = f(volume)
        # numerical eos evaluated at volume=0 == a0 of the fit polynomial
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit.eos_params[-1], decimal=6)
        birch_eos = EOS(eos_name="birch")
        birch_eos_fit = birch_eos.fit(self.volumes, self.energies)
        # birch eos evaluated at v0 == e0
        np.testing.assert_almost_equal(birch_eos_fit.func(birch_eos_fit.v0),
                                       birch_eos_fit.e0, decimal=6)
        # TODO: Reactivate
        # fig = birch_eos_fit.plot_ax(ax=None, show=False, fontsize=8, title="birch eos")
        # self.assertTrue(hasattr(fig, "savefig"))

    def test_eos_func_call(self):
        """Calling the fit object is equivalent to calling .func()."""
        # eos_fit_obj.func(volume) == eos_fit_obj(volume)
        np.testing.assert_almost_equal(self.num_eos_fit.func(0.),
                                       self.num_eos_fit(0.), decimal=10)

    def test_summary_dict(self):
        """results dict exposes exactly e0, b0, b1, v0."""
        d = {"e0": self.num_eos_fit.e0, "b0": self.num_eos_fit.b0,
             "b1": self.num_eos_fit.b1, "v0": self.num_eos_fit.v0}
        self.assertDictEqual(self.num_eos_fit.results, d)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| tschaume/pymatgen | pymatgen/analysis/tests/test_eos.py | Python | mit | 14,339 | [
"pymatgen"
] | a239424060ba50dd2f047c2b701108dc661e1e309e7e580f6b678f55c5983de2 |
from pyopenscad import *
from math import *
def main():
    """
    Generate 'crystal.scad': three stacked 10x10 layers of spheres, each
    layer offset by one unit in x and y to form a stepped "crystal".
    """
    # Max height of crystal (# vertical circles)
    max_height = 3
    # Max number of spheres per square layer
    sphere_max_count = 100
    # Spheres per row/column of a layer
    side = int(sqrt(sphere_max_count))
    # Holds the overall object
    spheres = []
    for layer in range(max_height):
        # Each layer starts one unit further along x and y than the last.
        offset = layer
        for row in range(side):
            for col in range(side):
                x = offset + 2 * col
                y = offset + 2 * row
                spheres.append(translate(Sphere(1, fn=100), x, y, layer))
    obj = union(spheres)
    save('crystal.scad', obj)
if __name__ == '__main__':
    # Script entry point.
    main()
| jaredpetersen/wou | CS314/Lab2/crystal.py | Python | mit | 1,515 | [
"CRYSTAL"
] | d7ead2e66588ff7c32bfabac3297818554f7cb523cfeb022cf6e8fa658969e4d |
#!/usr/bin/env python
'''
GAEUnit: Google App Engine Unit Test Framework
Usage:
1. Put gaeunit.py into your application directory. Modify 'app.yaml' by
adding the following mapping below the 'handlers:' section:
- url: /test.*
script: gaeunit.py
2. Write your own test cases by extending unittest.TestCase.
3. Launch the development web server. Point your browser to:
http://localhost:8080/test?name=my_test_module
Replace 'my_test_module' with the module that contains your test cases,
and modify the port if necessary.
For plain text output add '&format=plain' to the URL.
4. The results are displayed as the tests are run.
Visit http://code.google.com/p/gaeunit for more information and updates.
------------------------------------------------------------------------------
Copyright (c) 2008, George Lei and Steven R. Farley. All rights reserved.
Distributed under the following BSD license:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------------------------------------------------------------------
'''
# Module metadata.
__author__ = "George Lei and Steven R. Farley"
__email__ = "George.Z.Lei@Gmail.com"
# Version string extracted from the RCS revision keyword.
__version__ = "#Revision: 1.2.2 $"[11:-2]
__copyright__= "Copyright (c) 2008, George Lei and Steven R. Farley"
__license__ = "BSD"
__url__ = "http://code.google.com/p/gaeunit"
import sys
import os
import unittest
import StringIO
import time
import re
import logging
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_file_stub
from google.appengine.api import urlfetch_stub
from google.appengine.api.memcache import memcache_stub
# Directory searched for test modules when no 'package'/'name' arg is given.
_DEFAULT_TEST_DIR = 'test'
##############################################################################
# Web Test Runner
##############################################################################
class _WebTestResult(unittest.TestResult):
def __init__(self):
unittest.TestResult.__init__(self)
self.testNumber = 0
def getDescription(self, test):
return test.shortDescription() or str(test)
def printErrors(self):
stream = StringIO.StringIO()
stream.write('{')
self.printErrorList('ERROR', self.errors, stream)
stream.write(',')
self.printErrorList('FAIL', self.failures, stream)
stream.write('}')
return stream.getvalue()
def printErrorList(self, flavour, errors, stream):
stream.write('"%s":[' % flavour)
for test, err in errors:
stream.write('{"desc":"%s", "detail":"%s"},' %
(self.getDescription(test), self.escape(err)))
if len(errors):
stream.seek(-1, 2)
stream.write("]")
def escape(self, s):
newstr = re.sub('"', '"', s)
newstr = re.sub('\n', '<br/>', newstr)
return newstr
class WebTestRunner:
    """Runs a suite against the shared _WebTestResult that /testresult polls."""

    def run(self, test):
        "Run the given test case or test suite."
        # Request a fresh global result object so the poller reports this run.
        result = getTestResult(True)
        result.testNumber = test.countTestCases()
        # Dead code removed: wall-clock timing (startTime/stopTime/timeTaken)
        # was computed into unused locals and discarded.
        test(result)
        return result
#############################################################
# Http request handler
#############################################################
class GAEUnitTestRunner(webapp.RequestHandler):
    # Handles GET /test -- discovers, loads, and runs the requested suites.

    def __init__(self):
        # NOTE(review): does not call webapp.RequestHandler.__init__;
        # the framework initializes handlers via initialize() -- confirm.
        self.package = "test"

    def get(self):
        """Execute a test suite in response to an HTTP GET request.

        The request URL supports the following formats:
        http://localhost:8080/test?package=test_package
        http://localhost:8080/test?name=test

        Parameters 'package' and 'name' should not be used together.  If both
        are specified, 'name' is selected and 'package' is ignored.

        When 'package' is set, GAEUnit will run all TestCase classes from
        all modules in the package.

        When 'name' is set, GAEUnit will assume it is either a module (possibly
        preceded by its package); a module and test class; or a module,
        test class, and test method.  For example,
        http://localhost:8080/test?name=test_package.test_module.TestClass.test_method
        runs only test_method() whereas,
        http://localhost:8080/test?name=test_package.test_module.TestClass
        runs all test methods in TestClass, and
        http://localhost:8080/test?name=test_package.test_module
        runs all test methods in all test classes in test_module.

        If the default URL is requested:
        http://localhost:8080/test
        it is equivalent to
        http://localhost:8080/test?package=test
        """
        # Reset the shared error buffer for this request.
        svcErr = getServiceErrorStream()
        format = self.request.get("format")
        if not format or format not in ["html", "plain"]:
            format = "html"
        # Warn about (but do not reject) unexpected query parameters.
        unknownArgs = [arg for arg in self.request.arguments() if arg not in ("package", "name", "format")]
        if len(unknownArgs) > 0:
            for arg in unknownArgs:
                _logError("The parameter '%s' is unrecognizable, please check it out." % arg)
        package_name = self.request.get("package")
        test_name = self.request.get("name")
        loader = unittest.defaultTestLoader
        suite = unittest.TestSuite()
        # As a special case for running tests under the 'test' directory without
        # needing an "__init__.py" file:
        if not _DEFAULT_TEST_DIR in sys.path:
            sys.path.append(_DEFAULT_TEST_DIR)
        if not package_name and not test_name:
            # Default: every *.py module in the test directory.
            module_names = [mf[0:-3] for mf in os.listdir(_DEFAULT_TEST_DIR) if mf.endswith(".py")]
            for module_name in module_names:
                module = reload(__import__(module_name))
                suite.addTest(loader.loadTestsFromModule(module))
        elif test_name:
            try:
                module = reload(__import__(test_name))
                suite.addTest(loader.loadTestsFromModule(module))
            except:
                # NOTE(review): silently ignores import errors for 'name';
                # the generic error message below is the only feedback.
                pass
        elif package_name:
            try:
                package = reload(__import__(package_name))
                # The package must declare its test modules in __all__.
                module_names = package.__all__
                for module_name in module_names:
                    suite.addTest(loader.loadTestsFromName('%s.%s' % (package_name, module_name)))
            except Exception, error:
                _logError("Error loading package '%s': %s" % (package_name, error))
        if suite.countTestCases() > 0:
            runner = None
            if format == "html":
                # HTML mode: serve the self-refreshing page; results are
                # delivered asynchronously via /testresult.
                runner = WebTestRunner()
                self.response.out.write(testResultPageContent)
            else:
                self.response.headers["Content-Type"] = "text/plain"
                if svcErr.getvalue() != "":
                    # A service error occurred; report it instead of running.
                    self.response.out.write(svcErr.getvalue())
                else:
                    self.response.out.write("====================\n" \
                                            "GAEUnit Test Results\n" \
                                            "====================\n\n")
                    runner = unittest.TextTestRunner(self.response.out)
            if runner:
                self._runTestSuite(runner, suite)
        else:
            _logError("'%s' is not found or does not contain any tests." % \
                      (test_name or package_name))

    def _runTestSuite(self, runner, suite):
        """Run the test suite.

        Preserve the current development apiproxy, create a new apiproxy and
        temporary datastore that will be used for this test suite, run the
        test suite, and restore the development apiproxy.  This isolates the
        test and development datastores from each other.
        """
        original_apiproxy = apiproxy_stub_map.apiproxy
        try:
            apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
            temp_stub = datastore_file_stub.DatastoreFileStub(
                'GAEUnitDataStore', None, None)
            apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', temp_stub)
            apiproxy_stub_map.apiproxy.RegisterStub('urlfetch', urlfetch_stub.URLFetchServiceStub())
            apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
            runner.run(suite)
        finally:
            # Always restore the development stubs, even if the run raised.
            apiproxy_stub_map.apiproxy = original_apiproxy
class ResultSender(webapp.RequestHandler):
    # Handles GET /testresult -- returns the current run status as JSON,
    # polled periodically by the JavaScript in testResultPageContent.

    def get(self):
        cache = StringIO.StringIO()
        result = getTestResult()
        # svcerr=1 signals a service-level error to the page script.
        if svcErr.getvalue() != "":
            cache.write('{"svcerr":%d, "svcinfo":"%s",' %
                        (1, svcErr.getvalue()))
        else:
            cache.write('{"svcerr":%d, "svcinfo":"%s",' % (0, ""))
        cache.write(('"runs":"%d", "total":"%d", ' \
                     '"errors":"%d", "failures":"%d",') %
                    (result.testsRun, result.testNumber,
                     len(result.errors), len(result.failures)))
        # printErrors() emits the {"ERROR":[...],"FAIL":[...]} object.
        cache.write('"details":%s' % result.printErrors())
        cache.write('}')
        self.response.out.write(cache.getvalue())
# Module-level singletons shared between the /test and /testresult handlers.
svcErr = StringIO.StringIO()
testResult = None

def getServiceErrorStream():
    """Return the shared service-error buffer, emptied for a new request."""
    global svcErr
    if svcErr:
        svcErr.truncate(0)
    else:
        svcErr = StringIO.StringIO()
    return svcErr
def _logInfo(s):
    """Log an informational message to the server log only."""
    logging.info(s)
def _logError(s):
    """Log an error and record it in the shared svcErr buffer so it is
    surfaced to the HTTP client via /testresult."""
    # TODO: When using 'plain' format, the error is not returned to
    # the HTTP client.  To fix this, svcErr must have been previously set
    # to self.response.out for the plain format.  Also, a non-200 error
    # code would help 'curl' and other automated clients to determine
    # the success/fail status of the test suite.
    logging.warn(s)
    svcErr.write(s)
def getTestResult(createNewObject=False):
    """Return the shared _WebTestResult, creating a fresh one when
    createNewObject is True (start of a run) or none exists yet."""
    global testResult
    if createNewObject or not testResult:
        testResult = _WebTestResult()
    return testResult
################################################
# Browser codes
################################################
# Self-refreshing HTML results page served for the "html" output format.
# Its embedded JavaScript polls /testresult every 3 seconds and renders the
# JSON status.  Served verbatim -- do not edit the string casually.
testResultPageContent = """
<html>
<head>
<style>
body {font-family:arial,sans-serif; text-align:center}
#title {font-family:"Times New Roman","Times Roman",TimesNR,times,serif; font-size:28px; font-weight:bold; text-align:center}
#version {font-size:87%; text-align:center;}
#weblink {font-style:italic; text-align:center; padding-top:7px; padding-bottom:20px}
#results {margin:0pt auto; text-align:center; font-weight:bold}
#testindicator {width:950px; height:16px; border-style:solid; border-width:2px 1px 1px 2px; background-color:#f8f8f8;}
#footerarea {text-align:center; font-size:83%; padding-top:25px}
#errorarea {padding-top:25px}
.error {border-color: #c3d9ff; border-style: solid; border-width: 2px 1px 2px 1px; width:945px; padding:1px; margin:0pt auto; text-align:left}
.errtitle {background-color:#c3d9ff; font-weight:bold}
</style>
<script language="javascript" type="text/javascript">
/* Create a new XMLHttpRequest object to talk to the Web server */
var xmlHttp = false;
/*@cc_on @*/
/*@if (@_jscript_version >= 5)
try {
xmlHttp = new ActiveXObject("Msxml2.XMLHTTP");
} catch (e) {
try {
xmlHttp = new ActiveXObject("Microsoft.XMLHTTP");
} catch (e2) {
xmlHttp = false;
}
}
@end @*/
if (!xmlHttp && typeof XMLHttpRequest != 'undefined') {
xmlHttp = new XMLHttpRequest();
}
function callServer() {
var url = "/testresult";
xmlHttp.open("GET", url, true);
xmlHttp.onreadystatechange = updatePage;
xmlHttp.send(null);
}
function updatePage() {
if (xmlHttp.readyState == 4) {
var response = xmlHttp.responseText;
var result = eval('(' + response + ')');
if (result.svcerr) {
document.getElementById("errorarea").innerHTML = result.svcinfo;
testFailed();
} else {
setResult(result.runs, result.total, result.errors, result.failures);
var errors = result.details.ERROR;
var failures = result.details.FAIL;
var details = "";
for(var i=0; i<errors.length; i++) {
details += '<p><div class="error"><div class="errtitle">ERROR '+errors[i].desc+'</div><div class="errdetail"><pre>'+errors[i].detail+'</pre></div></div></p>';
}
for(var i=0; i<failures.length; i++) {
details += '<p><div class="error"><div class="errtitle">FAILURE '+failures[i].desc+'</div><div class="errdetail"><pre>'+failures[i].detail+'</pre></div></div></p>';
}
document.getElementById("errorarea").innerHTML = details;
}
}
}
function testFailed() {
document.getElementById("testindicator").style.backgroundColor="red";
clearInterval(timer);
}
function testSucceed() {
document.getElementById("testindicator").style.backgroundColor="green";
clearInterval(timer);
}
function setResult(runs, total, errors, failures) {
document.getElementById("testran").innerHTML = runs;
document.getElementById("testtotal").innerHTML = total;
document.getElementById("testerror").innerHTML = errors;
document.getElementById("testfailure").innerHTML = failures;
if (errors==0 && failures==0) {
testSucceed();
} else {
testFailed();
}
}
// Update page every 5 seconds
var timer = setInterval(callServer, 3000);
</script>
<title>GAEUnit: Google App Engine Unit Test Framework</title>
</head>
<body>
<div id="headerarea">
<div id="title">GAEUnit: Google App Engine Unit Test Framework</div>
<div id="version">version 1.2.2</div>
<div id="weblink">Please check <a href="http://code.google.com/p/gaeunit">http://code.google.com/p/gaeunit</a> for the latest version</div>
</div>
<div id="resultarea">
<table id="results"><tbody>
<tr><td colspan="3"><div id="testindicator"> </div></td</tr>
<tr>
<td>Runs: <span id="testran">0</span>/<span id="testtotal">0</span></td>
<td>Errors: <span id="testerror">0</span></td>
<td>Failures: <span id="testfailure">0</span></td>
</tr>
</tbody></table>
</div>
<div id="errorarea">The test is running, please wait...</div>
<div id="footerarea">
Please write to the <a href="mailto:George.Z.Lei@Gmail.com">author</a> to report problems<br/>
Copyright 2008 George Lei and Steven R. Farley
</div>
</body>
</html>
"""
# WSGI application: /test runs suites, /testresult is polled by the HTML
# page's JavaScript for JSON status updates.
application = webapp.WSGIApplication([('/test', GAEUnitTestRunner),
                                      ('/testresult', ResultSender)],
                                     debug=True)
def main():
    """CGI entry point for the App Engine runtime."""
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
    main()
| gabriel/shrub | test/lib/gaeunit/gaeunit.py | Python | mit | 16,795 | [
"VisIt"
] | 5b389a6aadc7844f3ddc85e776bed4b6dc13afaa519a3945baa79ca484cbe717 |
"""
Provides classes that represent complete taxonomies, built using components from
the taxacomponents module.
"""
from taxacomponents import Citation, RankTable, Taxon
from taxonvisitor import TaxonVisitor
from taxonvisitors_concrete import PrintTaxonVisitor, CSVTaxonVisitor
from nameresolve import CoLNamesResolver
class TaxonomyError(Exception):
    """
    Raised for errors encountered while working with taxonomies; the given
    message is prefixed with a standard 'Taxonomy error:' header.
    """
    def __init__(self, msg):
        Exception.__init__(self, 'Taxonomy error:\n ' + msg)
class TaxonomyBase:
    # Base class holding taxonomy metadata, its citation, and the root of the
    # taxa tree; provides DB loading and text/CSV printing.

    # Define the "nil" UUID constant as returned by the uuid-osp Postgres module
    # function uuid_nil().
    #NIL_UUID = '00000000-0000-0000-0000-000000000000'
    NIL_UUID = 0

    def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
        self.taxonomy_id = taxonomy_id
        self.name = name
        self.ismaster = ismaster
        self.citation = citation
        self.roottaxon = roottaxon

    def loadFromDB(self, pgcur, taxanum=-1, maxdepth=-1):
        """
        Attempts to load the taxonomy from a taxonomy database, including the full tree
        of taxa.  If taxanum > 0, then only taxanum taxa will be loaded.  If maxdepth > -1,
        the taxa tree will only be traversed to a depth of maxdepth.

        pgcur -- a DB-API cursor using '?' paramstyle.
        Raises TaxonomyError if the taxonomy ID is not in the database.
        """
        query = """SELECT name, citation_id, ismaster, root_tc_id
                   FROM taxonomies
                   WHERE taxonomy_id=?"""
        pgcur.execute(query, (self.taxonomy_id,))
        res = pgcur.fetchone()
        if res == None:
            raise TaxonomyError('Taxonomy ID ' + str(self.taxonomy_id) + ' was not found in the database.')
        self.name = res[0]
        self.ismaster = res[2]
        roottc_id = res[3]
        # Create the Citation object.
        self.citation = Citation()
        self.citation.loadFromDB(pgcur, res[1])
        # Get the rank ID and taxonomy ID of the root taxon concept.
        query = """SELECT tc.rank_id, tc.taxonomy_id
                   FROM taxon_concepts tc, ranks r
                   WHERE tc.tc_id=? AND tc.rank_id=r.rank_id"""
        pgcur.execute(query, (roottc_id,))
        res = pgcur.fetchone()
        rankid = res[0]
        root_taxonomy_id = res[1]
        # Initialize the rank lookup table.
        rankt = RankTable()
        rankt.loadFromDB(pgcur)
        # Load the taxa tree.
        self.roottaxon = Taxon(self.taxonomy_id, rankid, rankt, roottaxo_id = root_taxonomy_id, isroot=True)
        self.roottaxon.loadFromDB(pgcur, roottc_id, taxanum, maxdepth)

    def persist(self):
        """
        Persist the Taxonomy to the database.  This method should be implemented by
        concrete subclasses.
        """
        pass

    def __str__(self):
        tstr = 'name: ' + self.name + '\nID: ' + str(self.taxonomy_id) + '\nmaster: '
        if self.ismaster:
            tstr += 'yes'
        else:
            tstr += 'no'
        return tstr

    def printTaxonomyInfo(self):
        """
        Prints the metadata that describes this taxonomy.
        """
        print '** Taxonomy information **'
        print str(self)
        print str(self.citation)

    def printCSVTaxaTree(self, numtaxa=-1, maxdepth=-1):
        """
        Prints the tree of taxa for this taxonomy in "flat" format as CSV outut.  If
        numtaxa > 0, only the first numtaxa taxa will be printed.  If maxdepth > -1,
        the taxa tree will only be traversed to a depth of maxdepth.
        """
        if numtaxa > 0:
            print '(Only printing first', numtaxa, 'taxa.)'
        if maxdepth > -1:
            print '(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)'
        csvvisitor = CSVTaxonVisitor(numtaxa, maxdepth)
        csvvisitor.visit(self.roottaxon)

    def printTaxaTree(self, numtaxa=-1, maxdepth=-1):
        """
        Prints the tree of taxa for this taxonomy.  If numtaxa > 0, only the first numtaxa
        taxa will be printed.  If maxdepth > -1, the taxa tree will only be traversed to a
        depth of maxdepth.
        """
        print '** Taxa tree **'
        if numtaxa > 0:
            print '(Only printing first', numtaxa, 'taxa.)'
        if maxdepth > -1:
            print '(Only traversing taxa tree to a depth of ' + str(maxdepth) + '.)'
        ptvisitor = PrintTaxonVisitor(numtaxa, maxdepth)
        ptvisitor.visit(self.roottaxon)

    def printAll(self, numtaxa=-1, maxdepth=-1):
        """
        Prints a text representation of this taxonomy, including the tree of taxa.
        If numtaxa > 0, only the first numtaxa taxa will be printed.  If maxdepth > -1,
        the taxa tree will only be traversed to a depth of maxdepth.
        """
        self.printTaxonomyInfo()
        print
        self.printTaxaTree(numtaxa, maxdepth)
class Taxonomy(TaxonomyBase):
"""
A class that represents a single taxonomy in the MOL taxonomy database. Provides methods
to load a taxonomy from the database and persist a taxonomy to the database. Can also link
a taxonomy to the backbone taxonomy.
"""
    def __init__(self, taxonomy_id, name='', ismaster=False, citation=None, roottaxon=None):
        """Initialize the taxonomy; see TaxonomyBase for the base fields."""
        TaxonomyBase.__init__(self, taxonomy_id, name, ismaster, citation, roottaxon)
        # A reference for the backbone taxonomy, which encompasses all other taxonomies.
        # This reference is used if this taxonomy is linked to the backbone taxonomy.
        self.bb_taxonomy = None
def linkToBackbone(self, pgcur, adjustdepth=True):
"""
Tries to connect this taxonomy to the backbone taxonomy, creating new nodes
in the backbone taxonomy, if needed, to link the two together. If adjustdepth
is True, the depth property of all nodes in the taxonomy are set to match the
correct depth relative to the root of the backbone taxonomy. Returns True if
the linking operation succeeded, False otherwise.
"""
bb_taxonomy = BackboneTaxonomy(pgcur)
if bb_taxonomy.linkTaxonomy(self):
self.bb_taxonomy = bb_taxonomy
if adjustdepth:
self.bb_taxonomy.setNodeDepths()
return True
else:
self.bb_taxonomy = None
return False
def getBackboneTaxonomy(self):
"""
Returns a reference to the backbone taxonomy object that links this taxonomy
to the MOL backbone taxonomy.
"""
return self.bb_taxonomy
def persist(self, pgcur, printprogress=False):
"""
Writes the taxonomy information to the database, if it does not already
exist. This includes calling the persist() methods on the Citation and
Taxon tree associated with this Taxonomy object.
"""
# First, check if this taxonomy already exists in the database.
query = """SELECT taxonomy_id
FROM taxonomies
WHERE taxonomy_id=? AND ismaster=?"""
pgcur.execute(query, (self.taxonomy_id, self.ismaster))
res = pgcur.fetchone()
if res == None:
# Write the citation information to the database, if needed.
citation_id = self.citation.persist(pgcur)
# Create the initial database entry for the taxonomy metadata so that the
# foreign key constraint for the child taxon concepts can be satisfied.
query = """INSERT INTO taxonomies
(taxonomy_id, name, citation_id, ismaster, root_tc_id)
VALUES (?, ?, ?, ?, ?)"""
pgcur.execute(query, (self.taxonomy_id, self.name, citation_id, self.ismaster, None))
# Make sure all taxon concepts, including those from the backbone taxonomy,
# are persisted to the database. Use the "nil" UUID as the parent_id for
# the root of the taxonomy if there is not an existing root entry.
if self.bb_taxonomy != None:
self.bb_taxonomy.roottaxon.persist(pgcur, self.NIL_UUID, printprogress,
self.roottaxon.depth)
else:
self.roottaxon.persist(pgcur, self.NIL_UUID, printprogress, self.roottaxon.depth)
# Get the ID of the root taxon.
root_tcid = self.roottaxon.existsInDB(pgcur)
# Update the taxonomy metadata entry with the root taxon concept ID.
query = """UPDATE taxonomies
SET root_tc_id=?
WHERE taxonomy_id=?"""
pgcur.execute(query, (root_tcid, self.taxonomy_id))
pgcur.connection.commit()
elif printprogress:
print ('The metadata for taxonomy "' + self.name + '" (ID ' + str(self.taxonomy_id) +
') already exist in the database; no changes were made.')
def printAll(self, numtaxa=-1, maxdepth=-1):
"""
Prints a text representation of this taxonomy, including the tree of taxa.
If numtaxa > 0, only the first numtaxa taxa will be printed. If maxdepth > -1,
the taxa tree will only be traversed to a depth of maxdepth. Unlike the method
in the base class, this method accounts for the possibility of this taxonomy
being linked to the backbone taxonomy.
"""
self.printTaxonomyInfo()
print
if self.bb_taxonomy != None:
self.bb_taxonomy.printTaxaTree(numtaxa, maxdepth)
else:
self.printTaxaTree(numtaxa, maxdepth)
class DepthAdjustVisitor(TaxonVisitor):
    """
    Sets the "depth" values for all Taxon objects in a taxa tree, using an initial
    starting depth value.
    """
    def __init__(self, startdepth):
        """
        Assigns startdepth as the "depth" value for the top-level Taxon object. All
        other "depth" values are calculated relative to startdepth.
        """
        TaxonVisitor.__init__(self)
        self.startdepth = startdepth

    def processTaxon(self, taxon, depth):
        # depth is relative to the subtree being visited; offset it so that the
        # stored value is relative to the backbone root.
        taxon.depth = self.startdepth + depth
class BackboneTaxonomy(TaxonomyBase):
    """
    A special case of Taxonomy that represents the MOL backbone taxonomy. Provides
    methods to link other taxonomies to the backbone taxonomy. Does not provide a
    persist() method because the backbone taxonomy metadata are set when the database
    tables are created.
    """
    def __init__(self, pgcur):
        """
        Initialize the backbone Taxonomy object and automatically load it from the
        database, but load only the root node by default.
        """
        # Keep the cursor so later lookups (e.g. getLinksFromDB) can query the DB.
        self.pgcur = pgcur
        # The ID of the backbone taxonomy is always 1.
        TaxonomyBase.__init__(self, 1)
        self.loadFromDB(pgcur)

    def loadFromDB(self, pgcur, taxanum=-1, maxdepth=0):
        """
        Exactly the same as loadFromDB() from the superclass, except loads only the root
        taxonomy node (i.e., Eukaryota) by default.
        """
        TaxonomyBase.loadFromDB(self, pgcur, taxanum, maxdepth)

    def linkTaxonomy(self, taxonomy):
        """
        Given a Taxonomy object, this method searches for the root taxon
        concept in the database, verifies whether it is already connected to
        the MOL backbone taxonomy, and if not, attempts to create the Taxon
        objects needed to link it to the backbone taxonomy. To do this, the
        method loads all ancestors of the root of the provided taxonomy, and
        checks if the top-most ancestor is the root of the backbone taxonomy.
        If it not, then Catalog of Life is used to try to infer the missing
        taxon nodes that connect the target taxonomy to the backbone taxonomy.
        If the linking is succesful, the method returns True; otherwise, False
        is returned.
        """
        # Load any parent links to the target taxonomy from the database.
        topnode = self.getLinksFromDB(taxonomy)
        # See if we made it back to the root of the backbone taxonomy.
        if topnode.equals(self.roottaxon):
            # We did, so simply link the child of the returned node to our root taxon.
            self.roottaxon.addChild(topnode.children[0])
            success = True
        else:
            # Otherwise, try to use Catalog of Life to fill in any missing links.
            success = self._buildCoLLinks(topnode)
        return success

    def _buildCoLLinks(self, taxon):
        """
        Uses Catalog of Life to fill in missing taxa needed to link the target taxon to the
        MOL backbone taxonomy. If linking was successful, the target taxon will be connected
        to the backbone root taxon by one or more linking taxa. Returns True on success;
        False otherwise.
        """
        # Use the Catalog of Life names resolver to try to get higher taxonomy information
        # for the taxon.
        resolver = CoLNamesResolver()
        searchres = resolver.searchCoLForTaxon(taxon, taxon.name.namestr, True)
        if searchres == None:
            return False
        res, sname, srank, authorinfo = searchres
        # Process each parent taxon in the CoL classification, creating a chain of Taxon
        # objects to capture the higher taxonomy. Because the name resolver search method
        # verifies that the kingdom is correct, we already know that we are connecting the
        # taxonomy to the correct kingdom.
        taxaxml = res.find('./classification')
        # It is important that we use the rank system from the taxonomy (not the backbone)
        # to ensure that rank name lookups retrieve the correct ID.
        tranksys = taxon.ranksys
        ranktable = taxon.rankt
        curnode = self.roottaxon
        for taxonxml in taxaxml:
            # Each classification element carries a name and a rank string.
            namestr = taxonxml.find('name').text
            rankstr = taxonxml.find('rank').text
            child = curnode.createChild(ranktable.getID(rankstr, tranksys), namestr)
            #print child
            curnode = child
        # Link the root of the target taxonomy to the backbone taxonomy.
        curnode.addChild(taxon)
        return True

    def getLinksFromDB(self, taxonomy):
        """
        Starting from the root node of the provided taxonomy, follows parent
        links upward, building a chain of taxon objects until the top-most
        parent is reached. Returns the top-most node that could be reached by
        following the links upward.
        """
        # See if the root taxon_concept already has a parent.
        curnode = taxonomy.roottaxon
        parent_id = taxonomy.roottaxon.getParentIDFromDB(self.pgcur)
        # Follow parent links upwards until we reach the root or any other node
        # that has no parent or does not yet exist in the database.
        while parent_id != None and parent_id != self.NIL_UUID:
            # Create the parent node and load it from the database.
            # NOTE(review): the parent is created with the child's rank_id — presumably
            # loadFromDB() overwrites it with the correct rank; confirm.
            parent = Taxon(curnode.taxonomy_id, curnode.rank_id, curnode.rankt)
            parent.loadFromDB(self.pgcur, parent_id, maxdepth=0)
            parent.addChild(curnode)
            curnode = parent
            parent_id = curnode.getParentIDFromDB(self.pgcur)
        return curnode

    def setNodeDepths(self):
        """
        After linking a new taxonomy to the backbone taxonomy, the values of the depth
        properties on the Taxon objects in the target taxonomy are likely to be incorrect.
        This method will visit all nodes and set the correct value of the depth property
        for each node.
        """
        # Root is at depth 0; the visitor offsets all descendants from there.
        depthvisitor = DepthAdjustVisitor(0)
        depthvisitor.visit(self.roottaxon)
| stuckyb/sqlite_taxonomy | utilities/taxolib/taxonomy.py | Python | gpl-3.0 | 15,593 | [
"VisIt"
] | 643c273d0bfe1ba83ef75f7f1e958bc6cfca248ac468bf8635105c19ad8d12ff |
#!/usr/bin/env python
from __future__ import division, print_function #, unicode_literals
import sys
import os
import time
import shutil
import textwrap
import platform
import tarfile
import re
import warnings
from socket import gethostname
from subprocess import Popen, PIPE
# Handle py2, py3k differences.
py2 = sys.version_info[0] <= 2
if py2:
import cPickle as pickle
from StringIO import StringIO
from ConfigParser import SafeConfigParser, NoOptionError
else:
import pickle
from io import StringIO
from configparser import SafeConfigParser, NoOptionError
from .jobrunner import TimeBomb
from .tools import (RestrictedShell, StringColorizer, unzip, tail_file,
pprint_table, Patcher, Editor)
from .xyaptu import xcopier
from .devtools import FileLock
# FIXME Test ibm6
from collections import namedtuple
# OrderedDict was added in 2.7
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
import logging
logger = logging.getLogger(__name__)
__version__ = "0.5"
__author__ = "Matteo Giantomassi"
__all__ = [
"BuildEnvironment",
"AbinitTestSuite",
]
_MY_NAME = os.path.basename(__file__)[:-3] + "-" + __version__
# Helper functions and tools
def html_colorize_text(string, code):
    """Wrap *string* in a HTML FONT tag that colors it with *code*."""
    template = "<FONT COLOR='%s'>%s</FONT>"
    return template % (code, string)
_status2htmlcolor = {
"succeeded": lambda string : html_colorize_text(string, 'Green'),
"passed": lambda string : html_colorize_text(string, 'DeepSkyBlue'),
"failed": lambda string : html_colorize_text(string, 'Red'),
"disabled": lambda string : html_colorize_text(string, 'Cyan'),
"skipped": lambda string : html_colorize_text(string, 'Cyan'),
}
def status2html(status):
"""Convert test status in a colored HTML string."""
return _status2htmlcolor[status](status)
def sec2str(seconds):
    """Convert seconds to string."""
    formatted = "%.2f" % seconds
    return formatted
def str2html(string, end="<br>"):
    """Returns a HTML string."""
    joined = "<br>".join(string.splitlines())
    return joined + end
def args2htmltr(*args):
    """Render each argument as a HTML table cell and concatenate the cells."""
    return "".join("<td>" + str(arg) + "</td>" for arg in args)
def html_link(string, href=None):
    """Create a HTML link from a string. Use href as link of href is not None."""
    target = string if href is None else href
    return "<a href='%s'>%s</a>" % (target, string)
def is_string(s):
try:
s + "hello"
return True
except TypeError:
return False
def has_exts(path, exts):
    """True if path ends with extensions exts"""
    ext = os.path.splitext(path)[1]
    # A single extension string is compared directly; otherwise exts is a container.
    if is_string(exts):
        return ext == exts
    return ext in exts
def lazy__str__(func):
    """Lazy decorator for __str__ methods"""
    def oncall(*args, **kwargs):
        # The decorated method is ignored; render self.__dict__ instead.
        self = args[0]
        return "\n".join(str(k) + " : " + str(v) for k, v in self.__dict__.items())
    return oncall
# Helper functions for performing IO
def lazy_read(fname):
    """Return the entire content of file *fname* as a single string."""
    with open(fname, "r") as stream:
        return stream.read()
def lazy_readlines(fname):
    """Return the content of file *fname* as a list of lines (newlines kept)."""
    with open(fname, "r") as stream:
        return stream.readlines()
def lazy_write(fname, s):
    """Write the string *s* to file *fname*, truncating any previous content."""
    with open(fname, "w") as stream:
        stream.write(s)
def lazy_writelines(fname, lines):
    """Write the given sequence of strings to file *fname* (no newlines added)."""
    with open(fname, "w") as stream:
        stream.writelines(lines)
class Record(object):
    """Simple attribute bag; lazy__str__ renders every instance attribute."""
    @lazy__str__
    def __str__(self):
        pass
def rmrf(top, exclude_paths=None):
    """
    Recursively remove all files and directories contained in directory top.

    Args:
        exclude_paths:
            list with the absolute paths that should be preserved

    Returns the list of files and the directories that have been removed.
    """
    exc_paths = []
    if exclude_paths is not None:
        # Accept a single path string as well as a list of paths.
        if is_string(exclude_paths):
            exc_paths = [exclude_paths]
        else:
            exc_paths = exclude_paths

    removed = []
    for (root, dirs, files) in os.walk(top):
        for f in files:
            file_path = os.path.join(root, f)
            if file_path not in exc_paths:
                os.unlink(file_path)
                removed.append(file_path)
        for d in dirs:
            dir_path = os.path.join(root, d)
            # NOTE(review): excluding a directory keeps the directory node itself,
            # but os.walk still descends into it, so files inside are removed
            # unless excluded individually — confirm this is the intended semantics.
            if dir_path not in exc_paths:
                shutil.rmtree(dir_path)
                removed.append(dir_path)
    return removed
class FileToTest(object):
    """This object contains information on the output file that will be analyzed by fldiff"""
    # atr_name, default, conversion function. None designes mandatory attributes.
    _attrbs = [
        ("name", None, str),
        ("tolnlines",None, int), # fldiff tolerances
        ("tolabs", None, float),
        ("tolrel", None, float),
        ("fld_options","",str) , # options passed to fldiff.
        ("fldiff_fname","",str),
        ("hdiff_fname","",str),
        ("diff_fname","",str),
        #("pydiff_fname","",str),
    ]

    def __init__(self, dic):
        """Build the object from dic; raises ValueError when a mandatory key is missing."""
        for atr in FileToTest._attrbs:
            atr_name = atr[0]
            default = atr[1]
            f = atr[2]
            value = dic.get(atr_name, default)
            if value is None:
                raise ValueError("%s must be defined" % atr_name)
            value = f(value)
            # Normalize string values.
            if hasattr(value, "strip"): value = value.strip()
            self.__dict__[atr_name] = value
        # Postprocess fld_options
        self.fld_options = self.fld_options.split()
        for opt in self.fld_options:
            if not opt.startswith("-"):
                raise ValueError("Wrong fldiff option: %s" % opt)

    @lazy__str__
    def __str__(self): pass

    def compare(self, fldiff_path, ref_dir, workdir, timebomb=None, outf=sys.stdout):
        """
        Use fldiff_path to compare the reference file located in ref_dir with
        the output file located in workdir. Results are written to stream outf.
        """
        ref_fname = os.path.abspath(os.path.join(ref_dir, self.name))
        # FIXME Hack due to the stdout-out ambiguity
        if not os.path.exists(ref_fname) and ref_fname.endswith(".stdout"):
            ref_fname = ref_fname[:-7] + ".out"
        out_fname = os.path.abspath(os.path.join(workdir, self.name))
        opts = self.fld_options
        label = self.name
        fld_result, got_summary = wrap_fldiff(fldiff_path, ref_fname, out_fname,
                                              opts=opts, label=label, timebomb=timebomb, out_filobj=outf)
        if not got_summary:
            # Wait 10 sec, then try again (workaround for woopy)
            logger.critical("Didn't got fldiff summary, will sleep for 10 s...")
            time.sleep(10)
            fld_result, got_summary = wrap_fldiff(fldiff_path, ref_fname, out_fname,
                                                  opts=opts, label=label, timebomb=timebomb, out_filobj=outf)
            if not got_summary:
                logger.critical("fldiff summary is still empty!")
        isok, status, msg = fld_result.passed_within_tols(self.tolnlines, self.tolabs, self.tolrel)
        # Save comparison results.
        self.fld_isok = isok
        self.fld_status = status
        self.fld_msg = msg
        return isok, status, msg
# Parsers used for the different TEST_INFO options
def _str2filestotest(string):
    """
    Parse the files_to_test section.
    Returns a tuple of `FileToTest` objects.
    """
    if not string:
        return []

    # Multiple file specifications are separated by ";".
    if ";" in string:
        chunks = [c for c in string.split(";") if c.strip()]
    else:
        chunks = [string]

    parsed = []
    for chunk in chunks:
        # First comma-separated token is the file name, the rest are key=value pairs.
        fields = chunk.split(",")
        spec = {"name": fields[0]}
        for field in fields[1:]:
            key, value = [t.strip() for t in field.split("=")]
            if key in spec:
                err_msg = "Found multiple occurences of keyword %s" % key
                raise AbinitTestInfoParserError(err_msg)
            spec[key] = value
        parsed.append(FileToTest(spec))

    return tuple(parsed)
def _str2list(string): return [s.strip() for s in string.split(",") if s]
def _str2intlist(string): return [int(item) for item in _str2list(string) ]
def _str2set(string): return set([s.strip() for s in string.split(",") if s])
def _str2cmds(string): return [s.strip() for s in string.split(";") if s]
def _str2bool(string):
string = string.strip().lower()
if string == "yes": return True
return False
# TEST_INFO specifications.
# Maps option name -> (line_parser, default_value, section, human-readable description).
# A None default marks the option as mandatory.
TESTCNF_KEYWORDS = {
    # keyword : (parser, default, section, description)
    # [setup]
    "executable" : (str , None , "setup", "Name of the executable e.g. abinit"),
    "test_chain" : (_str2list , "" , "setup", "Defines a ChainOfTest i.e. a list of tests that are connected together."),
    "need_cpp_vars" : (_str2set , "" , "setup", "CPP variables that must be defined in config.h in order to enable the test."),
    "exclude_hosts" : (_str2list , "" , "setup", "The test is not executed if we are running on a slave that matches compiler@hostname"),
    "input_prefix" : (str , "" , "setup", "Prefix for input files (used for the ABINIT files file)"),
    "output_prefix" : (str , "" , "setup", "Prefix for output files (used for the ABINIT files file)"),
    "expected_failure": (_str2bool, "no" , "setup", "yes if the subprocess executing executable is expected to fail (retcode != 0) (default: no)"),
    "input_ddb" : (str , "" , "setup", "The input DDB file read by anaddb"),
    "input_gkk" : (str , "" , "setup", "The input GKK file read by anaddb"),
    # [files]
    "files_to_test" : (_str2filestotest, "", "files", "List with the output files that are be compared with the reference results. Format:\n" +
                       "\t file_name, tolnlines = int, tolabs = float, tolrel = float [,fld_options = -medium]\n" +
                       "\t tolnlines: the tolerance on the number of differing lines\n" +
                       "\t tolabs:the tolerance on the absolute error\n" +
                       "\t tolrel: tolerance on the relative error\n" +
                       "\t fld_options: options passed to fldiff.pl (optional).\n" +
                       "\t Multiple files are separated by ; e.g.\n" +
                       "\t foo.out, tolnlines = 2, tolabs = 0.1, tolrel = 1.0e-01;\n" +
                       "\t bar.out, tolnlines = 4, tolabs = 0.0, tolrel = 1.0e-01"
                       ),
    "psp_files" : (_str2list, "", "files", "List of pseudopotential files (located in the Psps_for_tests directory)."),
    "extra_inputs" : (_str2list, "", "files", "List of extra input files."),
    # [shell]
    "pre_commands" : (_str2cmds, "", "shell", "List of commands to execute before starting the test"),
    "post_commands" : (_str2cmds, "", "shell", "List of commands to execute after the test is completed"),
    # [paral_info]
    "max_nprocs" : (int , 1 , "paral_info", "Maximum number of MPI processors (1 for sequential run)"),
    "nprocs_to_test" : (_str2intlist, "","paral_info","List with the number of MPI processes that should be used for the test"),
    "exclude_nprocs" : (_str2intlist, "","paral_info","List with the number of MPI processes that should not be used for the test"),
    # [extra_info]
    "authors" : (_str2set , "Unknown" , "extra_info", "Author(s) of the test"),
    "keywords" : (_str2set , "" , "extra_info", "List of keywords associated to the test"),
    "description" : (str , "No description available", "extra_info", "String containing extra information on the test"),
    "references" : (_str2list, "", "extra_info", "List of references to papers or other articles"),
}

#TESTCNF_SECTIONS = set( [ TESTCNF_KEYWORDS[k][2] for k in TESTCNF_KEYWORDS ] )

# This extra list is hardcoded in order to have a fixed order of the sections in doc_testcfn_format.
# OrderedDict have been introduced in python2.7 sigh!
TESTCNF_SECTIONS = [
    "setup",
    "files",
    "shell",
    "paral_info",
    "extra_info",
]

# consistency check: every keyword must reference a known section.
for key, tup in TESTCNF_KEYWORDS.items():
    if tup[2] not in TESTCNF_SECTIONS:
        raise ValueError("Please add the new section %s to TESTCNF_SECTIONS" % tup[2])
def line_starts_with_section_or_option(string):
    """
    Classify a TEST_INFO line.

    Returns:
        1 if the line is a section header ([setup], [files], ..., or [NCPU_#]),
        2 if it starts with a known option assignment (key = value),
        0 otherwise.
    """
    # Use the module-level `re` (already imported at the top of the file) instead
    # of a per-call `from re import compile`, and a raw string so that "\d" is not
    # an invalid escape sequence under Python 3.
    re_ncpu = re.compile(r"^NCPU_(\d+)$")
    s = string.strip()
    idx = s.find("=")
    if idx == -1:  # No "=": the line can only be a section header.
        if s.startswith("[") and s.endswith("]"):
            if s[1:-1] in TESTCNF_SECTIONS: return 1  # e.g. [files]
            if re_ncpu.search(s[1:-1]): return 1      # e.g. [NCPU_1]
    else:
        # Line contains "=": check whether the LHS is a known option keyword.
        if s[:idx].strip() in TESTCNF_KEYWORDS: return 2
    return 0
def doc_testcnf_format(fh=sys.stdout):
    """Automatic documentation of the TEST_INFO sections and related options."""
    def writen(string):
        fh.write(string + "\n")

    writen("Automatic documentation of the TEST_INFO sections and options.")

    # Emit the options grouped by section, in the canonical section order.
    for section in TESTCNF_SECTIONS:
        writen("\n[" + section + "]")
        for key in TESTCNF_KEYWORDS:
            tup = TESTCNF_KEYWORDS[key]
            if section != tup[2]:
                continue
            default = tup[1]
            if default is None:
                default = "Mandatory"
            desc = tup[3]
            if default:
                writen("%s = %s (DEFAULT: %s)" % (key, desc, default))
            else:
                writen("%s = %s" % (key, desc))
class AbinitTestInfo(object):
    """Container storing the options specified in the TEST_INFO section."""
    def __init__(self, dct):
        # Expose every parsed TEST_INFO option as an instance attribute.
        for k, v in dct.items():
            self.__dict__[k] = v

        #if self.nprocs_to_test and self.test_chain:
        #    err_msg = "test_chain and nprocs_to_test are mutually exclusive"
        #    raise TestInfoParserError(err_msg)

        # Add the executable name to the list of keywords.
        self.add_keywords([self.executable])

    @lazy__str__
    def __str__(self): pass

    def add_cpp_vars(self, need_cpp_vars):
        """Add new set of CPP variables."""
        self.need_cpp_vars = self.need_cpp_vars.union(need_cpp_vars)

    def add_keywords(self, keywords):
        """Add new set of keywords."""
        self.keywords = self.keywords.union(keywords)

    def make_test_id(self):
        """
        Generate the string with the test identifier
        A special treatment is used for the multi-parallel tests.
        In this case, the test_id is constructed by appending the string _MPI#
        where # is the number of MPI processors.
        """
        ## FIXME Assumes inp_fname is in the form name.in
        test_id = os.path.basename(self.inp_fname).split(".")[0]
        if self.ismulti_parallel:
            test_id += "_MPI%d" % self.max_nprocs
        return test_id

    @property
    def ismulti_parallel(self):
        """True is this is a multi-parallel test."""
        return self._ismulti_paral
class AbinitTestInfoParserError(Exception):
    """Raised by `AbinitTestInfoParser` when the TEST_INFO section is missing or malformed."""
class AbinitTestInfoParser(object):
    """This object parses the TEST_INFO section that describes the test."""
    Error = AbinitTestInfoParserError

    def __init__(self, inp_fname, defaults=None):
        """
        Args:
            inp_fname:
                test input file
            defaults:
                default values passed to the INI parser.
        """
        logger.info("Parsing TEST_INFO section from input file : " + str(inp_fname))
        self.inp_fname = os.path.abspath(inp_fname)
        self.inp_dir, x = os.path.split(self.inp_fname)

        # The TEST_INFO section is embedded in the input file as comment lines
        # prefixed with SENTINEL and delimited by HEADER/FOOTER.
        SENTINEL = '#%%'
        HEADER = "<BEGIN TEST_INFO>\n"
        FOOTER = "<END TEST_INFO>\n"

        # Extract the lines that start with SENTINEL and parse the file.
        lines = lazy_readlines(inp_fname)
        #for l in lines:
        #    print(l)
        #print(inp_fname)
        lines = [l.replace(SENTINEL, "", 1).lstrip() for l in lines if l.startswith(SENTINEL)]

        try:
            start, stop = lines.index(HEADER), lines.index(FOOTER)
        except ValueError:
            raise self.Error("%s does not contain any valid testcnf section!" % inp_fname)

        # Keep only the lines strictly between the HEADER and FOOTER markers.
        lines = lines[start+1:stop]
        if not lines:
            raise self.Error("%s does not contain any valid testcnf section!" % inp_fname)

        # Hack to allow options occupying more than one line.
        # Continuation lines (neither a section header nor a new option) are
        # folded into the previous logical line.
        string = ""
        for l in lines:
            if line_starts_with_section_or_option(l):
                string += l
            else:
                if l.startswith("#"): continue
                string = string.rstrip() + " " + l

        lines = [l + "\n" for l in string.split("\n")]
        s = StringIO()
        s.writelines(lines)
        s.seek(0)

        class MySafeConfigParser(SafeConfigParser):
            """Wrap the get method of SafeConfigParser to disable the interpolation of raw_options."""
            # Options whose value may legitimately contain "%" characters.
            raw_options = ["description",]
            def get(self, section, option, raw=False, vars=None):
                if option in self.raw_options and section == TESTCNF_KEYWORDS[option][2]:
                    logger.debug("Disabling interpolation for section = %s, option = %s" % (section, option))
                    if py2:
                        return SafeConfigParser.get(self, section, option, raw=True, vars=vars)
                    else:
                        return SafeConfigParser.get(self, section, option, raw=True, vars=vars, fallback=None)
                else:
                    if py2:
                        return SafeConfigParser.get(self, section, option, raw, vars)
                    else:
                        return SafeConfigParser.get(self, section, option, raw=raw, vars=vars, fallback=None)

        self.parser = MySafeConfigParser(defaults) # Wrap the parser.
        self.parser.readfp(s)

        # Consistency check: a test_chain entry must not list the same test twice.
        opt = "test_chain"
        section = TESTCNF_KEYWORDS[opt][2]
        pars = TESTCNF_KEYWORDS[opt][0]
        if self.parser.has_option(section, opt):
            string = self.parser.get(section, opt)
            chain = pars(string)
            ones = [chain.count(value) for value in chain]
            if sum(ones) != len(ones):
                err_msg = "%s : test_chain contains repeated tests %s" % (inp_fname, string)
                raise self.Error(err_msg)

        # Check whether (section, option) is correct.
        #_defs = [s.upper() for s in defaults] if defaults else []
        #err_msg = ""
        #for section in parser.sections():
        #    for opt in parser.options(section):
        #        if opt.upper() in _defs: continue
        #        if opt not in TESTCNF_KEYWORDS:
        #            err_msg += "Unknown (section, option) = %s, %s\n" % (section, opt)
        #        elif section != TESTCNF_KEYWORDS[opt][2]:
        #            err_msg += "Wrong (section, option) = %s, %s\n" % (section, opt)
        #if err_msg: raise ValueError(err_msg)

    def generate_testinfo_nprocs(self, nprocs):
        """Returns a record with the variables needed to handle the job with nprocs."""
        info = Record()
        d = info.__dict__
        # First read and parse the global options.
        for key in TESTCNF_KEYWORDS:
            tup = TESTCNF_KEYWORDS[key]
            line_parser = tup[0]
            section = tup[2]
            if section in self.parser.sections():
                try:
                    d[key] = self.parser.get(section, key)
                except NoOptionError:
                    d[key] = tup[1] # Section exists but option is not specified. Use default value.
            else:
                d[key] = tup[1] # Section does not exist. Use default value.
            # Process the line
            try:
                d[key] = line_parser(d[key])
            except:
                err_msg = "Wrong line:\n key = %s, d[key] = %s\n in file: %s" % (key, d[key], self.inp_fname)
                raise self.Error(err_msg)
        # At this point info contains the parsed global values.
        # Now check if this is a parallel test and, in case, overwrite the values
        # using those reported in the [CPU_nprocs] sections.
        # Set also the value of info._ismulti_paral so that we know how to create the test id
        if not info.nprocs_to_test:
            assert nprocs == 1
            info._ismulti_paral = False
        else:
            logger.debug("multi parallel case")
            if nprocs not in info.nprocs_to_test:
                err_msg = "in file: %s. nprocs = %s > not in nprocs_to_test = %s" % (self.inp_fname, nprocs, info.nprocs_to_test)
                raise self.Error(err_msg)
            if nprocs > info.max_nprocs:
                err_msg = "in file: %s. nprocs = %s > max_nprocs = %s" % (self.inp_fname, nprocs, self.max_nprocs)
                raise self.Error(err_msg)
            # Redefine variables related to the number of CPUs.
            info._ismulti_paral = True
            info.nprocs_to_test = [nprocs]
            info.max_nprocs = nprocs
            info.exclude_nprocs = list(range(1, nprocs))
            #print(self.inp_fname, nprocs, info.exclude_nprocs)
            ncpu_section = "NCPU_" + str(nprocs)
            if not self.parser.has_section(ncpu_section):
                err_msg = "Cannot find section %s in %s" % (ncpu_section, self.inp_fname)
                raise self.Error(err_msg)
            for key in self.parser.options(ncpu_section):
                if key in self.parser.defaults(): continue
                opt = self.parser.get(ncpu_section, key)
                tup = TESTCNF_KEYWORDS[key]
                line_parser = tup[0]
                #
                # Process the line and replace the global value.
                try:
                    d[key] = line_parser(opt)
                except:
                    err_msg = "In file: %s. Wrong line: key: %s, value: %s" % (self.inp_fname, key, d[key])
                    raise self.Error(err_msg)
        #print(self.inp_fname, d["max_nprocs"])
        # Add the name of the input file.
        info.inp_fname = self.inp_fname
        return AbinitTestInfo(d)

    @property
    def nprocs_to_test(self):
        """List with the number of MPI processors to be tested."""
        key = "nprocs_to_test"
        opt_parser = TESTCNF_KEYWORDS[key][0]
        default = TESTCNF_KEYWORDS[key][1]
        section = TESTCNF_KEYWORDS[key][2]
        try:
            opt = self.parser.get(section, key)
        except NoOptionError:
            opt = default
        return opt_parser(opt)

    @property
    def is_testchain(self):
        """True if this is a chain of tests"""
        opt = "test_chain"
        section = TESTCNF_KEYWORDS[opt][2]
        return self.parser.has_option(section, opt)

    def chain_inputs(self):
        """Return a list with the path of the input files belonging to the test chain"""
        assert self.is_testchain
        opt = "test_chain"
        section = TESTCNF_KEYWORDS[opt][2]
        parse = TESTCNF_KEYWORDS[opt][0]
        fnames = parse(self.parser.get(section, opt))
        return [os.path.join(self.inp_dir, fname) for fname in fnames]

    #@property
    #def is_parametrized_test(self):
    #    """True if this is a parametrized test."""
    #    raise NotImplemented()

    #def get_parametrized_tests(self):
    #    """Return the list of parametrized tests."""
def find_top_build_tree(start_path, with_abinit=True, ntrials=10):
    """
    Returns the absolute path of the ABINIT build tree.
    Assume start_path is within the build tree.

    Raises:
        `RuntimeError` if build tree is not found after ntrials attempts.
    """
    path = os.path.abspath(start_path)

    # Walk upwards, one directory per attempt, looking for the tree markers.
    for _ in range(ntrials + 1):
        has_configh = os.path.isfile(os.path.join(path, "config.h"))
        has_binary = os.path.isfile(os.path.join(path, "src", "98_main", "abinit"))
        # config.h is always required; the abinit binary only when with_abinit.
        if has_configh and (has_binary or not with_abinit):
            return path
        path, _tail = os.path.split(path)

    raise RuntimeError("Cannot find the ABINIT build tree after %s trials" % ntrials)
class Compiler(object):
    """
    Base class for C,Fortran,C++ compilers.
    Usually instantiated through the class method from_defined_cpp_vars.
    """
    def __init__(self, name, version=None):
        self.name = name
        self.version = version

    def __str__(self):
        return "%s: %s %s" % (self.__class__.__name__, self.name, self.version)

    @classmethod
    def from_defined_cpp_vars(cls, defined_cpp_vars):
        """Build an instance by scanning the CPP variables defined in config.h."""
        for cpp_var in defined_cpp_vars:
            # TODO: version may be useful but it's not reported in config.h
            if cpp_var not in cls._KNOWN_CPP_VARS:
                continue
            # Derive the compiler name from the CPP variable, e.g. FC_INTEL -> intel.
            name = cpp_var.lower().split("_")[1]
            if name == "gnu":
                name = "gfortran"
            elif name == "pathscale":
                name = "psc"
            return cls(name=name, version=None)

        err_msg = "Cannot detect the name of the %s\n. Defined CPP vars: %s " % (cls.__name__, str(defined_cpp_vars))
        raise RuntimeError(err_msg)
class FortranCompiler(Compiler):
    """
    Store information on the Fortran compiler used to build abinit.
    """
    # CPP variables used in config.h; from_defined_cpp_vars scans this list
    # to identify which Fortran compiler produced the build.
    _KNOWN_CPP_VARS = [
        "FC_ABSOFT",
        "FC_FUJITSU",
        "FC_G95",
        "FC_GNU",
        "FC_HITACHI",
        "FC_IBM",
        "FC_INTEL",
        "FC_MIPSPRO",
        "FC_NAG",
        "FC_OPEN64",
        "FC_PATHSCALE",
        "FC_PGI",
        "FC_SUN",
    ]
class CPreProcessorError(Exception):
    """Errors raised by `CPreProcessor`."""


class CPreProcessor(object):
    """Pre-process source code with ANSI CPP."""
    Error = CPreProcessorError

    def __init__(self, includes=None, opts=None, bin="cpp", verbose=0):
        """
        Args:
            includes: list of include directories (default: ["."]).
            opts: extra options passed to cpp (default: ["-DHAVE_CONFIG_H"]).
            bin: name/path of the preprocessor executable; None disables
                preprocessing and process_file returns the raw text.
            verbose: if non-zero, print the command line before running it.
        """
        self.includes = ["."]
        if includes is not None: self.includes = includes
        self.opts = ["-DHAVE_CONFIG_H"]
        if opts is not None: self.opts = opts
        self.bin, self.verbose = bin, verbose

    def process_file(self, filepath, remove_lhash=True):
        """
        Read source from filepath, call CPP wit the includes and the
        options passed to the constructor.

        Returns:
            preprocessed text (str). If remove_lhash is True, the "# lineno"
            marker lines emitted by CPP are stripped from the output.

        Raises:
            CPreProcessorError: if the preprocessor exits with a non-zero code.
        """
        if self.bin is None:
            # No pre-processing, return raw string.
            with open(filepath, "r") as f:
                return f.read()

        cmd = [self.bin]
        if self.opts: cmd += self.opts
        cmd += ["-ansi"]
        if self.includes: cmd += ["-I"+inc for inc in self.includes]
        cmd += [filepath]
        cmd = " ".join(cmd)
        if self.verbose:
            print(cmd)

        from subprocess import Popen, PIPE
        # universal_newlines=True makes stdout/stderr str (not bytes) under py3;
        # without it, startswith("#") and "\n".join below raise TypeError.
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode:
            raise self.Error("C-preprocessor returned %d\n stderr:\n%s" % (p.returncode, stderr))

        # Remove leading hash symbols added by CPP
        if not remove_lhash:
            return stdout
        else:
            return "\n".join([l for l in stdout.splitlines() if not l.startswith("#")])
class FortranBacktrace(object):
    """
    Base class for parsing compiler-specific Fortran runtime backtraces.
    Subclasses implement parse(), filling self.trace with
    (source_file, line_number) tuples.
    """
    def __init__(self, text):
        self.text = text
        self.trace = []
        self.parse()

    def __str__(self):
        return str(self.trace)

    def parse(self):
        raise NotImplementedError("parse method must be implemented by the subclass")

    def locate_srcfile(self, base_name):
        """Search the ABINIT src tree for base_name; return its full path or None."""
        src_root = os.path.join(find_top_build_tree(start_path=".", with_abinit=True), "src")
        for dirpath, dirnames, filenames in os.walk(src_root):
            if base_name in filenames:
                return os.path.join(dirpath, base_name)
        print("cannot find file:", base_name)
        return None

    def edit_source(self, editor=None):
        """Open the first traced source location in an editor."""
        if not self.trace:
            return
        if editor is None:
            editor = Editor()
        src_file, lineno = self.trace[0]
        return editor.edit_file(self.locate_srcfile(src_file), lineno=lineno)
class NagBacktrace(FortranBacktrace):
    """Parse the backtrace produced by the NAG Fortran runtime."""

    def parse(self):
        """Extract (source_file, lineno) pairs from NAG runtime error lines."""
        # Example
        #
        # Runtime Error: opernl4a_cpp.f90, line 871: INTEGER(int32) overflow for 2146435072 * 3
        # Program terminated by fatal error
        # opernl4a_cpp.f90, line 871: Error occurred in OPERNL4A
        if not self.text: return

        # Raw string: "\w"/"\d" are invalid escape sequences in py3 str literals.
        re_nagline = re.compile(r"(\w+\.f90), line (\d+): (.+)")
        for line in self.text:
            m = re_nagline.match(line)
            if not m: continue
            src_file, lineno = m.group(1), m.group(2)
            self.trace.append((src_file, int(lineno)))
class BuildEnvironment(object):
    """Store information on the build environment."""

    def __init__(self, build_dir, cygwin_instdir=None):
        """
        Args:
            build_dir:
                Path to the top level directory of the build.
            cygwin_instdir:
                Installation directory of cygwin. Defaults to '/cygwin'
        """
        # Try to figure out the top level directory of the build tree.
        # (The previous try/except merely re-raised, so call directly.)
        build_dir = find_top_build_tree(build_dir)

        self.uname = platform.uname()
        self.hostname = gethostname().split(".")[0]
        try:
            self.username = os.getlogin()
        except Exception:
            # os.getlogin fails when there is no controlling terminal
            # (e.g. cron jobs, buildbot slaves).
            self.username = "No_username"

        self.build_dir = os.path.abspath(build_dir)
        self.configh_path = os.path.join(self.build_dir, "config.h")
        self.binary_dir = os.path.join(self.build_dir, "src", "98_main")

        self._cygwin_instdir = ""
        if cygwin_instdir is not None:
            self._cygwin_instdir = cygwin_instdir

        # Binaries that are not located in src/98_main
        self._external_bins = {
            "atompaw": os.path.join(self.build_dir, "fallbacks", "exports", "bin", "atompaw-abinit"),
            "timeout": os.path.join(self.build_dir, "tests", "Timeout", "timeout"),
        }

        # Check if this is a valid ABINIT build tree.
        if not (os.path.isfile(self.configh_path) and os.path.isfile(self.path_of_bin("abinit"))):
            raise ValueError("%s is not a valid ABINIT build tree." % self.build_dir)

        # Get the list of CPP variables defined in the build.
        self.defined_cppvars = parse_configh_file(self.configh_path)

        # Get info on the compilers
        self.fortran_compiler = FortranCompiler.from_defined_cpp_vars(self.defined_cppvars)
        print(self.fortran_compiler)  # TODO(review): debug print, consider logger.info

        if not self.has_bin("timeout"):
            warnings.warn("Cannot find timeout executable!")

    @lazy__str__
    def __str__(self): pass

    def issrctree(self):
        """True if this is a source tree."""
        configac_path = os.path.join(self.build_dir, "configure.ac")
        abinitF90_path = os.path.join(self.build_dir, "src", "98_main", "abinit.F90")
        return os.path.isfile(configac_path) and os.path.isfile(abinitF90_path)

    def iscygwin(self):
        """True if we are running under CYGWIN"""
        return "CYGWIN" in self.uname[0].upper()

    def _addext(self, path):
        """Append the .exe extension, needed for cygwin."""
        # (local renamed from `string` to avoid shadowing the stdlib module)
        if self.iscygwin(): path += ".exe"
        return path

    def path_of_bin(self, bin_name, try_syspath=True):
        """Return the absolute path of bin_name ("" if it cannot be found)."""
        if bin_name in self._external_bins:
            bin_path = self._external_bins[bin_name]
        else:
            bin_path = os.path.join(self.binary_dir, bin_name)  # It's in src/98_main
        bin_path = self._addext(bin_path)

        # Handle external bins that are installed system wide (such as atompaw on woopy)
        if bin_name in self._external_bins and not os.path.isfile(bin_path):
            if not try_syspath: return ""
            # Search it in PATH.
            paths = os.getenv("PATH").split(os.pathsep)
            for p in paths:
                bin_path = os.path.join(p, bin_name)
                if os.path.isfile(bin_path): break
            else:
                err_msg = ("Cannot find path of bin_name %s, neither in the build directory nor in PATH %s" %
                           (bin_name, paths))
                warnings.warn(err_msg)
                bin_path = ""

        return bin_path

    def cygwin_path_of_bin(self, bin_name):
        """
        Mangle the name of the executable. Needed for Windows
        when we have to call an executable that is not located
        within the CYGWIN filesystem (aka $Win$ application).
        """
        path = self.path_of_bin(bin_name)
        if self.iscygwin(): path = self._cygwin_instdir + path
        return path

    def has_bin(self, bin_name, try_syspath=True):
        """True if binary bin_name is present in the build."""
        return os.path.isfile(self.path_of_bin(bin_name, try_syspath=try_syspath))

    def cygwin_path(self, path):
        """Absolute path of path, prefixed with the cygwin installation dir if needed."""
        apath = os.path.abspath(path)
        if self.iscygwin(): apath = self._cygwin_instdir + apath
        return apath
def parse_configh_file(fname):
    """
    Parse the configuration file config.h.

    Returns:
        dict mapping the name of each #defined CPP variable to its value
        (as a string). Bare "#define NAME" entries without a value are skipped.

    Note:
        Not very robust. It does not handle instructions such as:

            #ifdef HAVE_FOO
            # define HAVE_BAR 1
            #endif

        Handling this case would require a real preprocessing with CPP and then the parsing.
        Not easy to implement in a portable way especially on IBM machines with XLF.
    """
    defined_cppvars = {}
    with open(fname, "r") as fh:
        for line in fh:
            line = line.lstrip()
            if not line.startswith("#define "):
                continue
            tokens = line.split()
            # Keep only "#define NAME VALUE" entries.
            if len(tokens) >= 3:
                defined_cppvars[tokens[1]] = tokens[2]

    return defined_cppvars
def input_file_has_vars(fname, ivars, comment="#", mode="any"):
    """
    Primitive parser that searches for the occurrence of input variables in the input file fname

    Args:
        fname:
            Input file
        ivars:
            dictionary whose keys are strings with the input variables to search.
            ivars[varname] can be either None or an integer:
            if None, we have a match if varname is present;
            if int, we have a match if varname is present and it has that value.
        comment:
            Character that introduces an inline comment.
        mode: "all" or "any"

    Returns:
        (bool, d)
        bool is True if the input file contains the specified variables
        d is a dictionary with the matching lines (empty dict if no occurrence).
    """
    # This algorithm is not very robust as it assumes that the variable and the value
    # are placed on the same line.
    with open(fname, "r") as fh:
        lines = []
        for line in fh:
            line = line.lower().strip()
            idx = line.find(comment)
            if idx != -1: line = line[:idx]
            lines.append(line)

    matches = {}
    for k in ivars:
        matches[k] = []

    items = ivars.items()

    # Raw strings: "\d" and "\s" are invalid escape sequences in a plain literal.
    re_ivars = {}
    for varname in ivars:
        re_ivars[varname] = re.compile(varname + r"\d*\s*(\d+)\s*")

    nfound = 0
    for line in lines:
        for varname, varvalue in items:
            re_match = re_ivars[varname].match(line)
            if varvalue is None and varname in line:
                nfound += 1
                matches[varname].append(line)
            elif re_match:
                num = int(re_match.group(1))
                if num == int(varvalue):
                    nfound += 1
                    matches[varname].append(line)

    if nfound == 0:
        return False, {}

    if mode == "all":
        return all(bool(v) for v in matches.values()), matches
    elif mode == "any":
        return any(bool(v) for v in matches.values()), matches
    else:
        raise ValueError("Wrong mode %s" % mode)
class FldiffResult(object):
    """Store the results produced by fldiff.pl."""

    # Attribute name --> human-readable description (consumed by lazy__str__).
    _attrbs = {
        "fname1": "first file provided to fldiff.",
        "fname2": "second file provided to fldiff.",
        "options": "options passed to fldiff.",
        "summary_line": "Summary given by fldiff.",
        "fatal_error": "True if file comparison cannot be done.",
        "ndiff_lines": "Number of different lines.",
        "abs_error": "Max absolute error.",
        "rel_error": "Max relative error.",
        "max_absdiff_ln": "Line number where the Max absolute error occurs.",
        "max_reldiff_ln": "Line number where the Max relative error occurs.",
    }

    def __init__(self, summary_line, err_msg, fname1, fname2, options):
        """
        Args:
            summary_line: Final summary string printed by fldiff.pl.
            err_msg: Content of fldiff's stderr ("" if no error).
            fname1, fname2: The two files passed to fldiff.
            options: Options passed to fldiff.
        """
        self.summary_line = summary_line.strip()
        self.err_msg = err_msg.strip()
        self.fname1 = fname1
        self.fname2 = fname2
        self.options = options

        self.fatal_error = False
        self.success = False

        # Classify the outcome by inspecting the summary line produced by fldiff.
        if "fatal" in summary_line:
            self.fatal_error = True

        elif "no significant difference" in summary_line:
            self.success = True
            self.ndiff_lines = 0
            self.abs_error = 0.0
            self.rel_error = 0.0

        elif "different lines=" in summary_line:
            # Extract counters and errors from a line of the form:
            #Summary Case_84 : different lines= 5 , max abs_diff= 1.000e-03 (l.1003), max rel_diff= 3.704e-02 (l.1345)
            tokens = summary_line.split(",")
            for tok in tokens:
                if "different lines=" in tok:
                    self.ndiff_lines = int(tok.split("=")[1])
                if "max abs_diff=" in tok:
                    vals = tok.split("=")[1].split()
                    self.abs_error = float(vals[0])
                if "max rel_diff=" in tok:
                    vals = tok.split("=")[1].split()
                    self.rel_error = float(vals[0])
        else:
            # Unrecognized summary line: warn and treat it as a fatal comparison error.
            err_msg = "Wrong summary_line: " + str(summary_line)
            #raise ValueError(err_msg)
            warnings.warn(err_msg)
            self.fatal_error = True

    @lazy__str__
    def __str__(self): pass

    def passed_within_tols(self, tolnlines, tolabs, tolrel):
        """
        Check if the test passed withing the specified tolerances.

        Args:
            tolnlines: Tolerance on the number of different lines.
            tolabs: Tolerance on the maximum absolute error.
            tolrel: Tolerance on the maximum relative error.

        Returns:
            (isok, status, msg)
        """
        status = "succeeded"; msg = ""
        if self.fatal_error:
            status = "failed"
            msg = "fldiff.pl fatal error:\n" + self.err_msg
        elif self.success:
            msg = "succeeded"
        else:
            # NOTE: the local variable names below are significant: the messages
            # are %-formatted with locals() -- do not rename them.
            abs_error = self.abs_error
            rel_error = self.rel_error
            ndiff_lines = self.ndiff_lines

            status = "failed"; fact = 1.0
            locs = locals()

            if abs_error > tolabs * fact and rel_error < tolrel:
                msg = "failed: absolute error %(abs_error)s > %(tolabs)s" % locs
            elif rel_error > tolrel * fact and abs_error < tolabs:
                msg = "failed: relative error %(rel_error)s > %(tolrel)s" % locs
            elif ndiff_lines > tolnlines:
                msg = "failed: erroneous lines %(ndiff_lines)s > %(tolnlines)s" % locs
            elif abs_error > tolabs * fact and rel_error > tolrel * fact:
                msg = "failed: absolute error %(abs_error)s > %(tolabs)s, relative error %(rel_error)s > %(tolrel)s" % locs
            # FIXME passed or failed?
            elif abs_error > tolabs:
                msg = "within 1.5 of tolerance (absolute error %(abs_error)s, accepted %(tolabs)s )" % locs
            elif rel_error > tolrel:
                msg = "within 1.5 of tolerance (relative error %(rel_error)s, accepted %(tolrel)s )" % locs
            else:
                status = "passed"
                msg = "passed: absolute error %(abs_error)s < %(tolabs)s, relative error %(rel_error)s < %(tolrel)s" % locs

        isok = status in ["passed", "succeeded"]
        return isok, status, msg
def wrap_fldiff(fldiff_path, fname1, fname2, opts=None, label=None, timebomb=None, out_filobj=sys.stdout):
    """
    Wraps fldiff.pl script, returns (fld_result, got_summary)
    fld_result is a FldiffResult instance, got_summary is set to False if fldiff.pl didn't return any final summary

    Usage: fldiff [-context] [ -ignore | -include ] [ -ignoreP | -includeP ] [ -easy | -medium | -ridiculous ] file1 file2 [label]
    """
    # Default options for fldiff script.
    fld_options = "-ignore -ignoreP"
    if opts: fld_options = " ".join([fld_options] + list(opts))
    fld_options = fld_options.split()

    if label is None: label = ""
    args = ["perl", fldiff_path] + fld_options + [fname1, fname2, label]
    cmd_str = " ".join(args)
    logger.info("about to execute %s" % cmd_str)

    # NOTE: the timebomb branch below is deliberately disabled with "if True".
    if True or timebomb is None:
        # universal_newlines=True so that stdout/stderr are str (not bytes):
        # the string processing below and FldiffResult expect text.
        p = Popen(cmd_str, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout_data, stderr_data = p.communicate()
        ret_code = p.returncode
    else:
        p, ret_code = timebomb.run(cmd_str, shell=True, stdout=PIPE, stderr=PIPE)

    # fldiff returns this value when some difference is found.
    # perl programmers have a different understanding of exit_status!
    MAGIC_FLDEXIT = 4

    err_msg = ""
    if ret_code not in [0, MAGIC_FLDEXIT]:
        err_msg = stderr_data

    lines = stdout_data.splitlines(True)

    if out_filobj and not hasattr(out_filobj, "writelines"):
        # Assume string
        lazy_writelines(out_filobj, lines)
    else:
        out_filobj.writelines(lines)

    # Parse the last line.
    # NOTE:
    # on woopy fldiff returns to the parent process without producing
    # any output. In this case, we set got_summary to False so that
    # the caller can make another attempt.
    got_summary = True
    try:
        summary_line = lines[-1]
    except IndexError:
        got_summary = False
        try:
            logger.critical("Trying to kill fldiff process, cmd %s" % cmd_str)
            p.kill()
        except Exception as exc:
            logger.critical("p.kill failed with exc %s" % str(exc))
        summary_line = "fatal error: no summary line received from fldiff"

    return FldiffResult(summary_line, err_msg, fname1, fname2, fld_options), got_summary
def make_abitest_from_input(inp_fname, abenv, keywords=None, need_cpp_vars=None, with_np=1):
    """
    Factory function to generate a Test object from the input file inp_fname.

    Args:
        inp_fname: Path of the input file (assumed in the form some_path/Input/t30.in).
        abenv: Object with the Abinit environment.
        keywords: Global keywords added to the test.
        need_cpp_vars: Global CPP variables required by the test.
        with_np: Number of MPI processes.

    Returns:
        Instance of BaseTest or of the subclass associated to the executable.
    """
    inp_fname = os.path.abspath(inp_fname)

    parser = AbinitTestInfoParser(inp_fname)
    nprocs_to_test = parser.nprocs_to_test
    if not nprocs_to_test:
        nprocs_to_test = [1]

    # Single test with with_np processors.
    test_info = parser.generate_testinfo_nprocs(with_np)

    test_info.add_cpp_vars(need_cpp_vars)  # Add global cpp variables.
    test_info.add_keywords(keywords)       # Add global keywords.

    # Instantiate the appropriate subclass depending on the name of the executable. Default is BaseTest.
    cls = exec2class(test_info.executable)
    return cls(test_info, abenv)
def make_abitests_from_inputs(input_fnames, abenv, keywords=None, need_cpp_vars=None):
    """
    Factory function. Return a list of tests generated from the TEST_INFO section reported
    in the input files inp_fnames.

    Args:
        input_fnames: Single path or list of paths of the input files.
        abenv: Object with the Abinit environment.
        keywords: Global keywords added to each test.
        need_cpp_vars: Global CPP variables required by each test.
    """
    if is_string(input_fnames):
        input_fnames = [input_fnames]
    inp_fnames = [os.path.abspath(p) for p in input_fnames]

    out_tests = []
    # Pop from the front so that inputs consumed by a test chain can be
    # removed from the list while we iterate.
    while inp_fnames:
        inp_fname = inp_fnames.pop(0)

        parser = AbinitTestInfoParser(inp_fname)

        nprocs_to_test = parser.nprocs_to_test
        if len(nprocs_to_test) == 0:
            nprocs_to_test = [1]

        if not parser.is_testchain:
            # No dependency --> generate a list of test by changing the number np of MPI processors.
            for np in nprocs_to_test:
                test_info = parser.generate_testinfo_nprocs(np)
                test_info.add_cpp_vars(need_cpp_vars)  # Add global cpp variables.
                test_info.add_keywords(keywords)       # Add global keywords.
                # Instantiate the appropriate subclass depending on the name of the executable. Default is BaseTest.
                cls = exec2class(test_info.executable)
                out_tests.append(cls(test_info, abenv))
        else:
            logger.info("got chain input %s" % inp_fname)
            # Build the test chain with np processors.
            for np in nprocs_to_test:
                tchain_list = []
                for cht_fname in parser.chain_inputs():
                    t = make_abitest_from_input(cht_fname, abenv, keywords=keywords, need_cpp_vars=need_cpp_vars, with_np=np)
                    tchain_list.append(t)

                if not tchain_list:
                    raise RuntimeError("tchain_list is empty, inp_fname %s" % inp_fname)

                out_tests.append(ChainOfTests(tchain_list))

            # Remove the input files of the chain from the list of files still to process.
            for s in parser.chain_inputs()[1:]:
                try:
                    idx = inp_fnames.index(s)
                except ValueError:
                    # BUG FIX: report the missing file, not the whole list.
                    raise RuntimeError("%s not found in inp_fnames" % s)
                inp_fnames.pop(idx)

    return out_tests
class Status(int):
    """
    This object is an integer representing the status of the `Test`.
    Statuses are ordered, negative values are used for positive outcomes,
    positive values for failures.
    """
    # Possible status of the node.
    _STATUS2STR = OrderedDict([
        (-3, "Skipped"),          # Test has been skipped because test requirements are not fulfilled
        (-2, "Succeeded"),        # fldiff returned succeeded
        (-1, "Passed"),           # fldiff returned passed
        (0, "None"),              # Initial status of the test.
        (1, "FileDifferError"),   # File comparison could not be performed but the calculation terminated
                                  # (e.g. different number of lines in ref and out files)
        (2, "NumericalError"),    # File comparison detected too large numerical errors.
        (3, "ExecutionError"),    # Run didn't complete due to some error in the code e.g. segmentation fault
        (4, "PythonError"),       # A python exception was raised in the driver code.
    ])

    def __repr__(self):
        return "<%s: %s, at %s>" % (type(self).__name__, str(self), id(self))

    def __str__(self):
        """Human-readable name of the status."""
        return self._STATUS2STR[self]

    @classmethod
    def from_string(cls, s):
        """Return a `Status` instance from its string representation."""
        for num, text in cls._STATUS2STR.items():
            if text == s:
                return cls(num)
        raise ValueError("Wrong string %s" % s)

    @property
    def is_problematic(self):
        """True if test was not successful."""
        return self > 0

    @property
    def info(self):
        """Human-readable string with info on the outcome of the test ("None" if never set)."""
        return getattr(self, "_info", "None")

    def set_info(self, info):
        """info setter."""
        self._info = info
class BaseTestError(Exception):
    """Base error class raised by Test objects (exposed as BaseTest.Error)."""
class BaseTest(object):
    """
    Base class describing a single test. Tests associated to other executables should
    subclass BaseTest and redefine the method make_stdin.
    Then change exec2cls so that the appropriate instance is returned.
    """
    # Exception raised by the methods of this class.
    Error = BaseTestError

    # Possible status of the test.
    _possible_status = ["failed", "passed", "succeeded", "skipped", "disabled"]

    # NOTE(review): integer-based Status constants are not in use yet (see the Status class).
    #S_SKIPPED = Status(-3)
    #S_SUCCEDED = Status(-2)
    #S_PASSED = Status(-1)
    #S_NODE = Status(0)
    #S_DIFF_ERROR = Status(2)
    #S_NUM_ERROR = Status(2)
    #S_EXEC_ERROR = Status(3)
    #S_PY_ERROR = Status(4)

    #ALL_STATUS = [
    #    S_SKIPPED,
    #    S_SUCCEDED,
    #    S_PASSED,
    #    S_NODE,
    #    S_NUM_ERROR,
    #    S_EXEC_ERROR,
    #    S_PY_ERROR,
    #]
def __init__(self, test_info, abenv):
    """
    Args:
        test_info: Object with the metadata parsed from the TEST_INFO section;
            its attributes are merged into self.
        abenv: Abinit environment providing apath_of for tests directories.
    """
    # BUG FIX: logger.info does not accept print-style varargs without a format
    # placeholder; use lazy %-formatting instead.
    logger.info("Initializing BaseTest from inp_fname: %s", test_info.inp_fname)

    self.inp_fname = os.path.abspath(test_info.inp_fname)
    self.abenv = abenv
    self.id = test_info.make_test_id()  # The test identifier (takes into account the multi_parallel case)
    self.nprocs = 1  # Start with 1 MPI process.

    # FIXME Assumes inp_fname is in the form tests/suite_name/Input/name.in
    suite_name = os.path.dirname(self.inp_fname)
    suite_name = os.path.dirname(suite_name)
    self.suite_name = os.path.basename(suite_name)
    # NOTE(review): suite_name here is still the absolute path of the suite dir,
    # so os.path.join inside apath_of discards the leading "tests" component -- confirm.
    self.ref_dir = abenv.apath_of("tests", suite_name, "Refs")
    self.inp_dir = abenv.apath_of("tests", suite_name, "Input")

    self._executed = False
    self._status = None
    # Input files whose basename starts with "-" mark disabled tests.
    if os.path.basename(self.inp_fname).startswith("-"):
        self._status = "disabled"

    # Initial list of local files that should not be removed.
    self._files_to_keep = []

    # Default values.
    self.make_html_diff = 0  # 0 => Do not produce diff files in HTML format
                             # 1 => Produced HTML diff but only if test failed
                             # 2 => Produce HTML diff independently of the final status
    self.sub_timeout = 30    # Timeout for subprocesses (in seconds)
    self.erase_files = 2     # 0 => Keep all files.
                             # 1 => Remove files but only if the test passes or succeeds
                             # 2 => Remove files even when the test fail.

    # Incorporate the attributes of test_info in self, refusing to silently
    # overwrite attributes already set to a different value.
    err_msg = ""
    for k in test_info.__dict__:
        if k in self.__dict__ and test_info.__dict__[k] != self.__dict__[k]:
            err_msg += "Cannot overwrite key %s\n" % k
    if err_msg:
        raise self.Error(err_msg)
    self.__dict__.update(test_info.__dict__)

    # Save authors' second names to speed up the search.
    # Well, let's hope that we don't have authors with the same second name!
    # (local renamed from `string` to avoid shadowing the stdlib module;
    # the dead `except IndexError` was removed: str slicing never raises it.)
    second_names = []
    for author in self.authors:
        idx = author.rfind(".")
        f, s = ("", author)
        if idx != -1:
            f, s = author[:idx+1], author[idx+2:]
        if not f and s and s != "Unknown":
            print("author(s) first name is missing in file %s, string = %s " % (self.full_id, author))
        second_names.append(s)

    self._authors_snames = set(second_names)
def __repr__(self):
    # Use the full test identifier, e.g. "[v1][t01][np=1]".
    return self.full_id

def __str__(self):
    return repr(self)

#@lazy__str__
#def __str__(self): pass
# Accessors for the three standard streams of the test.
# They delegate to lazy_readlines/lazy_read -- presumably cached helpers;
# see their definitions for the exact semantics.
def stdin_readlines(self):
    return lazy_readlines(self.stdin_fname)

def stdin_read(self):
    return lazy_read(self.stdin_fname)

def stdout_readlines(self):
    return lazy_readlines(self.stdout_fname)

def stdout_read(self):
    return lazy_read(self.stdout_fname)

def stderr_readlines(self):
    return lazy_readlines(self.stderr_fname)

def stderr_read(self):
    return lazy_read(self.stderr_fname)
@property
def has_empty_stderr(self):
    # True when the test produced no output on stderr.
    return not bool(self.stderr_read())

@property
def full_id(self):
    """Full identifier of the test."""
    #return "["+self.suite_name+"]["+self.id+"]"
    return "["+self.suite_name+"]["+self.id+"][np="+str(self.nprocs)+"]"

@property
def bin_path(self):
    """The absolute path of the executable needed to run the test."""
    return self.build_env.path_of_bin(self.executable)

@property
def cygwin_bin_path(self):
    # Executable path mangled for cygwin (see BuildEnvironment.cygwin_path_of_bin).
    return self.build_env.cygwin_path_of_bin(self.executable)

def cygwin_path(self, path):
    # Absolute path mangled for cygwin (see BuildEnvironment.cygwin_path).
    return self.build_env.cygwin_path(path)
def cpkl_dump(self, protocol=-1):
    """Serialize this test instance to <workdir>/<id>.cpkl with pickle."""
    pickle_path = os.path.join(self.workdir, "%s.cpkl" % self.id)
    self.cpkl_fname = pickle_path
    fh = open(pickle_path, "wb")
    try:
        pickle.dump(self, fh, protocol=protocol)
    finally:
        fh.close()
    # The pickle file must survive the workdir cleanup.
    self.keep_files(pickle_path)
def has_keywords(self, keywords, mode="any"):
    """
    True if test has keywords

    mode == "all" --> check if all keywords are present
    mode == "any" --> check if at least one keyword is present
    """
    wanted = set(keywords)
    if mode == "all":
        return wanted.issubset(self.keywords)
    if mode == "any":
        return wanted.intersection(self.keywords)
    raise ValueError("wrong mode %s" % mode)
def has_authors(self, authors, mode="any"):
    """
    True if test has authors

    mode == "all" --> check if all authors are present
    mode == "any" --> check if at least one author is present
    """
    wanted = set(authors)
    if mode == "all":
        return wanted.issubset(self._authors_snames)
    if mode == "any":
        return wanted.intersection(self._authors_snames)
    raise ValueError("wrong mode %s" % mode)
def has_variables(self, ivars, mode="any"):
    """True if test has the input variables ivars (dict {varname:varvalue})"""
    # Delegate to input_file_has_vars; the dict of matching lines is discarded.
    found, d = input_file_has_vars(self.inp_fname, ivars, mode=mode)
    return found
def edit_input(self, editor=None):
    """
    Call editor to edit the input file of the test.
    A default editor is provided if editor is None (use $EDITOR shell variable)
    """
    if editor is None: editor = Editor()
    # The previous try/except merely re-raised; call directly.
    editor.edit_file(self.inp_fname)
def listoftests(self, width=100, html=True, abslink=True):
    """
    Return the description of the test (with its references), formatted either
    as plain text or as HTML with a link to the input file.

    Args:
        width: Column at which the description is wrapped.
        html: Produce HTML output when True, plain text otherwise.
        abslink: Use the absolute path of the input file in the HTML link.
    """
    # Local renamed from `string` to avoid shadowing the stdlib module of the same name.
    text = self.description.lstrip()
    if self.references:
        text += "References:\n" + "\n".join(self.references)
    text = textwrap.dedent(text)
    text = textwrap.fill(text, width=width)

    if not html:
        return self.full_id + ":\n" + text

    if abslink:
        link = html_link(self.full_id, self.inp_fname)
    else:
        # Use relative path so that we can upload the HTML file on
        # the buildbot master and browse the pages.
        link = html_link(self.full_id, os.path.basename(self.inp_fname))

    return link + "<br>" + text.replace("\n","<br>") + "\n"
def make_stdin(self):
    """
    Generate the standard input of the test.
    The base implementation returns the content of inp_fname unchanged.
    Subclasses should redefine this method according to their needs.
    """
    with open(self.inp_fname, "r") as fh:
        return fh.read()
def get_extra_inputs(self):
    """Copy extra inputs from inp_dir to workdir."""
    # First copy the main input file (useful for debugging the test)
    # Avoid raising exceptions as python threads do not handle them correctly.
    # (The broad except is deliberate: failures are recorded in self.exceptions.)
    try:
        src = self.inp_fname
        dest = os.path.join(self.workdir, os.path.basename(self.inp_fname))
        shutil.copy(src, dest)
        self.keep_files(dest) # Do not remove it after the test.
    except:
        self.exceptions.append(self.Error("copying %s => %s" % (src,dest)))

    # Then copy (and possibly decompress) the extra input files.
    for extra in self.extra_inputs:
        src = os.path.join(self.inp_dir, extra)
        dest = os.path.join(self.workdir, extra)

        if not os.path.isfile(src):
            self.exceptions.append(self.Error("%s: no such file" % src) )
            continue

        shutil.copy(src, dest)
        if dest.endswith(".gz"): # Decompress the file
            unzip(dest)
            dest = dest[:-3]
        #self.keep_files(dest) # Do not remove dest after the test.
@property
def inputs_used(self):
    """List with the input files used by the test."""
    inputs = [self.inp_fname]
    inputs.extend(os.path.join(self.inp_dir, f) for f in self.extra_inputs)

    # Files referenced in the shell sections (e.g. "iw_cp fname dest") are inputs as well.
    for cmd_str in self.pre_commands + self.post_commands:
        if cmd_str.startswith("iw_"):
            inputs.append(os.path.join(self.inp_dir, cmd_str.split()[1]))

    return inputs
@property
def status(self):
    """The status of the test"""
    # Explicitly-set terminal statuses take precedence over the fldiff results.
    if self._status in ["disabled", "skipped", "failed"]: return self._status

    # Otherwise derive the status from the file comparisons:
    # one failure fails the test, one "passed" downgrades "succeeded".
    all_fldstats = [f.fld_status for f in self.files_to_test]
    if "failed" in all_fldstats: return "failed"
    if "passed" in all_fldstats: return "passed"
    assert all([s == "succeeded" for s in all_fldstats])
    return "succeeded"
@property
def isok(self):
    """Return true if test is OK (test passed and not python exceptions."""
    return self.fld_isok and not self.exceptions

@property
def files_to_keep(self):
    """List with the files that should not be erased once the test completed"""
    return self._files_to_keep

def keep_files(self, files):
    """Add files to the list of paths that should not be erased"""
    # Accept both a single path and a list of paths.
    if is_string(files):
        self._files_to_keep.append(files)
    else:
        self._files_to_keep.extend(files)
def compute_nprocs(self, build_env, nprocs, runmode):
    """
    Compute the number of MPI processes that can be used for the test from the initial guess nprocs

    Returns:
        (nprocs, string)
        where nprocs = 0 if the test cannot be executed.
        string contains a human-readable message explaining the reason why the test will be skipped.

    A test cannot be executed if:

        1) It requires CPP variables that are not defined in the build.
        2) The user asks for more MPI nodes than max_nprocs (this value is reported in the TEST_INFO section).
        3) We have a multiparallel test (e.g. paral/tA.in) and nprocs is not in nprocs_to_test
        4) nprocs is in exclude_nprocs
    """
    err_msg = ""
    # A leading "!" negates the requirement: !HAVE_FOO --> HAVE_FOO must NOT be defined.
    # BUG FIX: the previous elif chopped the first char of ordinary (non-negated)
    # variables that were defined, which could spuriously flag them.
    for var in self.need_cpp_vars:
        if var.startswith("!"):
            if var[1:] in build_env.defined_cppvars:
                err_msg += "Build environment defines the CPP variable %s\n" % var[1:]
        elif var not in build_env.defined_cppvars:
            err_msg += "Build environment does not define the CPP variable %s\n" % var

    # Remove this check to run the entire test suite in parallel
    #runmode = "dynamic"
    if runmode == "static":
        if nprocs > self.max_nprocs:
            err_msg += "nprocs: %s > max_nprocs: %s\n" % (nprocs, self.max_nprocs)
    elif runmode == "dynamic":
        # Will select the minimum between max_nprocs and nprocs.
        pass
    else:
        raise ValueError("Wrong runmode %s" % runmode)

    if self.nprocs_to_test and nprocs != self.nprocs_to_test[0]:
        err_msg += "nprocs: %s != nprocs_to_test: %s\n" % (nprocs, self.nprocs_to_test[0])

    if nprocs in self.exclude_nprocs:
        err_msg += "nprocs: %s in exclude_nprocs: %s\n" % (nprocs, self.exclude_nprocs)

    real_nprocs = 0 if err_msg else min(self.max_nprocs, nprocs)
    return real_nprocs, err_msg
def skip_host(self):
    """
    Return True if the test should be skipped since we are running on a banned host.
    """
    # exclude_hosts entries are expected in the form "compiler@hostname".
    compilers, slaves = [], []
    for s in self.exclude_hosts:
        compiler, host = None, s
        if "@" in s:
            compiler, host = s.split("@")
        else:
            # TODO: validate TEST_INFO at the level of the parser.
            # NOTE: malformed entries are still appended with compiler=None,
            # so they can never match a compiler name below.
            warnings.warn("Wrong string %s in exclude_hosts" % s)

        compilers.append(compiler)
        slaves.append(host)

    # Find the slave and compare the name of the compiler.
    try:
        idx = slaves.index(self.build_env.hostname)
    except ValueError:
        # This hostname is not in the exclusion list.
        return False

    return compilers[idx] == self.build_env.fortran_compiler.name
def run(self, build_env, runner, workdir, nprocs=1, runmode="static", **kwargs):
    """
    Run the test with nprocs MPI nodes in the build environment build_env using the `JobRunner` runner.
    Results are produced in directory workdir. kwargs is used to pass additional options

    kwargs:
        pedantic
        erase_file
        make_html_diff
        sub_timeout

    .. warning:
        This method must be thread-safe, DO NOT change build_env or runner.
    """
    # Work on a private copy of the runner: this method runs in worker threads.
    import copy
    runner = copy.deepcopy(runner)

    start_time = time.time()

    # Mark tests as failed if stderr is not empty.
    self.pedantic = kwargs.get("pedantic", False)

    workdir = os.path.abspath(workdir)
    if not os.path.exists(workdir): os.mkdir(workdir)

    self.workdir = workdir
    self.build_env = build_env

    self.exceptions = []
    self.fld_isok = True # False if at least one file comparison fails.

    # Extract options from kwargs
    self.erase_files = kwargs.get("erase_files", self.erase_files)
    self.make_html_diff = kwargs.get("make_html_diff", self.make_html_diff)
    self.sub_timeout = kwargs.get("sub_timeout", self.sub_timeout)

    timeout = self.sub_timeout
    # Prefer the external timeout executable when available, pure python timer otherwise.
    if self.build_env.has_bin("timeout") and timeout > 0.0:
        exec_path = self.build_env.path_of_bin("timeout")
        self.timebomb = TimeBomb(timeout, delay=0.05, exec_path = exec_path)
    else:
        self.timebomb = TimeBomb(timeout, delay=0.05)

    # One terminal color per final status.
    str_colorizer = StringColorizer(sys.stdout)
    status2txtcolor = {
        "succeeded": lambda string: str_colorizer(string, "green"),
        "passed": lambda string: str_colorizer(string, "blue"),
        "failed": lambda string: str_colorizer(string, "red"),
        "disabled": lambda string: str_colorizer(string, "cyan"),
        "skipped": lambda string: str_colorizer(string, "cyan"),
    }

    # Check whether the test can be executed.
    can_run = True
    if self._status == "disabled":
        msg = self.full_id + ": Disabled"
        can_run = False
        print(status2txtcolor[self._status](msg))

    # Here we get the number of MPI nodes for test.
    self.nprocs, self.skip_msg = self.compute_nprocs(self.build_env, nprocs, runmode=runmode)

    if self.skip_msg:
        self._status = "skipped"
        msg = self.full_id + ": Skipped: " + self.skip_msg
        print(status2txtcolor[self._status](msg))
        can_run = False

    if self.skip_host():
        self._status = "skipped"
        msg = self.full_id + ": Skipped: hostname exception"
        print(status2txtcolor[self._status](msg))
        can_run = False

    self.run_etime = 0.0

    if can_run:
        # Execute pre_commands in workdir.
        rshell = RestrictedShell(self.inp_dir, self.workdir, self.abenv.psps_dir)
        for cmd_str in self.pre_commands:
            rshell.execute(cmd_str)
        if rshell.exceptions:
            self.exceptions.extend(rshell.exceptions)
            rshell.empty_exceptions()

        # Copy extra inputs in workdir (if any).
        self.get_extra_inputs()

        # Create stdin file in the workdir.
        self.stdin_fname = os.path.join(workdir, self.id + ".stdin")
        self.stdout_fname = os.path.join(workdir, self.id + ".stdout")
        self.stderr_fname = os.path.join(workdir, self.id + ".stderr")
        self.keep_files([self.stdin_fname, self.stdout_fname, self.stderr_fname])

        # Create input file.
        t_stdin = self.make_stdin()
        with open(self.stdin_fname, "w") as fh:
            fh.writelines(t_stdin)

        # Run the code (run_etime is the wall time spent to execute the test)
        if runner.has_mpirun:
            bin_path = self.cygwin_bin_path
        else:
            bin_path = self.bin_path

        self.run_etime = runner.run(self.nprocs, bin_path,
                                    self.stdin_fname, self.stdout_fname, self.stderr_fname,
                                    cwd=workdir)

        # Save exceptions (if any).
        if runner.exceptions:
            self.exceptions.extend(runner.exceptions)
            if not self.expected_failure:
                for exc in runner.exceptions: print(exc)

        # Execute post_commands in workdir.
        for cmd_str in self.post_commands:
            rshell.execute(cmd_str)

        # Save exceptions (if any).
        if rshell.exceptions:
            self.exceptions.extend(rshell.exceptions)
            rshell.empty_exceptions()

        # Check final results:
        # 1) use fldiff to compare ref and output files.
        # 2) fldiff stdout is redirected to fldiff_fname.
        for f in self.files_to_test:
            fldiff_fname = os.path.join(self.workdir, f.name + ".fldiff")
            self.keep_files(fldiff_fname)

            with open(fldiff_fname,"w") as fh:
                f.fldiff_fname = fldiff_fname
                isok, status, msg = f.compare(self.abenv.fldiff_path, self.ref_dir, self.workdir,
                                              timebomb=self.timebomb, outf=fh)

            self.keep_files(os.path.join(self.workdir, f.name))
            self.fld_isok = self.fld_isok and isok
            msg = ": ".join([self.full_id, msg])
            print(status2txtcolor[status](msg))

        # Check if the test is expected to fail.
        if runner.retcode != 0 and not self.expected_failure:
            self._status = "failed"
            msg = (self.full_id + "Test was not expected to fail but subrocesses returned %s" % runner.retcode)
            print(status2txtcolor["failed"](msg))

        # If pedantic, stderr must be empty unless the test is expected to fail!
        if self.pedantic and not self.expected_failure:
            try:
                errout = self.stderr_read()
                if errout:
                    # TODO: Not very clean, I should introduce a new status and a setter method.
                    self._status = "failed"
            except Exception as exc:
                self.exceptions.append(exc)

        # Check stderr for presence of valgrind errors.
        if runner.has_valgrind:
            try:
                # Build a parser from the command line options and parse the stderr.
                parser = runner.build_valgrind_parser()
                parser.parse(self.stderr_fname)
                if parser.error_report:
                    # TODO: Not very clean, I should introduce a new status and a setter method.
                    self._status = "failed"
                    msg = " ".join([self.full_id, "VALGRIND ERROR:", parser.error_report])
                    print(status2txtcolor["failed"](msg))
            except Exception as exc:
                # Py threads do not like exceptions.
                # Store the exception and continue.
                self.exceptions.append(exc)

        if self.status == "failed":
            # Print the first line of the stderr if it's not empty.
            try:
                errout= self.stderr_read()
                if errout:
                    print(status2txtcolor["failed"](errout))
            except Exception as exc:
                self.exceptions.append(exc)

    self._executed = True
    self.tot_etime = time.time() - start_time
def clean_workdir(self, other_test_files=None):
    """Delete the files generated in self.workdir, preserving protected ones.

    Args:
        other_test_files: optional extra list of paths that must survive.
    """
    assert self._executed
    if self.erase_files == 0 or not os.path.exists(self.workdir):
        return
    # Build the list of files that must survive the cleanup.
    protected = list(self._files_to_keep)
    if other_test_files is not None:
        protected.extend(other_test_files)
    # Hard-coded names that are always preserved.
    for fname in ["perf.data"]:
        protected.append(os.path.join(self.workdir, fname))
    # erase_files == 1: clean only when the test succeeded; == 2: always clean.
    should_clean = self.erase_files == 2 or (self.erase_files == 1 and self.isok)
    if not should_clean:
        return
    for name in os.listdir(self.workdir):
        path = os.path.join(self.workdir, name)
        if path in protected:
            continue
        if not os.path.isfile(path):
            raise NotImplementedError("Found directory: %s in workdir!!" % path)
        os.remove(path)
def patch(self, patcher=None):
    """
    Patch the output files of the test with the specified patcher.
    A default patcher is provided if patcher is None (use $PATCHER shell variable)
    """
    assert self._executed
    for f in self.files_to_test:
        ref_fname = os.path.abspath(os.path.join(self.ref_dir, f.name))
        out_fname = os.path.abspath(os.path.join(self.workdir,f.name) )
        # NOTE(review): deliberate guard — the statements below are unreachable
        # until the Patcher integration has been tested; the loop raises on the
        # first file to be patched.
        raise NotImplementedError("patcher should be tested")
        from tests.pymods import Patcher
        Patcher(patcher).patch(out_fname, ref_fname)
def make_html_diff_files(self):
    """Generate and write diff files in HTML format.

    For each file to test, runs tests/pymods/diff.py (through the timebomb
    runner) to produce <name>.diff.html in the working directory, and stores
    the path in f.hdiff_fname. Timeouts are recorded in self.exceptions.
    """
    assert self._executed
    # make_html_diff == 0 disables HTML diffs entirely.
    if (self.make_html_diff == 0 or
        self._status in ["disabled", "skipped"]): return
    diffpy = self.abenv.apath_of("tests", "pymods", "diff.py")
    for f in self.files_to_test:
        # make_html_diff == 1: only produce diffs for files that failed fldiff.
        if f.fld_isok and self.make_html_diff == 1:
            continue
        ref_fname = os.path.abspath(os.path.join(self.ref_dir, f.name))
        if not os.path.isfile(ref_fname) and ref_fname.endswith(".stdout"):
            ref_fname = ref_fname[:-7] + ".out"  # FIXME Hack due to the stdout-out ambiguity
        out_fname = os.path.abspath(os.path.join(self.workdir,f.name))
        # Check whether output and ref file exist.
        out_exists = os.path.isfile(out_fname)
        ref_exists = os.path.isfile(ref_fname)
        hdiff_fname = os.path.abspath(os.path.join(self.workdir, f.name+".diff.html"))
        f.hdiff_fname = hdiff_fname
        x, ext = os.path.splitext(f.name)
        safe_hdiff = ext in [".out", ".stdout"]  # Create HTML diff file only for these files
        if ref_exists and out_exists and safe_hdiff:
            out_opt = "-u"
            #out_opt = "-t" # For simple HTML table. (can get stuck)
            #args = ["python", diffpy, out_opt, "-f " + hdiff_fname, out_fname, ref_fname ]
            args = [diffpy, out_opt, "-f " + hdiff_fname, out_fname, ref_fname ]
            cmd = " ".join(args)
            #print("Diff", cmd)
            # Run under the timebomb so a stuck diff cannot hang the suite.
            p, ret_code = self.timebomb.run(cmd, shell=True, cwd=self.workdir)
            if ret_code != 0:
                err_msg = "Timeout error (%s s) while executing %s, retcode = %s" % (
                    self.timebomb.timeout, str(args), ret_code)
                self.exceptions.append(self.Error(err_msg))
            else:
                # Protect the generated diff from clean_workdir.
                self.keep_files(hdiff_fname)
def make_txt_diff_files(self):
    """Generate and write diff files in txt format.

    For each file that failed fldiff, runs tests/pymods/diff.py to produce
    <name>.diff in the working directory and stores the path in f.diff_fname.
    Timeouts are recorded in self.exceptions.
    """
    assert self._executed
    if self._status in ["disabled", "skipped"]:
        return
    #print(self._status)
    #if self._status not in ["failed", "passed"]:
    #    return
    diffpy = self.abenv.apath_of("tests", "pymods", "diff.py")
    for f in self.files_to_test:
        #print(f, f.fld_isok)
        # Text diffs are only produced for files that failed fldiff.
        if f.fld_isok:
            continue
        ref_fname = os.path.abspath(os.path.join(self.ref_dir, f.name))
        if not os.path.isfile(ref_fname) and ref_fname.endswith(".stdout"):
            ref_fname = ref_fname[:-7] + ".out"  # FIXME Hack due to the stdout-out ambiguity
        out_fname = os.path.abspath(os.path.join(self.workdir, f.name))
        # Check whether output and ref file exist.
        out_exists = os.path.isfile(out_fname)
        ref_exists = os.path.isfile(ref_fname)
        diff_fname = os.path.abspath(os.path.join(self.workdir, f.name + ".diff"))
        f.diff_fname = diff_fname
        x, ext = os.path.splitext(f.name)
        if ref_exists and out_exists:
            # n is for ndiff format, c for context, u for unified
            #for out_opt in ["-n", "-c"]:
            #out_opt = "-n"
            #out_opt = "-c"
            out_opt = "-u"
            args = [diffpy, out_opt, "-f " + diff_fname, out_fname, ref_fname]
            cmd = " ".join(args)
            # Run under the timebomb so a stuck diff cannot hang the suite.
            (p, ret_code) = self.timebomb.run(cmd, shell=True, cwd=self.workdir)
            if ret_code != 0:
                err_msg = "Timeout error (%s s) while executing %s, retcode = %s" % (
                    self.timebomb.timeout, str(args), ret_code)
                self.exceptions.append(self.Error(err_msg))
            else:
                # Protect the generated diff from clean_workdir.
                self.keep_files(diff_fname)
def write_html_report(self, fh=None, oc="oc"):
    """Write the HTML file summarizing the results of the test.

    Args:
        fh: optional open file object; if None, a new test_report.html is
            created in self.workdir and closed at the end.
        oc: "o" prepends the HTML header, "c" appends the footer; the default
            "oc" produces a standalone page. Test chains pass "o"/""/"c" to
            concatenate several reports in one file.
    """
    assert self._executed
    close_fh = False
    if fh is None:
        close_fh = True
        html_report = os.path.join(self.workdir, "test_report.html")
        fh = open(html_report, "w")
        self.keep_files(fh.name)
    # Make sure the diff files referenced by the template exist.
    self.make_html_diff_files()
    self.make_txt_diff_files()
    # Try to read stdout and stderr.
    # Ignore errors (fock takes years to flush the stdout)
    stdout_text, stderr_text = 2*("",)
    nlast = 120
    if not self.fld_isok:
        try:
            stderr_text = str2html(self.stderr_read())
            stdout_text = str2html(tail_file(self.stdout_fname, nlast))
        except:
            pass
    ##################################################
    # Document Name Space that serves as the substitution
    # namespace for instantiating a doc template.
    try:
        username = os.getlogin()
    except:
        username = "No_username"
    DNS = {
        "self": self,
        "page_title": "page_title",
        "user_name": username,
        "hostname": gethostname(),
        "Headings": ['File_to_test', 'Status', 'fld_output', 'fld_options', 'txt_diff', 'html_diff',] ,
        "nlast": nlast,
        "stderr_text": stderr_text,
        "stdout_text": stdout_text,
        # Functions and modules available in the template.
        "time": time,
        "pj": os.path.join,
        "basename": os.path.basename,
        "str2html": str2html,
        "sec2str": sec2str,
        "args2htmltr": args2htmltr,
        "html_link" : html_link,
        "status2html": status2html
    }
    header = """
<html>
<head><title>$page_title</title></head>
<body bgcolor="#FFFFFF" text="#000000">
"""
    # Skipped/disabled tests get a minimal body instead of the full table.
    if self.status in ["skipped", "disabled"]:
        if self.status == "skipped":
            template = str2html(self.skip_msg)
        else:
            template = "This test has been disabled!"
    else:
        # xyaptu template: ${...} expressions and <py-open>/<py-close> blocks
        # are evaluated against DNS by the xcopier below.
        template = """
<hr>
<h1>Results of test ${self.full_id}</h1>
MPI nprocs = ${self.nprocs},
run_etime = ${sec2str(self.run_etime)} s,
tot_etime = ${sec2str(self.tot_etime)} s
<br>
${html_link("stdin", basename(self.stdin_fname))},
${html_link("stdout", basename(self.stdout_fname))},
${html_link("stderr", basename(self.stderr_fname))}
<p>
<table width="100%" border="0" cellspacing="0" cellpadding="2">
<tr valign="top" align="left">
<py-open code = "for h in Headings:"> </py-open>
<th>${h}</th>
<py-close/>
</tr>
<py-open>for idx, f in enumerate(self.files_to_test):</py-open>
<tr valign="top" align="left">
<py-line code = "fld_link = html_link(basename(f.fldiff_fname))"/>
<py-line code = "txt_diff_link = html_link(basename(f.diff_fname))"/>
<py-line code = "html_diff_link = html_link(basename(f.hdiff_fname))"/>
<py-line code = "tab_row = args2htmltr(f.name, status2html(f.fld_status), fld_link, f.fld_options, txt_diff_link, html_diff_link)"/>
${tab_row}
</tr>
<py-close/>
</table>
<py-open>for idx, f in enumerate(self.files_to_test):</py-open>
<py-open code="if f.fld_status != 'succeeded':"/>
<p> ${f.name} ${f.fld_msg} </p>
<py-close/>
<py-open code="if not self.fld_isok:"/>
<py-open code="if self.exceptions:"/>
<hr><p>
<h1>Exceptions raised at run-time:</h1>
<py-open code="for idx, e in enumerate(self.exceptions):"/>
<p> $idx) ${str2html(str(e))}</p>
<py-close/>
<br>
<py-close/>
<hr><p>
<h1>Standard Error of test ${self.id}:</h1>
${stderr_text}
<hr><p>
<h1>Standard output of test ${self.id} (last ${nlast} lines):</h1>
${stdout_text}
<br>
<py-close/>
<p>
<h3>Extra Information</h3>
<py-line code = "authors = ', '.join([a for a in self.authors])" />
<p>Authors = ${authors}</p>
<py-line code = "keys = ', '.join([k for k in self.keywords])" />
<p>Keywords = ${keys}</p>
<p>${self.listoftests(abslink=False)}</p>
"""
    footer = """
<hr>
Automatically generated by %s on %s. Logged on as %s@%s
<hr>
</body>
</html> """ % (_MY_NAME, time.asctime(), username, gethostname())
    if "o" in oc: template = header + template
    if "c" in oc: template += footer
    # Set a file-like object to template
    template_stream = StringIO(template)
    # Initialise an xyaptu xcopier, and call xcopy
    xcp = xcopier(DNS, ouf=fh)
    xcp.xcopy(template_stream)
    if close_fh: fh.close()
def _get_one_backtrace(self):
    """Build a NagBacktrace object from the lines of the test stderr."""
    stderr_lines = self.stderr_readlines()
    return NagBacktrace(stderr_lines)
def get_backtraces(self):
    """Return the backtraces of the test as a one-element list."""
    backtrace = self._get_one_backtrace()
    return [backtrace]
#############################################################################################################
# Subclasses needed to handle the different executables
#############################################################################################################
class AbinitTest(BaseTest):
    """
    Class for Abinit tests. Redefine the make_stdin method of BaseTest
    """
    def make_stdin(self):
        """Build the 'files file' content piped to abinit via stdin.

        One item per line: input file, output file, the three prefixes for
        input/output/temporary files, then the pseudopotential paths.
        """
        t_stdin = StringIO()
        inp_fname = self.cygwin_path(self.inp_fname)
        # Use the basename instead of the absolute path because the input has been already copied
        # and we might want to change it especially if we are debugging the code
        inp_fname = os.path.basename(inp_fname)
        t_stdin.write(inp_fname + "\n")
        out_fname = self.id + ".out"
        t_stdin.write(out_fname + "\n")
        # Prefix for input files (defaults to "<id>i").
        if self.input_prefix:
            i_prefix = self.input_prefix
        else:
            i_prefix = self.id + "i"
        # Prefix for output files (defaults to "<id>o").
        if self.output_prefix:
            o_prefix = self.output_prefix
        else:
            o_prefix = self.id + "o"
        t_prefix = self.id #+ "t"
        t_stdin.writelines([l + "\n" for l in [i_prefix, o_prefix, t_prefix]])
        # Path to the pseudopotential files.
        # 1) pp files are searched in pspd_dir first then in workdir.
        psp_paths = [os.path.join(self.abenv.psps_dir, pname) for pname in self.psp_files]
        for idx, psp in enumerate(psp_paths):
            if not os.path.isfile(psp):
                pname = os.path.join(self.workdir, os.path.basename(psp))
                if os.path.isfile(pname):
                    # Use local pseudo.
                    psp_paths[idx] = pname
                else:
                    err_msg = "Cannot find pp file %s, neither in Psps_for_tests nor in self.workdir" % pname
                    self.exceptions.append(self.Error(err_msg))
        psp_paths = [self.cygwin_path(p) for p in psp_paths] # Cygwin
        t_stdin.writelines([p + "\n" for p in psp_paths])
        return t_stdin.getvalue()
class AnaddbTest(BaseTest):
    """
    Class for Anaddb tests. Redefine the make_stdin method of BaseTest
    """
    def make_stdin(self):
        """Build the 'files file' content piped to anaddb via stdin.

        Lines in order: input file, output file, input DDB, output molecular
        dynamics, input GKK, elphon basename, ddk file list.
        """
        t_stdin = StringIO()
        inp_fname = self.cygwin_path(self.inp_fname) # cygwin
        t_stdin.write( inp_fname + "\n")         # 1) formatted input file
        t_stdin.write( self.id + ".out" + "\n")  # 2) formatted output file e.g. t13.out
        iddb_fname = self.id + ".ddb.in"
        if self.input_ddb:
            iddb_fname = os.path.join(self.workdir, self.input_ddb) # Use output DDB of a previous run.
            if not os.path.isfile(iddb_fname):
                self.exceptions.append(self.Error("%s no such DDB file: " % iddb_fname))
        iddb_fname = self.cygwin_path(iddb_fname) # cygwin
        t_stdin.write( iddb_fname + "\n")        # 3) input derivative database e.g. t13.ddb.in
        t_stdin.write( self.id + ".md" + "\n")   # 4) output molecular dynamics e.g. t13.md
        input_gkk = self.id + ".gkk"
        if self.input_gkk:
            input_gkk = os.path.join(self.workdir, self.input_gkk) # Use output GKK of a previous run.
            if not os.path.isfile(input_gkk):
                self.exceptions.append(self.Error("%s no such GKK file: " % input_gkk) )
        input_gkk = self.cygwin_path(input_gkk) # cygwin
        t_stdin.write(input_gkk + "\n")          # 5) input elphon matrix elements (GKK file) :
        t_stdin.write(self.id + "\n")            # 6) base name for elphon output files e.g. t13
        input_ddk = self.id + ".ddk"
        if not os.path.isfile(input_ddk): # Try in input directory:
            input_ddk = os.path.join(self.inp_dir, input_ddk)
        # FIXME: Someone has to rewrite the treatment of the anaddb files file
        input_ddk = self.cygwin_path(input_ddk)
        t_stdin.write(input_ddk + "\n")          # 7) file containing ddk filenames for elphon/transport :
        return t_stdin.getvalue()
class AimTest(BaseTest):
    """Class for Aim tests. Redefines the make_stdin method of BaseTest."""

    def make_stdin(self):
        """Build the 'files file' content piped to aim via stdin."""
        lines = [
            self.cygwin_path(self.inp_fname),  # formatted input file e.g. .../Input/t57.in
            self.id + "i_DEN",                 # input density e.g. t57i_DEN
            self.id,                           # prefix for output files e.g. t57
        ]
        # Absolute paths to the pseudopotential files (cygwin-translated).
        lines += [self.cygwin_path(os.path.join(self.abenv.psps_dir, pname))
                  for pname in self.psp_files]
        return "".join(l + "\n" for l in lines)
class ConductiTest(BaseTest):
    """Class for Conducti tests. Redefines the make_stdin method of BaseTest."""

    def make_stdin(self):
        """Build the stdin content: input file, then the log-file prefix."""
        inp = self.cygwin_path(self.inp_fname)  # formatted input file e.g. .../Input/t57.in
        # self.id is used as the prefix of the log file names e.g. t57.
        return inp + "\n" + self.id + "\n"
class OpticTest(BaseTest):
    """Class for Optic tests. Redefines the make_stdin method of BaseTest."""

    def make_stdin(self):
        """Build the stdin content piped to optic."""
        pieces = [
            self.cygwin_path(self.inp_fname),  # optic input file e.g. .../Input/t57.in
            self.id + ".out",                  # output, e.g. t57.out
            # Used as suffix to diff and prefix to log file names,
            # and also as root for temporaries.
            self.id,
        ]
        return "\n".join(pieces) + "\n"
class Band2epsTest(BaseTest):
    """Class for band2eps tests. Redefines the make_stdin method of BaseTest."""

    def make_stdin(self):
        """Build the stdin content piped to band2eps."""
        lines = [
            self.cygwin_path(self.inp_fname),  # input file e.g. .../Input/t51.in
            self.id + ".out.eps",              # output file e.g. t51.out.eps
            # input freq file e.g. Input/t51.in_freq
            self.cygwin_path(os.path.join(self.inp_dir, self.id + ".in_freq")),
        ]
        # Optional displacement file: pass the literal string "no" when absent.
        displ_path = os.path.join(self.inp_dir, self.id + ".in_displ")
        if os.path.isfile(displ_path):
            lines.append(self.cygwin_path(displ_path))
        else:
            lines.append("no")
        return "".join(l + "\n" for l in lines)
class AtompawTest(BaseTest):
    """
    Class for Atompaw tests. Redefine the methods clean_workdir and bin_path provided by BaseTest
    """
    def clean_workdir(self, other_test_files=None):
        """Keep all atompaw output files."""
        # Intentional no-op override: the docstring is the whole body,
        # so the base-class cleanup never runs and every file is kept.

    @property
    def bin_path(self):
        """atompaw is not located in src/98_main"""
        # Resolve the binary through the build environment instead of the
        # default location used by BaseTest.
        return self.build_env.path_of_bin("atompaw")
def exec2class(exec_name):
    """
    Return the test class associated to the executable. Default is BaseTest.
    """
    mapping = {
        "abinit": AbinitTest,
        "aim": AimTest,
        "anaddb": AnaddbTest,
        "atompaw": AtompawTest,
        "band2eps": Band2epsTest,
        "conducti": ConductiTest,
        "optic": OpticTest,
    }
    return mapping.get(exec_name, BaseTest)
class ChainOfTests(object):
    """
    A list of tests that should be executed together due to inter-dependencies.
    It provides the same interface as the one given by BaseTest.
    """
    Error = BaseTestError

    def __init__(self, tests):
        """
        Args:
            tests: iterable of BaseTest instances forming the chain.

        Raises:
            ChainOfTests.Error: if the tests do not share input dir and suite.
        """
        self.tests = tuple(tests)
        self.inp_dir = tests[0].inp_dir
        self.suite_name = tests[0].suite_name
        # Consistency check: every member must come from the same input
        # directory and belong to the same suite.
        for t in tests:
            if self.inp_dir != t.inp_dir or self.suite_name != t.suite_name:
                raise self.Error("All tests should be located in the same directory")
        # Union of the keywords of the individual tests.
        self.keywords = set()
        for t in self.tests:
            self.keywords = self.keywords.union(t.keywords)
        # Union of the CPP variables needed by the individual tests.
        self.need_cpp_vars = set()
        for t in self.tests:
            self.need_cpp_vars = self.need_cpp_vars.union(t.need_cpp_vars)
        self._files_to_keep = []

    def __len__(self):
        return len(self.tests)

    def __str__(self):
        return "\n".join(str(t) for t in self)

    def __iter__(self):
        for t in self.tests: yield t

    def info_on_chain(self):
        """Return (string, nlinks): a description of the inter-test links and their count."""
        attr_names = ["extra_inputs", "pre_commands", "post_commands"]
        string = "Info on chain: %s\n" % self.full_id
        nlinks = 0
        for test in self:
            string += test.full_id + "executable " + test.executable + ":\n"
            for (attr, value) in test.__dict__.items():
                if (value and (attr in attr_names or
                    attr.startswith("input_") or attr.startswith("output_"))):
                    string += " %s = %s\n" % (attr, value)
                    nlinks += 1
        return string, nlinks

    # A lot of boilerplate code!
    # See the doc strings of BaseTest
    @property
    def id(self):
        """Ids of the members joined by '-'."""
        return "-".join([test.id for test in self])

    @property
    def full_id(self):
        """String of the form [suite_name][id]."""
        return "["+self.suite_name+"]["+self.id+"]"

    @property
    def max_nprocs(self):
        """Maximum number of MPI processes over the members."""
        return max([test.max_nprocs for test in self])

    @property
    def _executed(self):
        """True if all the members have been executed."""
        return all([test._executed for test in self])

    @property
    def ref_dir(self):
        """Reference directory (must be the same for all members)."""
        ref_dirs = [test.ref_dir for test in self]
        assert all([dir == ref_dirs[0] for dir in ref_dirs])
        return ref_dirs[0]

    def listoftests(self, width=100, html=True, abslink=True):
        """Concatenate the listoftests strings of the members (text or HTML)."""
        string = ""
        if not html:
            string += "\n".join([test.listoftests(width, html, abslink) for test in self])
            string = self.full_id + ":\n" + string
        else:
            string += "<br>".join([test.listoftests(width, html, abslink) for test in self])
            string = "Test Chain " + self.full_id + ":<br>" + string
        return string

    @property
    def files_to_test(self):
        """Concatenation of the files_to_test of the members."""
        files = []
        for test in self: files.extend(test.files_to_test)
        return files

    @property
    def extra_inputs(self):
        """Concatenation of the extra_inputs of the members."""
        extra_inputs = []
        for test in self: extra_inputs.extend(test.extra_inputs)
        return extra_inputs

    @property
    def inputs_used(self):
        """Concatenation of the inputs_used of the members."""
        inputs = []
        for test in self: inputs.extend(test.inputs_used)
        return inputs

    @property
    def run_etime(self):
        """Total run elapsed time of the chain."""
        return sum([test.run_etime for test in self])

    @property
    def tot_etime(self):
        """Total elapsed time of the chain."""
        return sum([test.tot_etime for test in self])

    @property
    def isok(self):
        """True if all the members are OK."""
        return all([test.isok for test in self])

    @property
    def exceptions(self):
        """Concatenation of the exceptions of the members."""
        excs = []
        for test in self:
            excs.extend(test.exceptions)
        return excs

    @property
    def status(self):
        """Overall status of the chain, derived from the members and their fldiff results."""
        _stats = [test._status for test in self]
        if "disabled" in _stats or "skipped" in _stats:
            # All the members are expected to share the same status.
            if any([s != _stats[0] for s in _stats]):
                #print(self)
                #print("WARNING, expecting all(s == _stats[0] but got\n %s" % str(_stats))
                return "failed"
            return _stats[0]
        all_fldstats = [f.fld_status for f in self.files_to_test]
        if "failed" in all_fldstats: return "failed"
        if "passed" in all_fldstats: return "passed"
        if any([s != "succeeded" for s in all_fldstats]):
            print(self)
            print("WARNING, expecting all(s == 'succeeded' but got\n %s" % str(all_fldstats))
            return "failed"
        return "succeeded"

    def keep_files(self, files):
        """Register a file name (string) or a list of file names to be preserved."""
        if is_string(files):
            self._files_to_keep.append(files)
        else:
            self._files_to_keep.extend(files)

    @property
    def files_to_keep(self):
        """List of files that must survive clean_workdir.

        NOTE: each access extends _files_to_keep with the member files, so
        repeated accesses accumulate duplicates; preserved for backward
        compatibility (clean_workdir tolerates duplicates).
        """
        # The files produced by the individual tests.
        files_of_tests = []
        for test in self:
            files_of_tests.extend(test.files_to_keep)
        # Add the files produced by self.
        self._files_to_keep += files_of_tests
        return self._files_to_keep

    def cpkl_dump(self, protocol=-1):
        """Pickle the chain to <workdir>/<id>.cpkl and protect the file."""
        self.cpkl_fname = os.path.join(self.workdir, self.id + ".cpkl")
        with open(self.cpkl_fname, "wb") as fh:
            pickle.dump(self, fh, protocol=protocol)
        self.files_to_keep.append(self.cpkl_fname)

    def has_keywords(self, keywords, mode="any"):
        """True if the chain has all/any of the keywords (mode = 'all' or 'any')."""
        if mode == "all":
            return set(keywords).issubset(self.keywords)
        elif mode == "any":
            return set(keywords).intersection(self.keywords)
        else:
            raise ValueError("wrong mode %s" % mode)

    def has_variables(self, ivars):
        """Return the first non-empty match of ivars among the members, else False."""
        # Initialize to False: the original raised UnboundLocalError
        # when the chain was empty.
        matches = False
        for test in self:
            matches = test.has_variables(ivars)
            if matches: break
        return matches

    def edit_input(self, editor=None):
        """Open the input files of the members in the editor."""
        if editor is None: editor = Editor()
        # The previous try/except that simply re-raised was a no-op.
        for test in self:
            test.edit_input(editor=editor)

    @property
    def _authors_snames(self):
        """Union of the second names of the authors of the members."""
        snames = set()
        for test in self:
            snames = snames.union(test._authors_snames)
        return snames

    def has_authors(self, authors, mode="any"):
        """True if the chain has all/any of the authors (mode = 'all' or 'any')."""
        if mode == "all":
            return set(authors).issubset(self._authors_snames)
        elif mode == "any":
            return set(authors).intersection(self._authors_snames)
        else:
            raise ValueError("wrong mode %s" % mode)

    def write_html_report(self):
        """Write a single HTML report with one section per member of the chain."""
        html_report = os.path.join(self.workdir, "test_report.html")
        with open(html_report, "w") as fh:
            for idx, test in enumerate(self):
                # First member opens the document, last one closes it.
                oc = ""
                if idx == 0: oc += "o"
                if idx == (len(self)-1): oc += "c"
                test.write_html_report(fh=fh, oc=oc)

    def run(self, build_env, runner, workdir, nprocs=1, **kwargs):
        """Run all the members of the chain, in order, in the same workdir."""
        workdir = os.path.abspath(workdir)
        if not os.path.exists(workdir): os.mkdir(workdir)
        self.workdir = workdir
        for test in self:
            test.run(build_env, runner, workdir=self.workdir, nprocs=nprocs, **kwargs)

    def clean_workdir(self, other_test_files=None):
        """Clean the workdir of the members, protecting the files of the whole chain."""
        # NOTE: other_test_files is accepted for interface compatibility but the
        # chain always protects the union of the files of its members.
        for test in self:
            test.clean_workdir(other_test_files=self.files_to_keep)

    def patch(self, patcher=None):
        """Patch the output files of all the members with the given patcher."""
        for test in self:
            test.patch(patcher)

    def get_backtraces(self):
        """Return one backtrace per member of the chain."""
        return [test._get_one_backtrace() for test in self]
class AbinitTestSuite(object):
"""
List of BaseTest instances. Provide methods to:
1) select subset of tests according to keywords, authors, numbers
2) run tests in parallel with python threads
3) analyze the final results
"""
def __init__(self, abenv, inp_files=None, test_list=None, keywords=None, need_cpp_vars=None):
    """Build the suite either from input files or from an explicit test list.

    Exactly one of inp_files / test_list must be provided.
    """
    args = [inp_files, test_list]
    # Count how many of the two alternatives were actually given.
    if sum(arg is not None for arg in args) != 1:
        raise ValueError("Wrong args: " + str(args))
    self._executed = False
    self.abenv = abenv
    self.exceptions = []
    if inp_files is not None:
        # Parse the input files and generate the tests.
        self.tests = make_abitests_from_inputs(
            inp_files, abenv,
            keywords=keywords, need_cpp_vars=need_cpp_vars)
    elif test_list is not None:
        # keywords/need_cpp_vars only make sense when parsing input files.
        assert keywords is None
        assert need_cpp_vars is None
        self.tests = tuple(test_list)
    else:
        raise ValueError("Either inp_files or test_list must be specified!")
def __str__(self):
    """One line per test of the suite."""
    return "\n".join(str(t) for t in self.tests)
def __add__(self, other):
    """Concatenate two suites into a new one sharing self.abenv."""
    merged = list(self)
    merged.extend(other)
    return self.__class__(self.abenv, test_list=merged)
def __len__(self):
    """Number of top-level entries (a chain counts as one)."""
    return len(self.tests)
def __iter__(self):
    """Iterate over the tests of the suite."""
    return iter(self.tests)
def __getitem__(self, key):  # FIXME: this won't work for tutorial, paral and other test suites.
    """Called by self[key]; only slice instances are supported."""
    if not isinstance(key, slice):
        raise NotImplementedError("__getitem__ expects a slice instance")
    return self.__getslice(key)
def __getslice(self, slice):
    """Return a new suite with the tests whose numeric id falls in [start, stop).

    NOTE: the parameter shadows the builtin `slice`; kept for compatibility.
    """
    start = slice.start
    if start is None: start = 0
    stop = slice.stop
    if stop is None: stop = 10000 # Not very elegant, but cannot use len(self) since indices are not contiguous
    assert slice.step is None # Slices with steps (e.g. [1:4:2]) are not supported.
    # Rules for the test id:
    # Simple case: t01, tgw1_1
    # test chain (no MPI): t81-t82-t83-t84, tudet_1-tudet_2-tudet_3
    # multi-parallel tests: t74_MPI2, t51_MPI1-t52_MPI1-t53_MPI1, tdfpt_01_MPI2 ...
    test_list = []
    for test in self:
        #print("ID",test.id)
        # extract the ID of the first test (if test_chain)
        tokens = test.id.split("-")
        assert tokens[0][0] == "t" # Assume first character is "t"
        num = tokens[0][1:]
        if "_MPI" in test.id:
            # Handle multi-parallel tests.
            #print(test.id)
            idx = test.id.find("_MPI")
            tok = test.id[1:idx]
            #print(tok)
            idx = tok.rfind("_")
            if idx != -1:
                # Handle tdfpt_01_MPI2 ...
                # FIXME: this will fail if _OMP2_MPI2
                tok = tok[idx+1:]
            try:
                num = int(tok)
            except ValueError:
                raise ValueError("Cannot convert %s to integer" % tok)
        else:
            # Simple case or test_chain
            idx = num.rfind("_")
            if idx != -1:
                num = int(num[idx+1:])
            num = int(num)
        # Keep the test if its numeric index falls in the requested window.
        if num in range(start, stop):
            #print "got", test.id
            test_list.append(test)
    return self.__class__(self.abenv, test_list=test_list)
@property
def full_length(self):
    """Total number of individual tests, counting every member of each chain."""
    total = 0
    for test in self:
        # Entries without __len__ (plain tests) count as one.
        total += getattr(test, "__len__", lambda: 1)()
    return total
@property
def run_etime(self):
    """Total elapsed time i.e. the wall-time spent in the sub-processes e.g. abinit)"""
    assert self._executed
    return sum(test.run_etime for test in self)
@property
def keywords(self):
    """Set with all the keywords of the tests of the suite."""
    all_keys = set()
    for test in self:
        all_keys.update(test.keywords)
    return all_keys
def has_keywords(self, keywords):
    """True if every keyword in `keywords` appears in the suite keywords."""
    return set(keywords).issubset(self.keywords)
@property
def need_cpp_vars(self):
    """Set with the CPP variables required by the tests of the suite."""
    # Renamed local (was `vars`, shadowing the builtin).
    cpp_vars = []
    for test in self:
        cpp_vars.extend(test.need_cpp_vars)
    return set(cpp_vars)
def on_refslave(self):
    """True if we are running on a reference slave e.g. testf."""
    # The flag is only present after set_on_refslave has been called.
    return getattr(self, "_on_ref_slave", False)
def set_on_refslave(self, value=True):
    """Setter for the reference-slave flag (stored as a bool)."""
    self._on_ref_slave = bool(value)
def all_exceptions(self):
    """Return my exceptions + test exceptions.

    Returns a NEW list: the previous implementation aliased self.exceptions
    and extended it in place, so every call permanently duplicated the test
    exceptions inside the suite's own exception list.
    """
    all_excs = list(self.exceptions)
    for test in self:
        all_excs.extend(test.exceptions)
    return all_excs
def cpkl_dump(self, protocol=-1):
    """Serialize the whole suite with pickle into <workdir>/test_suite.cpkl."""
    fname = os.path.join(self.workdir, "test_suite.cpkl")
    self.cpkl_fname = fname
    with open(fname, "wb") as pickle_file:
        pickle.dump(self, pickle_file, protocol=protocol)
def _tests_with_status(self, status):
    """Return the tests of the suite whose final status equals `status`."""
    assert self._executed
    assert status in BaseTest._possible_status
    selected = []
    for test in self:
        if test.status == status:
            selected.append(test)
    return selected
def succeeded_tests(self):
    """Tests whose final status is 'succeeded'."""
    return self._tests_with_status("succeeded")

def passed_tests(self):
    """Tests whose final status is 'passed'."""
    return self._tests_with_status("passed")

def failed_tests(self):
    """Tests whose final status is 'failed'."""
    return self._tests_with_status("failed")

def skipped_tests(self):
    """Tests whose final status is 'skipped'."""
    return self._tests_with_status("skipped")

def disabled_tests(self):
    """Tests whose final status is 'disabled'."""
    return self._tests_with_status("disabled")
@property
def targz_fname(self):
    """
    Location of the tarball file with the results in HTML format
    Returns None if the tarball has not been created.
    """
    # Narrowed from a bare `except:` — only the missing-attribute case
    # should yield None; other errors must propagate.
    try:
        return self._targz_fname
    except AttributeError:
        return None
def create_targz_results(self):
    """Create the tarball file results.tar.gz in the working directory.

    Collects the files_to_keep of each executed test (skipping .cpkl/.py/pyc
    files), dropping stdout files unless the test status matters. Any problem
    is registered in self.exceptions and execution continues.
    """
    assert self._executed
    exclude_exts = [".cpkl", ".py", "pyc", ]
    self._targz_fname = None
    ofname = os.path.join(self.workdir,"results.tar.gz")
    # The most delicate part here is the treatment of the exceptions
    # since the test might not have produced the reference files
    # we want to copy to the server. If something goes wrong, we simply
    # register the exception and we continue the execution.
    try:
        targz = tarfile.open(ofname, "w:gz")
        for test in self:
            # Don't try to collect files for tests that are disabled or skipped.
            if test.status in ["disabled", "skipped"]:
                continue
            files = set(test.files_to_keep)
            save_files = [f for f in files if not has_exts(f, exclude_exts)]
            # Store stdout files only if the test failed.
            important_status = ["failed",]
            # Special treatment for reference machines.
            # BUGFIX: on_refslave is a method; the previous `if self.on_refslave:`
            # tested the bound method object, which is always truthy.
            if self.on_refslave():
                important_status = ["passed", "failed",]
            if test.status not in important_status:
                if isinstance(test, ChainOfTests):
                    for t in test:
                        save_files.remove(t.stdout_fname)
                else:
                    save_files.remove(test.stdout_fname)
            for p in save_files:
                try:
                    # /foo/bar/suite_workdir/test_workdir/file --> test_workdir/t01/file
                    rpath = os.path.relpath(p, start=self.workdir)
                    targz.add(p, arcname=rpath.encode("ascii", "ignore"))
                except Exception:
                    # Handle the case in which the output file has not been produced.
                    exc = sys.exc_info()[1]
                    warnings.warn("exception while creating tarball file: %s" % str(exc))
                    self.exceptions.append(exc)
        targz.close()
        # Save the name of the tarball file.
        self._targz_fname = ofname
    except Exception:
        exc = sys.exc_info()[1]
        warnings.warn("exception while creating tarball file: %s" % str(exc))
        self.exceptions.append(exc)
def sanity_check(self):
    """Verify that no two tests in the suite share the same full_id.

    Raises:
        ValueError: if a duplicated full_id is found.
    """
    all_full_ids = [test.full_id for test in self]
    if len(all_full_ids) != len(set(all_full_ids)):
        # Message fixed: any duplicate triggers the error (the old text
        # said "more than two tests", understating the condition).
        raise ValueError("Cannot have two or more tests with the same full_id")
def run_tests(self, build_env, workdir, runner, nprocs=1, nthreads=1, runmode="static", **kwargs):
    """
    Execute the list of tests (main entry point for client code)

    Args:
        build_env:
            `BuildEnv` instance with info on the build environment.
        workdir:
            Working directory (string)
        runner:
            `JobRunner` instance
        nprocs:
            number of MPI processes to use for a single test.
        nthreads:
            number of OpenMP threads for tests

    Returns:
        `Results` instance, or None if the suite is empty or the
        directory lock cannot be acquired.
    """
    self.sanity_check()
    if len(self) == 0:
        warnings.warn("len(self) == 0")
        return
    workdir = os.path.abspath(workdir)
    if not os.path.exists(workdir): os.mkdir(workdir)
    self.workdir = workdir
    # Acquire the lock file so two suites cannot share the same workdir.
    self.lock = FileLock(os.path.join(self.workdir,"__run_tests_lock__"), timeout=3)
    try:
        self.lock.acquire()
    except self.lock.Error:
        msg = ("Timeout occured while trying to acquire the directory lock in %s.\n " % self.workdir +
               "Remove directory with `rm -rf` and rerun")
        warnings.warn(msg)
        return
    # Remove all stale files present in workdir (except the lock!)
    rmrf(self.workdir, exclude_paths=self.lock.lockfile)
    self.nprocs = nprocs
    self.nthreads = nthreads

    def run_and_check_test(test):
        """Helper function to execute the test. Must be thread-safe."""
        testdir = os.path.abspath(os.path.join(self.workdir, test.suite_name + "_" + test.id))
        # Run the test
        test.run(build_env, runner, testdir, nprocs=nprocs, runmode=runmode, **kwargs)
        # Write HTML summary
        test.write_html_report()
        # Dump the object with pickle.
        test.cpkl_dump()
        # Remove useless files in workdir.
        test.clean_workdir()

    ##############################
    # And now let's run the tests
    ##############################
    start_time = time.time()
    if nthreads == 1:
        logger.info("Sequential version")
        for test in self:
            run_and_check_test(test)
    elif nthreads > 1:
        logger.info("Threaded version with nthreads = %s" % nthreads)
        from threading import Thread
        # Internal replacement that provides task_done, join_with_timeout (py2.4 compliant)
        from pymods.myqueue import QueueWithTimeout

        def worker():
            while True:
                test = q.get()
                run_and_check_test(test)
                q.task_done()

        q = QueueWithTimeout()
        for i in range(nthreads):
            t = Thread(target=worker)
            # Attribute assignment instead of the deprecated setDaemon().
            t.daemon = True
            t.start()
        for test in self: q.put(test)
        # Block until all tasks are done. Raise QueueTimeoutError after timeout seconds.
        timeout_1test = float(runner.timebomb.timeout)
        if timeout_1test <= 0.1: timeout_1test = 240.
        queue_timeout = 1.3 * timeout_1test * self.full_length / float(nthreads)
        q.join_with_timeout(queue_timeout)
    # Run completed.
    self._executed = True
    # Collect HTML files in a tarball.
    self.create_targz_results()
    nsucc = len(self.succeeded_tests())
    npass = len(self.passed_tests())
    nfail = len(self.failed_tests())
    nskip = len(self.skipped_tests())
    ndisa = len(self.disabled_tests())
    self.tot_etime = time.time() - start_time
    mean_etime = sum([test.run_etime for test in self]) / len(self)
    dev_etime = (sum([(test.run_etime - mean_etime)**2 for test in self]) / len(self))**0.5
    print("Test suite completed in %.2f s (average time for test = %.2f s, stdev = %.2f s)" % (
        self.tot_etime, mean_etime, dev_etime))
    print("failed: %s, succeeded: %s, passed: %s, skipped: %s, disabled: %s" % (
        nfail, nsucc, npass, nskip, ndisa))
    # Report outliers (tests more than 2 stdev away from the mean).
    for test in self:
        if abs(test.run_etime - mean_etime) > 2 * dev_etime:
            print("%s has run_etime %.2f s" % (test.full_id, test.run_etime))
    # Print summary table: per-suite counters + elapsed times.
    stats_suite = {}
    for test in self:
        if test.suite_name not in stats_suite:
            d = dict.fromkeys(BaseTest._possible_status, 0)
            d["run_etime"] = 0.0
            d["tot_etime"] = 0.0
            stats_suite[test.suite_name] = d
    for test in self:
        stats_suite[test.suite_name][test.status] += 1
        stats_suite[test.suite_name]["run_etime"] += test.run_etime
        stats_suite[test.suite_name]["tot_etime"] += test.tot_etime
    # BUGFIX: dict.keys() returns a view in Python 3 and has no sort() method;
    # use sorted() instead of keys() + in-place sort.
    suite_names = sorted(stats_suite.keys())
    times = ["run_etime", "tot_etime"]
    table = [["Suite"] + BaseTest._possible_status + times]
    for suite_name in suite_names:
        stats = stats_suite[suite_name]
        row = [suite_name] + [str(stats[s]) for s in BaseTest._possible_status] + ["%.2f" % stats[s] for s in times]
        table.append(row)
    pprint_table(table)
    with open(os.path.join(self.workdir, "results.txt"), "w") as fh:
        pprint_table(table, out=fh)
    try:
        username = os.getlogin()
    except Exception:
        username = "No_username"
    # Create the HTML index.
    DNS = {
        "self": self,
        "runner": runner,
        "user_name": username,
        "hostname": gethostname(),
        "test_headings": ['ID', 'Status', 'run_etime (s)', 'tot_etime (s)'],
        "suite_headings": ['failed', 'passed', 'succeeded', 'skipped', 'disabled'],
        # Functions and modules available in the template.
        "time": time,
        "pj": os.path.join,
        "basename": os.path.basename,
        "str2html": str2html,
        "sec2str": sec2str,
        "args2htmltr": args2htmltr,
        "html_link": html_link,
        "status2html": status2html,
    }
    fname = os.path.join(self.workdir, "suite_report.html")
    header = """
<html>
<head><title>Suite Summary</title></head>
<body bgcolor="#FFFFFF" text="#000000">
<hr>
<h1>Suite Summary</h1>
<table width="100%" border="0" cellspacing="0" cellpadding="2">
<tr valign="top" align="left">
<py-open code = "for h in suite_headings:"> </py-open>
<th>${status2html(h)}</th>
<py-close/>
</tr>
<tr valign="top" align="left">
<py-open code = "for h in suite_headings:"> </py-open>
<td> ${len(self._tests_with_status(h))} </td>
<py-close/>
</tr>
</table>
<p>
tot_etime = ${sec2str(self.tot_etime)} <br>
run_etime = ${sec2str(self.run_etime)} <br>
no_pythreads = ${self.nthreads} <br>
no_MPI = ${self.nprocs} <br>
${str2html(str(runner))}
<hr>
"""
    # Renamed from `table` to avoid shadowing the summary table built above.
    table_tpl = """
<p>
<h1>Test Results</h1>
<table width="100%" border="0" cellspacing="0" cellpadding="2">
<tr valign="top" align="left">
<py-open code = "for h in test_headings:"> </py-open>
<th>$h</th>
<py-close/>
</tr>
"""
    for status in BaseTest._possible_status:
        table_tpl += self._pyhtml_table_section(status)
    table_tpl += "</table>"
    footer = """
<hr>
<h1>Suite Info</h1>
<py-line code = "keys = ', '.join(self.keywords)" />
<p>Keywords = ${keys}</p>
<py-line code = "cpp_vars = ', '.join(self.need_cpp_vars)"/>
<p>Required CPP variables = ${cpp_vars}</p>
<hr>
Automatically generated by %s on %s. Logged on as %s@%s
<hr>
</body>
</html> """ % (_MY_NAME, time.asctime(), username, gethostname() )
    template = header + table_tpl + footer
    template_stream = StringIO(template)
    # Initialise an xyaptu xcopier, and call xcopy (with ensures the file is closed).
    with open(fname, "w") as fh:
        xcp = xcopier(DNS, ouf=fh)
        xcp.xcopy(template_stream)
    # Release the lock.
    self.lock.release()
    return Results(self)
@staticmethod
def _pyhtml_table_section(status):
# ['ID', 'Status', 'run_etime', 'tot_etime'],
string = """
<py-open code="for test in self.%s_tests():"/>
<py-line code = "report_link = pj(basename(test.workdir),'test_report.html') " />
<tr valign="top" align="left">
<td> ${html_link(test.full_id, report_link)}</td>
<td> ${status2html(test.status)} </td>
<td> ${sec2str(test.run_etime)} </td>
<td> ${sec2str(test.tot_etime)} </td>
</tr>
<py-close/>
""" % status
return string
def patch(self, patcher=None):
"""
Patch the output files of the test with the specified patcher.
A default patcher is provided if patcher is None (use $PATCHER shell variable)
"""
for test in self:
test.patch(patcher)
def select_tests(self, with_keys=None, exclude_keys=None, with_authors=None, exclude_authors=None,
ivars=None, mode="any"):
"""
Extract the subset of tests matching the given conditions.
Returns:
`AbinitTestSuite` instance
"""
test_list = [test for test in self]
if with_keys:
test_list = [test for test in test_list if test.has_keywords(with_keys, mode=mode)]
if exclude_keys:
test_list = [test for test in test_list if not test.has_keywords(exclude_keys, mode=mode)]
if with_authors:
test_list = [test for test in test_list if test.has_authors(with_authors, mode=mode)]
if exclude_authors:
test_list = [test for test in test_list if not test.has_authors(exclude_authors, mode=mode)]
if ivars:
test_list = [test for test in test_list if test.has_variables(ivars)]
return AbinitTestSuite(self.abenv, test_list=test_list)
    def make_listoftests(self, width=100, html=True):
        """Create the ListOfTests files.

        Args:
            width: column width forwarded to each test's listoftests().
            html: if False, return the plain-text version; otherwise wrap
                the per-test sections in a complete HTML document.
        """
        if not html:
            # Plain text: one section per test, separated by blank lines.
            return "\n\n".join([test.listoftests(width, html) for test in self])
        else:
            # NOTE: whitespace inside these triple-quoted templates is part
            # of the generated file — do not reindent.
            header = """
            <html>
            <head><title>"LIST OF TESTS" FILE</title></head>
            <body bgcolor="#FFFFFF" text="#000000">
            <!-- Automatically generated by %s on %s. ****DO NOT EDIT**** -->""" % (_MY_NAME, time.asctime())
            body = "<hr>".join([test.listoftests(width, html) for test in self])
            footer = """
            <hr>
            Automatically generated by %s on %s.
            <hr>
            </body>
            </html>""" % (_MY_NAME, time.asctime())
            return header + body + footer
class Results(object):
    """Container for the final results of an executed test suite."""
    def __init__(self, test_suite):
        # Results may only be built after the suite has actually run.
        assert test_suite._executed
        self.failed_tests = test_suite.failed_tests()
        self.passed_tests = test_suite.passed_tests()
        self.succeeded_tests = test_suite.succeeded_tests()
        self.skipped_tests = test_suite.skipped_tests()
        self.disabled_tests = test_suite.disabled_tests()
        self.targz_fname = test_suite.targz_fname
    @lazy__str__
    def __str__(self): pass
    def tests_with_status(self, status):
        """Return the list of tests whose status equals *status*."""
        status2tests = {
            "succeeded": self.succeeded_tests,
            "passed": self.passed_tests,
            "failed": self.failed_tests,
            "disabled": self.disabled_tests,
            "skipped": self.skipped_tests,
        }
        return status2tests[status]
    @property
    def nfailed(self):
        """Number of failures"""
        return len(self.failed_tests)
    @property
    def npassed(self):
        """Number of tests marked as passed."""
        return len(self.passed_tests)
    def outref_files(self, status):
        """
        Return (out_files, ref_files)
        where out and ref are lists with the output files and the reference
        files of the tests with the given status.
        """
        pj = os.path.join
        out_files, ref_files = [], []
        for test in self.tests_with_status(status):
            for f in test.files_to_test:
                out_files.append(pj(test.workdir, f.name))
                ref_fname = pj(test.ref_dir, f.name)
                # FIXME Hack due to the ambiguity stdout, out!
                if ref_fname.endswith(".stdout") and not os.path.exists(ref_fname):
                    ref_fname = ref_fname[:-7] + ".out"
                ref_files.append(ref_fname)
        return out_files, ref_files
    def in_files(self, status):
        """List with the input files of the tests with the given status."""
        inp_fnames = []
        for test in self.tests_with_status(status):
            if isinstance(test, ChainOfTests):
                inp_fnames.extend(t.inp_fname for t in test)
            else:
                inp_fnames.append(test.inp_fname)
        return inp_fnames
    def patch_refs(self, status="failed"):
        """Patch the reference files of the tests with the specified status."""
        out_files, ref_files = self.outref_files(status=status)
        return Patcher().patch_files(out_files, ref_files)
    def edit_inputs(self, status="failed"):
        """Edit the input files of the tests with the specified status."""
        return Editor().edit_files(self.in_files(status=status))
    def inspect_stderrs(self, status="failed"):
        """Open the stderr of the tests with the given status in `Editor`."""
        return Editor().edit_files(self.stderr_files(status))
    def stderr_files(self, status="failed"):
        """List of non-empty error files of the tests with the specified status."""
        err_files = []
        for test in self.tests_with_status(status):
            if isinstance(test, ChainOfTests):
                # Keep only the members of the chain with a non-empty stderr.
                err_files.extend(t.stderr_fname for t in test if not t.has_empty_stderr)
            elif not test.has_empty_stderr:
                err_files.append(test.stderr_fname)
        return err_files
    def cpkl_dump(self, cpkl_fname, protocol=-1):
        """Save the object in pickle format."""
        with open(cpkl_fname, "wb") as fh:
            pickle.dump(self, fh, protocol=protocol)
if __name__ == "__main__":
    # Script entry point: print the automatically generated documentation
    # of the TEST_INFO configuration options.
    doc_testcnf_format()
| SamKChang/abinit-7.10.5_multipole | tests/pymods/testsuite.py | Python | gpl-3.0 | 121,738 | [
"ABINIT"
] | a495c773acf21190cb2e1e94fa94132625c309a2ed33a186ac2c4315e4555b77 |
"""
Display a per face receptor density on a body surface modeled as
irregular dataset (vertices & triangular faces) - the receptor sheet
Display stimulation variables across the sensory receptor elements
modulated in time, controlled by a set of buttons
Pick a particular receptor and select/highlight downstream sensory
neurons and interneurons influenced by this particular receptor.
For multiple selected receptors, show overlap colorcoded
- requires microcircuitry identifiers associated with receptor
identifiers
- requires graph representation of downstream circuitry with reachable
nodes
Distinguish parallel pathways (towards central) based on receptor
or neuron type
Distinguish the topological map in target structures in correspondence
with the receptor sheet (orderly but usually deformed)
"""
| unidesigner/microcircuit | doc/examples/receptor.py | Python | bsd-3-clause | 821 | [
"NEURON"
] | 2931461fc03a3e6011fbb9632ff425fd79a40459a4b2db947cf70df7b7ac4910 |
"""Standard test images.
For more images, see
- http://sipi.usc.edu/database/database.php
"""
import os as _os
from .. import data_dir
from ..io import imread, use_plugin
from .._shared._warnings import expected_warnings
from ._binary_blobs import binary_blobs
from .. import img_as_bool
# Public names exported by ``from skimage.data import *``.
# ``binary_blobs`` is imported above but was previously missing from this
# list, so it was silently excluded from star-imports; it is now exported.
__all__ = ['load',
           'astronaut',
           'binary_blobs',
           'camera',
           'checkerboard',
           'chelsea',
           'clock',
           'coffee',
           'coins',
           'horse',
           'hubble_deep_field',
           'immunohistochemistry',
           'logo',
           'moon',
           'page',
           'text',
           'rocket']
def load(f, as_grey=False):
    """Load an image file located in the data directory.

    Parameters
    ----------
    f : string
        File name.
    as_grey : bool, optional
        Convert to greyscale.

    Returns
    -------
    img : ndarray
        Image loaded from ``skimage.data_dir``.
    """
    use_plugin('pil')
    path = _os.path.join(data_dir, f)
    return imread(path, as_grey=as_grey)
def camera():
    """Gray-level "camera" image.

    A classic test picture, often used for segmentation and denoising
    examples.
    """
    fname = "camera.png"
    return load(fname)
def astronaut():
    """Colour image of the astronaut Eileen Collins.

    Eileen Collins, an American astronaut, was selected as an astronaut in
    1992, first piloted the space shuttle STS-63 in 1995, and retired in
    2006 after a total of 38 days, 8 hours and 10 minutes in outer space.

    Downloaded from the NASA Great Images database
    <http://grin.hq.nasa.gov/ABSTRACTS/GPN-2000-001177.html>`__.
    No known copyright restrictions, released into the public domain.
    """
    fname = "astronaut.png"
    return load(fname)
def text():
    """Gray-level "text" image used for corner detection.

    Notes
    -----
    Downloaded from Wikipedia
    <http://en.wikipedia.org/wiki/File:Corner.png>`__.
    No known copyright restrictions, released into the public domain.
    """
    fname = "text.png"
    return load(fname)
def checkerboard():
    """Checkerboard image.

    Checkerboards are a common calibration target: their corner points are
    easy to locate, and the many parallel edges make lens distortions
    particularly visible.
    """
    fname = "chessboard_GRAY.png"
    return load(fname)
def coins():
    """Greek coins from Pompeii.

    Several coins outlined against a gray background. Especially useful in
    segmentation tests, where individual objects must be identified against
    a background: the background shares enough grey levels with the coins
    that a simple segmentation is not sufficient.

    Notes
    -----
    Downloaded from the Brooklyn Museum Collection
    <http://www.brooklynmuseum.org/opencollection/archives/image/617/image>`__.
    No known copyright restrictions.
    """
    fname = "coins.png"
    return load(fname)
def logo():
    """Scikit-image logo, a RGBA image."""
    fname = "logo.png"
    return load(fname)
def moon():
    """Surface of the moon.

    A low-contrast picture of the lunar surface, useful for illustrating
    histogram equalization and contrast stretching.
    """
    fname = "moon.png"
    return load(fname)
def page():
    """Scanned page.

    An image of printed text, useful for demonstrations requiring uneven
    background illumination.
    """
    fname = "page.png"
    return load(fname)
def horse():
    """Black and white silhouette of a horse.

    Downloaded from openclipart
    <http://openclipart.org/detail/158377/horse-by-marauder>`.
    Released into the public domain; drawn and uploaded by Andreas Preuss
    (marauder).
    """
    # Converting an 8-bit greyscale image to bool triggers the conversion
    # warnings that expected_warnings suppresses here.
    with expected_warnings(['Possible precision loss', 'Possible sign loss']):
        grey = load("horse.png", as_grey=True)
        return img_as_bool(grey)
def clock():
    """Motion blurred clock.

    A wall clock photographed while the camera moved approximately
    horizontally; useful for illustrating inverse filters and
    deconvolution.

    Released into the public domain by the photographer (Stefan van der Walt).
    """
    fname = "clock_motion.png"
    return load(fname)
def immunohistochemistry():
    """Immunohistochemical (IHC) staining with hematoxylin counterstaining.

    Colonic glands where the IHC expression of FHL2 protein is revealed
    with DAB; hematoxylin counterstaining enhances the negative parts of
    the tissue.

    Acquired at the Center for Microscopy And Molecular Imaging (CMMI).
    No known copyright restrictions.
    """
    fname = "ihc.png"
    return load(fname)
def chelsea():
    """Chelsea the cat.

    An example with texture, prominent edges in horizontal and diagonal
    directions, and features of differing scales.

    Notes
    -----
    No copyright restrictions. CC0 by the photographer (Stefan van der Walt).
    """
    fname = "chelsea.png"
    return load(fname)
def coffee():
    """Coffee cup.

    Courtesy of Pikolo Espresso Bar. Contains several elliptical shapes as
    well as varying texture (smooth porcelain to coarse wood grain).

    Notes
    -----
    No copyright restrictions. CC0 by the photographer (Rachel Michetti).
    """
    fname = "coffee.png"
    return load(fname)
def hubble_deep_field():
    """Hubble eXtreme Deep Field.

    The Hubble Telescope's farthest-ever view of the universe; useful as an
    example for multi-scale detection.

    Notes
    -----
    Downloaded from HubbleSite
    <http://hubblesite.org/newscenter/archive/releases/2012/37/image/a/>`__.
    Captured by NASA and `may be freely used in the public domain
    <http://www.nasa.gov/audience/formedia/features/MP_Photo_Guidelines.html>`_.
    """
    fname = "hubble_deep_field.jpg"
    return load(fname)
def rocket():
    """Launch photo of DSCOVR on Falcon 9 by SpaceX.

    Falcon 9 carrying DSCOVR, lifting off from SpaceX's Launch Complex 40
    at Cape Canaveral Air Force Station, FL.

    Notes
    -----
    Downloaded from SpaceX Photos
    <https://www.flickr.com/photos/spacexphotos/16511594820/in/photostream/>`__.
    Captured by SpaceX and `released in the public domain
    <http://arstechnica.com/tech-policy/2015/03/elon-musk-puts-spacex-photos-into-the-public-domain/>`_.
    """
    fname = "rocket.jpg"
    return load(fname)
| ofgulban/scikit-image | skimage/data/__init__.py | Python | bsd-3-clause | 6,497 | [
"ESPResSo"
] | 08a13e769f69e86ecd0ec7052dd053050cacbb0710003df0dbbc98b50963f7bb |
import os
import sys
import inspect
from time import time
from copy import deepcopy
import numpy as np
from ase import Atoms
from ase.io import read, write
from flare.utils.learner import get_max_cutoff
class FLARE_Atoms(Atoms):
    """
    The `FLARE_Atoms` class is a child class of ASE `Atoms`,
    which has completely the same usage as the primitive ASE `Atoms`, and
    in the meanwhile mimics the `Structure` class. It is used in the `OTF`
    module with the ASE engine (by the `OTF_ASE` module). It enables
    attributes to be obtained by both the name from ASE `Atoms` and
    `Structure`.
    The input arguments are the same as ASE `Atoms`.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Positions from the previous step; zeros until first updated.
        self.prev_positions = np.zeros_like(self.positions)
    @staticmethod
    def from_ase_atoms(atoms):
        """Build a FLARE_Atoms from a plain ASE Atoms without mutating it.

        Args:
            atoms (ASE Atoms): the ase atoms to build from
        """
        # Deep-copy so the caller's object is untouched, then re-brand the
        # copy; FLARE_Atoms adds no new state besides prev_positions.
        new_atoms = deepcopy(atoms)
        new_atoms.__class__ = FLARE_Atoms
        new_atoms.prev_positions = np.zeros_like(new_atoms.positions)
        return new_atoms
    @property
    def nat(self):
        """Number of atoms (`Structure`-style alias of len(self))."""
        return len(self)
    @property
    def species_labels(self):
        """Chemical symbols (`Structure`-style alias of `Atoms.symbols`)."""
        return self.symbols
    @property
    def coded_species(self):
        """Atomic numbers (`Structure`-style alias of `Atoms.numbers`)."""
        return self.numbers
    @property
    def forces(self):
        """Forces computed by the attached calculator."""
        return self.get_forces()
    @forces.setter
    def forces(self, forces_array):
        # Intentionally a no-op: forces always come from the calculator.
        # NOTE(review): confirm no caller relies on assignment taking effect.
        pass
    @property
    def potential_energy(self):
        return self.get_potential_energy()
    @property
    def stress(self):
        return self.get_stress()
    @property
    def stress_stds(self):
        return None  # TODO: to implement
    @property
    def local_energy_stds(self):
        return None  # TODO: to implement
    @property
    def stds(self):
        """Per-atom uncertainties, or zeros if the calculator has none."""
        # Only the FLARE calculator populates results["stds"]; with any
        # other calculator (or none) fall back to zeros.  The previous bare
        # `except:` also swallowed unrelated errors (e.g. KeyboardInterrupt);
        # catch only the expected failure modes.
        try:
            stds = self.calc.results["stds"]
        except (AttributeError, KeyError, TypeError):
            stds = np.zeros_like(self.positions)
        return stds
    def wrap_positions(self):
        """Return positions wrapped into the periodic cell."""
        return self.get_positions(wrap=True)
    @property
    def wrapped_positions(self):
        """Property alias of wrap_positions()."""
        return self.get_positions(wrap=True)
    @property
    def max_cutoff(self):
        """Largest cutoff radius compatible with the current cell."""
        return get_max_cutoff(self.cell)
    def as_dict(self):
        """Serialize via ASE's todict()."""
        return self.todict()
    @staticmethod
    def from_dict(dct):
        """Rebuild a FLARE_Atoms from a dict produced by as_dict()."""
        atoms = Atoms.fromdict(dct)
        return FLARE_Atoms.from_ase_atoms(atoms)
| mir-group/flare | flare/ase/atoms.py | Python | mit | 2,449 | [
"ASE"
] | 7461672c4f3991dd9ee9d005250019d6df07e1bfba03dbefd1bf3ed91d857cd2 |
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010-2013 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Implements structural navigation."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2013 The Orca Team"
__license__ = "LGPL"
import pyatspi
from . import cmdnames
from . import debug
from . import guilabels
from . import input_event
from . import keybindings
from . import messages
from . import object_properties
from . import orca
from . import orca_gui_navlist
from . import orca_state
from . import settings
from . import settings_manager
from . import speech
_settingsManager = settings_manager.getManager()
#############################################################################
# #
# MatchCriteria #
# #
#############################################################################
class MatchCriteria:
    """Contains the criteria which will be used to generate a collection
    matchRule. We don't want to create the rule until we need it and
    are ready to use it. In addition, the creation of an AT-SPI match
    rule requires you specify quite a few things (see the __init__),
    most of which are irrelevant to the search at hand. This class
    makes it possible for the StructuralNavigationObject creator to just
    specify the few criteria that actually matter.
    """
    def __init__(self,
                 collection,
                 states=None,
                 matchStates=None,
                 objAttrs=None,
                 matchObjAttrs=None,
                 roles=None,
                 matchRoles=None,
                 interfaces="",
                 matchInterfaces=None,
                 invert=False,
                 applyPredicate=False):
        """Creates a new match criteria object.
        Arguments:
        - collection: the collection interface for the document in
          which the accessible objects can be found.
        - states: a list of pyatspi states of interest (defaults to
          an empty list).
        - matchStates: whether an object must have all of the states
          in the states list, any of the states in the list, or none
          of the states in the list. Must be one of the collection
          interface MatchTypes if provided.
        - objAttrs: a list of object attributes (not text attributes),
          defaults to an empty list.
        - matchObjAttrs: whether an object must have all of the
          attributes in the objAttrs list, any of the attributes in
          the list, or none of the attributes in the list. Must be
          one of the collection interface MatchTypes if provided.
        - roles: a list of pyatspi roles of interest (defaults to an
          empty list).
        - matchRoles: the collection MatchType for matching by role.
        - interfaces: (We aren't using this. According to the at-spi
          idl, it is a string.)
        - matchInterfaces: The collection MatchType for matching by
          interface.
        - invert: If true the match rule will find objects that don't
          match. We always use False.
        - applyPredicate: whether or not a predicate should be applied
          as an additional check to see if an item is indeed a match.
          This is necessary, for instance, when one of the things we
          care about is a text attribute, something the collection
          interface doesn't include in its criteria.
        """
        # The list defaults are None sentinels rather than mutable `[]`
        # defaults (which would be shared across all instances).
        self.collection = collection
        self.matchStates = matchStates or collection.MATCH_ANY
        self.objAttrs = objAttrs if objAttrs is not None else []
        self.matchObjAttrs = matchObjAttrs or collection.MATCH_ANY
        self.roles = roles if roles is not None else []
        self.matchRoles = matchRoles or collection.MATCH_ANY
        self.interfaces = interfaces
        self.matchInterfaces = matchInterfaces or collection.MATCH_ALL
        self.invert = invert
        self.applyPredicate = applyPredicate
        self.states = pyatspi.StateSet()
        for state in (states or []):
            self.states.add(state)
###########################################################################
# #
# StructuralNavigationObject #
# #
###########################################################################
class StructuralNavigationObject:
    """Represents a document object which has identifiable characteristics
    which can be used for the purpose of navigation to and among instances
    of that object. These characteristics may be something as simple as a
    role and/or a state of interest. Or they may be something more complex
    such as character counts, text attributes, and other object attributes.
    """
    def __init__(self, structuralNavigation, objType, bindings, predicate,
                 criteria, presentation, dialogData):
        """Creates a new structural navigation object.
        Arguments:
        - structuralNavigation: the StructuralNavigation class associated
          with this object.
        - objType: the type (e.g. BLOCKQUOTE) associated with this object.
        - bindings: a dictionary of all of the possible bindings for this
          object. In the case of all but the "atLevel" bindings, each
          binding takes the form of [keysymstring, modifiers, description].
          The goPreviousAtLevel and goNextAtLevel bindings are each a list
          of bindings in that form.
        - predicate: the predicate to use to determine if a given accessible
          matches this structural navigation object. Used when a search via
          collection is not possible or practical.
        - criteria: a method which returns a MatchCriteria object which
          can in turn be used to locate the next/previous matching accessible
          via collection.
        - presentation: the method which should be called after performing
          the search for the structural navigation object.
        - dialogData: the method which returns the title, column headers,
          and row data which should be included in the "list of" dialog for
          the structural navigation object.
        """
        self.structuralNavigation = structuralNavigation
        self.objType = objType
        self.bindings = bindings
        self.predicate = predicate
        self.criteria = criteria
        self.present = presentation
        self._dialogData = dialogData
        # Populated from the `bindings` dictionary below; `functions` keeps
        # the handler callables so scripts can enumerate them.
        self.inputEventHandlers = {}
        self.keyBindings = keybindings.KeyBindings()
        self.functions = []
        self._setUpHandlersAndBindings()
    def _setUpHandlersAndBindings(self):
        """Adds the inputEventHandlers and keyBindings for this object."""
        # Set up the basic handlers. These are our traditional goPrevious
        # and goNext functions.
        #
        previousBinding = self.bindings.get("previous")
        if previousBinding:
            [keysymstring, modifiers, description] = previousBinding
            handlerName = "%sGoPrevious" % self.objType
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(self.goPrevious, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(self.goPrevious)
        nextBinding = self.bindings.get("next")
        if nextBinding:
            [keysymstring, modifiers, description] = nextBinding
            handlerName = "%sGoNext" % self.objType
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(self.goNext, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(self.goNext)
        listBinding = self.bindings.get("list")
        if listBinding:
            [keysymstring, modifiers, description] = listBinding
            handlerName = "%sShowList" % self.objType
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(self.showList, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(self.showList)
        # Set up the "at level" handlers (e.g. to navigate among headings
        # at the specified level).  Levels are 1-based; the binding lists
        # are indexed 0..n-1.
        #
        previousAtLevel = self.bindings.get("previousAtLevel") or []
        for i, binding in enumerate(previousAtLevel):
            level = i + 1
            handler = self.goPreviousAtLevelFactory(level)
            handlerName = "%sGoPreviousLevel%dHandler" % (self.objType, level)
            keysymstring, modifiers, description = binding
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(handler, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(handler)
        nextAtLevel = self.bindings.get("nextAtLevel") or []
        for i, binding in enumerate(nextAtLevel):
            level = i + 1
            handler = self.goNextAtLevelFactory(level)
            handlerName = "%sGoNextLevel%dHandler" % (self.objType, level)
            keysymstring, modifiers, description = binding
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(handler, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(handler)
        listAtLevel = self.bindings.get("listAtLevel") or []
        for i, binding in enumerate(listAtLevel):
            level = i + 1
            handler = self.showListAtLevelFactory(level)
            handlerName = "%sShowListAtLevel%dHandler" % (self.objType, level)
            keysymstring, modifiers, description = binding
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(handler, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(handler)
        # Set up the "directional" handlers (e.g. for table cells. Live
        # region support has a handler to go to the last live region,
        # so we'll handle that here as well).
        #
        directions = {}
        directions["Left"] = self.bindings.get("left")
        directions["Right"] = self.bindings.get("right")
        directions["Up"] = self.bindings.get("up")
        directions["Down"] = self.bindings.get("down")
        directions["First"] = self.bindings.get("first")
        directions["Last"] = self.bindings.get("last")
        for direction in directions:
            binding = directions.get(direction)
            if not binding:
                continue
            # goDirectionFactory returns None for unsupported objType/
            # direction combinations; such handlers simply never fire.
            handler = self.goDirectionFactory(direction)
            handlerName = "%sGo%s" % (self.objType, direction)
            keysymstring, modifiers, description = binding
            self.inputEventHandlers[handlerName] = \
                input_event.InputEventHandler(handler, description)
            self.keyBindings.add(
                keybindings.KeyBinding(
                    keysymstring,
                    keybindings.defaultModifierMask,
                    modifiers,
                    self.inputEventHandlers[handlerName]))
            self.functions.append(handler)
    def addHandlerAndBinding(self, binding, handlerName, function):
        """Adds a custom inputEventHandler and keybinding to the object's
        handlers and bindings. Right now this is unused, but here in
        case a creator of a StructuralNavigationObject had some other
        desired functionality in mind.
        Arguments:
        - binding: [keysymstring, modifiers, description]
        - handlerName: a string uniquely identifying the handler
        - function: the function associated with the binding
        """
        [keysymstring, modifiers, description] = binding
        handler = input_event.InputEventHandler(function, description)
        keyBinding = keybindings.KeyBinding(
            keysymstring,
            keybindings.defaultModifierMask,
            modifiers,
            handler)
        # Register on both this object and the owning StructuralNavigation.
        self.inputEventHandlers[handlerName] = handler
        self.structuralNavigation.inputEventHandlers[handlerName] = handler
        self.functions.append(function)
        self.structuralNavigation.functions.append(function)
        self.keyBindings.add(keyBinding)
        self.structuralNavigation.keyBindings.add(keyBinding)
    def goPrevious(self, script, inputEvent):
        """Go to the previous object."""
        self.structuralNavigation.goObject(self, False)
    def goNext(self, script, inputEvent):
        """Go to the next object."""
        self.structuralNavigation.goObject(self, True)
    def showList(self, script, inputEvent):
        """Show a list of all the items with this object type."""
        try:
            objects, criteria = self.structuralNavigation._getAll(self)
        # NOTE(review): broad except — any failure in the collection query
        # is reported to the user as a dialog error.
        except:
            script.presentMessage(messages.NAVIGATION_DIALOG_ERROR)
            return
        def _isValidMatch(x):
            # Exclude hidden and empty accessibles from the dialog.
            return not (script.utilities.isHidden(x) or script.utilities.isEmpty(x))
        objects = list(filter(_isValidMatch, objects))
        if criteria.applyPredicate:
            objects = list(filter(self.predicate, objects))
        title, columnHeaders, rowData = self._dialogData()
        count = len(objects)
        title = "%s: %s" % (title, messages.itemsFound(count))
        if not count:
            script.presentMessage(title)
            return
        # Pre-select the item for the object at the caret, if present.
        currentObject, offset = script.utilities.getCaretContext()
        try:
            index = objects.index(currentObject)
        except:
            index = 0
        rows = [[obj, -1] + rowData(obj) for obj in objects]
        orca_gui_navlist.showUI(title, columnHeaders, rows, index)
    def goPreviousAtLevelFactory(self, level):
        """Generates a goPrevious method for the specified level. Right
        now, this is just for headings, but it may have applicability
        for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list.
        Arguments:
        - level: the desired level of the object as an int.
        """
        def goPreviousAtLevel(script, inputEvent):
            self.structuralNavigation.goObject(self, False, arg=level)
        return goPreviousAtLevel
    def goNextAtLevelFactory(self, level):
        """Generates a goNext method for the specified level. Right
        now, this is just for headings, but it may have applicability
        for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list.
        Arguments:
        - level: the desired level of the object as an int.
        """
        def goNextAtLevel(script, inputEvent):
            self.structuralNavigation.goObject(self, True, arg=level)
        return goNextAtLevel
    def showListAtLevelFactory(self, level):
        """Generates a showList method for the specified level. Right
        now, this is just for headings, but it may have applicability
        for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list.
        Arguments:
        - level: the desired level of the object as an int.
        """
        def showListAtLevel(script, inputEvent):
            try:
                objects, criteria = self.structuralNavigation._getAll(self, arg=level)
            except:
                script.presentMessage(messages.NAVIGATION_DIALOG_ERROR)
                return
            def _isValidMatch(x):
                return not (script.utilities.isHidden(x) or script.utilities.isEmpty(x))
            objects = list(filter(_isValidMatch, objects))
            if criteria.applyPredicate:
                objects = list(filter(self.predicate, objects))
            title, columnHeaders, rowData = self._dialogData(arg=level)
            count = len(objects)
            title = "%s: %s" % (title, messages.itemsFound(count))
            if not count:
                script.presentMessage(title)
                return
            currentObject, offset = script.utilities.getCaretContext()
            try:
                index = objects.index(currentObject)
            except:
                index = 0
            rows = [[obj, -1] + rowData(obj) for obj in objects]
            orca_gui_navlist.showUI(title, columnHeaders, rows, index)
        return showListAtLevel
    def goDirectionFactory(self, direction):
        """Generates the methods for navigation in a particular direction
        (i.e. left, right, up, down, first, last). Right now, this is
        primarily for table cells, but it may have applicability for other
        objects. For example, when navigating in an outline, one might
        want the ability to navigate to the next item at a given level,
        but then work his/her way up/down in the hierarchy.
        Arguments:
        - direction: the direction in which to navigate as a string.
        """
        def goCell(script, inputEvent):
            obj, offset = script.utilities.getCaretContext()
            thisCell = self.structuralNavigation.getCellForObj(obj)
            currentCoordinates = \
                self.structuralNavigation.getCellCoordinates(thisCell)
            # Coordinates are (row, column); adjust one axis per direction.
            if direction == "Left":
                desiredCoordinates = [currentCoordinates[0],
                                      currentCoordinates[1] - 1]
            elif direction == "Right":
                desiredCoordinates = [currentCoordinates[0],
                                      currentCoordinates[1] + 1]
            elif direction == "Up":
                desiredCoordinates = [currentCoordinates[0] - 1,
                                      currentCoordinates[1]]
            elif direction == "Down":
                desiredCoordinates = [currentCoordinates[0] + 1,
                                      currentCoordinates[1]]
            elif direction == "First":
                desiredCoordinates = [0, 0]
            else:
                # "Last": resolve the table's final row/column if we can.
                desiredCoordinates = [-1, -1]
                table = self.structuralNavigation.getTableForCell(thisCell)
                if table:
                    iTable = table.queryTable()
                    lastRow = iTable.nRows - 1
                    lastCol = iTable.nColumns - 1
                    desiredCoordinates = [lastRow, lastCol]
            self.structuralNavigation.goCell(self,
                                             thisCell,
                                             currentCoordinates,
                                             desiredCoordinates)
        def goLastLiveRegion(script, inputEvent):
            """Go to the last liveRegion."""
            if settings.inferLiveRegions:
                script.liveRegionManager.goLastLiveRegion()
            else:
                script.presentMessage(messages.LIVE_REGIONS_OFF)
        # Only table cells and the "Last" live-region direction are
        # supported here; other combinations fall through returning None.
        if self.objType == StructuralNavigation.TABLE_CELL:
            return goCell
        elif self.objType == StructuralNavigation.LIVE_REGION \
             and direction == "Last":
            return goLastLiveRegion
#############################################################################
# #
# StructuralNavigation #
# #
#############################################################################
class StructuralNavigation:
    """This class implements the structural navigation functionality which
    is available to scripts. Scripts interested in implementing structural
    navigation need to override getEnabledStructuralNavigationTypes() and
    return a list of StructuralNavigation object types which should be
    enabled.
    """

    # The available object types.
    #
    # Convenience methods have been put into place whereby one can
    # create an object (FOO = "foo"), and then provide the following
    # methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
    # _fooPresentation(). With these in place, and with the object
    # FOO included among the object types returned by the script's
    # getEnabledStructuralNavigationTypes(), the StructuralNavigation
    # object should be created and set up automagically. At least that
    # is the idea. :-) This hopefully will also enable easy re-definition
    # of existing StructuralNavigationObjects on a script-by-script basis.
    # For instance, in the soffice script, overriding _blockquotePredicate
    # should be all that is needed to implement navigation by blockquote
    # in OOo Writer documents.
    #
    BLOCKQUOTE = "blockquote"
    BUTTON = "button"
    CHECK_BOX = "checkBox"
    CHUNK = "chunk"
    CLICKABLE = "clickable"
    COMBO_BOX = "comboBox"
    ENTRY = "entry"
    FORM_FIELD = "formField"
    HEADING = "heading"
    IMAGE = "image"
    LANDMARK = "landmark"
    LINK = "link"
    LIST = "list"  # Bulleted/numbered lists
    LIST_ITEM = "listItem"  # Bulleted/numbered list items
    LIVE_REGION = "liveRegion"
    PARAGRAPH = "paragraph"
    RADIO_BUTTON = "radioButton"
    SEPARATOR = "separator"
    TABLE = "table"
    TABLE_CELL = "tableCell"
    UNVISITED_LINK = "unvisitedLink"
    VISITED_LINK = "visitedLink"

    # Roles which are recognized as being a form field. Note that this
    # is for the purpose of match rules and predicates and refers to
    # AT-SPI roles.
    #
    FORM_ROLES = [pyatspi.ROLE_CHECK_BOX,
                  pyatspi.ROLE_RADIO_BUTTON,
                  pyatspi.ROLE_COMBO_BOX,
                  pyatspi.ROLE_DOCUMENT_FRAME,  # rich text editing
                  pyatspi.ROLE_LIST,
                  pyatspi.ROLE_LIST_BOX,
                  pyatspi.ROLE_ENTRY,
                  pyatspi.ROLE_PASSWORD_TEXT,
                  pyatspi.ROLE_PUSH_BUTTON,
                  pyatspi.ROLE_SPIN_BUTTON,
                  pyatspi.ROLE_TEXT]

    # Roles which are recognized as being potential "large objects"
    # or "chunks." Note that this refers to AT-SPI roles.
    #
    OBJECT_ROLES = [pyatspi.ROLE_HEADING,
                    pyatspi.ROLE_LIST,
                    pyatspi.ROLE_PARAGRAPH,
                    pyatspi.ROLE_TABLE,
                    pyatspi.ROLE_TABLE_CELL,
                    pyatspi.ROLE_TEXT,
                    pyatspi.ROLE_SECTION,
                    pyatspi.ROLE_DOCUMENT_EMAIL,
                    pyatspi.ROLE_DOCUMENT_FRAME,
                    pyatspi.ROLE_DOCUMENT_PRESENTATION,
                    pyatspi.ROLE_DOCUMENT_SPREADSHEET,
                    pyatspi.ROLE_DOCUMENT_TEXT,
                    pyatspi.ROLE_DOCUMENT_WEB]

    # AT-SPI roles which count as images for image navigation.
    IMAGE_ROLES = [pyatspi.ROLE_IMAGE,
                   pyatspi.ROLE_IMAGE_MAP]
def __init__(self, script, enabledTypes, enabled=False):
    """Creates an instance of the StructuralNavigation class.

    Arguments:
    - script: the script with which this instance is associated.
    - enabledTypes: a list of StructuralNavigation object types
      which the script is interested in supporting.
    - enabled: Whether structural navigation should start out
      enabled. For instance, in Gecko by default we want it
      enabled; in soffice, we would want to start out with it
      disabled and have the user enable it via a keystroke when
      desired.
    """
    self._script = script
    self.enabled = enabled

    # Create all of the StructuralNavigationObject's in which the
    # script is interested, using the convenience method
    #
    self.enabledObjects = {}
    for objType in enabledTypes:
        self.enabledObjects[objType] = \
            self.structuralNavigationObjectCreator(objType)

    self.functions = []
    self.inputEventHandlers = {}
    self.setupInputEventHandlers()
    self.keyBindings = self.getKeyBindings()

    # When navigating in a non-uniform table, one can move to a
    # cell which spans multiple rows and/or columns. When moving
    # beyond that cell, into a cell that does NOT span multiple
    # rows/columns, we want to be sure we land in the right place.
    # Therefore, we'll store the coordinates from "our perspective."
    #
    self.lastTableCell = [-1, -1]

    # Per-document cache of collection query results, keyed by
    # hash(document); see _getAll() and clearCache().
    self._objectCache = {}
def clearCache(self, document=None):
    """Empties the cached collection results, either for the given
    document only or, when no document is given, for all documents."""
    if not document:
        self._objectCache = {}
    else:
        self._objectCache[hash(document)] = {}
def structuralNavigationObjectCreator(self, name):
    """This convenience method creates a StructuralNavigationObject
    with the specified name and associated characteristics. (See the
    "Objects" section of code near the end of this class.) Creators
    of StructuralNavigationObject's can still do things the old
    fashioned way should they so choose, by creating the instance
    and then adding it via addObject().

    Arguments:
    - name: the name/objType associated with this object.
    """
    # We're going to assume bindings. After all, a structural
    # navigation object is by definition an object which one can
    # navigate to using the associated keybindings. For similar
    # reasons we'll also assume a predicate and a presentation
    # method. (See the Objects section towards the end of this
    # class for examples of each.)
    #
    # getattr() replaces the previous eval() of formatted strings:
    # same naming-convention lookup, without executing constructed
    # source text.
    bindings = getattr(self, "_%sBindings" % name)()
    criteria = getattr(self, "_%sCriteria" % name)
    predicate = getattr(self, "_%sPredicate" % name)
    presentation = getattr(self, "_%sPresentation" % name)

    # Dialog data is optional; not every object type provides it.
    dialogData = getattr(self, "_%sDialogData" % name, None)

    return StructuralNavigationObject(self, name, bindings, predicate,
                                      criteria, presentation, dialogData)
def addObject(self, objType, structuralNavigationObject):
    """Adds structuralNavigationObject to the dictionary of enabled
    objects.

    Arguments:
    - objType: the name/object type of the StructuralNavigationObject.
    - structuralNavigationObject: the StructuralNavigationObject to
      add.
    """
    self.enabledObjects.update({objType: structuralNavigationObject})
def setupInputEventHandlers(self):
    """Defines InputEventHandler fields for a script."""
    if not self.enabledObjects:
        return

    self.inputEventHandlers["toggleStructuralNavigationHandler"] = \
        input_event.InputEventHandler(
            self.toggleStructuralNavigation,
            cmdnames.STRUCTURAL_NAVIGATION_TOGGLE)

    # Merge in the handlers and functions contributed by each enabled
    # structural navigation object.
    for navObject in self.enabledObjects.values():
        self.inputEventHandlers.update(navObject.inputEventHandlers)
        self.functions.extend(navObject.functions)
def getKeyBindings(self):
    """Defines the structural navigation key bindings for a script.

    Returns: an instance of keybindings.KeyBindings.
    """
    bindings = keybindings.KeyBindings()
    if not self.enabledObjects:
        return bindings

    toggleHandler = self.inputEventHandlers["toggleStructuralNavigationHandler"]
    bindings.add(
        keybindings.KeyBinding("z",
                               keybindings.defaultModifierMask,
                               keybindings.ORCA_MODIFIER_MASK,
                               toggleHandler))

    # Fold in every binding contributed by the enabled objects.
    for navObject in self.enabledObjects.values():
        for kb in navObject.keyBindings.keyBindings:
            bindings.add(kb)

    return bindings
#########################################################################
# #
# Input Event Handler Methods #
# #
#########################################################################
def toggleStructuralNavigation(self, script, inputEvent, presentMessage=True):
    """Toggles structural navigation keys."""
    self.enabled = not self.enabled
    # 'announcement' instead of 'string' avoids shadowing the stdlib
    # string module imported at the top of the file.
    if self.enabled:
        announcement = messages.STRUCTURAL_NAVIGATION_KEYS_ON
    else:
        announcement = messages.STRUCTURAL_NAVIGATION_KEYS_OFF
    debug.println(debug.LEVEL_CONFIGURATION, announcement)
    if presentMessage:
        self._script.presentMessage(announcement)
#########################################################################
# #
# Methods for Moving to Objects #
# #
#########################################################################
def goCell(self, structuralNavigationObject, thisCell,
           currentCoordinates, desiredCoordinates):
    """The method used for navigation among cells in a table.

    Arguments:
    - structuralNavigationObject: the StructuralNavigationObject which
      represents the table cell.
    - thisCell: the pyatspi accessible TABLE_CELL we're currently in
    - currentCoordinates: the [row, column] of thisCell. Note, we
      cannot just get the coordinates because in table cells which
      span multiple rows and/or columns, the value returned by
      table.getRowAtIndex() is the first row the cell spans. Likewise,
      the value returned by table.getColumnAtIndex() is the left-most
      column. Therefore, we keep track of the row and column from
      our perspective to ensure we stay in the correct row and column.
    - desiredCoordinates: the [row, column] where we think we'd like to
      be.
    """
    table = self.getTableForCell(thisCell)
    try:
        iTable = table.queryTable()
    except:
        # table is None or doesn't implement Table: we aren't in one.
        self._script.presentMessage(messages.TABLE_NOT_IN_A)
        return None

    currentRow, currentCol = currentCoordinates
    desiredRow, desiredCol = desiredCoordinates
    rowDiff = desiredRow - currentRow
    colDiff = desiredCol - currentCol
    # Remember the headers of the cell we started in so that only the
    # headers which changed get spoken by present().
    oldRowHeaders = self._script.utilities.rowHeadersForCell(thisCell)
    oldColHeaders = self._script.utilities.columnHeadersForCell(thisCell)
    cell = thisCell
    while cell:
        cell = iTable.getAccessibleAt(desiredRow, desiredCol)
        if not cell:
            # We've gone past an edge of the table: announce the
            # boundary and clamp the coordinates back in range.
            # (With cell now None, the loop terminates.)
            if desiredCol < 0:
                self._script.presentMessage(messages.TABLE_ROW_BEGINNING)
                desiredCol = 0
            elif desiredCol > iTable.nColumns - 1:
                self._script.presentMessage(messages.TABLE_ROW_END)
                desiredCol = iTable.nColumns - 1
            if desiredRow < 0:
                self._script.presentMessage(messages.TABLE_COLUMN_TOP)
                desiredRow = 0
            elif desiredRow > iTable.nRows - 1:
                self._script.presentMessage(messages.TABLE_COLUMN_BOTTOM)
                desiredRow = iTable.nRows - 1
        elif thisCell == cell or (settings.skipBlankCells and self._isBlankCell(cell)):
            # Same cell again (a span) or a blank cell we're configured
            # to skip: keep stepping in the direction of travel.
            if colDiff < 0:
                desiredCol -= 1
            elif colDiff > 0:
                desiredCol += 1
            if rowDiff < 0:
                desiredRow -= 1
            elif rowDiff > 0:
                desiredRow += 1
        else:
            # Found a distinct, presentable cell.
            break

    self.lastTableCell = [desiredRow, desiredCol]
    if cell:
        arg = [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
        structuralNavigationObject.present(cell, arg)
def _getAll(self, structuralNavigationObject, arg=None):
    """Returns all the instances of structuralNavigationObject.

    Returns a (matches, criteria) tuple. Results are cached per
    document (keyed by hash(document)) and per (objType, arg) pair;
    clearCache() invalidates them.
    """
    if not structuralNavigationObject.criteria:
        return [], None

    document = self._script.utilities.documentFrame()
    cache = self._objectCache.get(hash(document), {})
    key = "%s:%s" % (structuralNavigationObject.objType, arg)
    matches, criteria = cache.get(key, ([], None))
    if matches:
        # Hand out a copy so callers may reverse/slice the list
        # without corrupting the cached version.
        return matches.copy(), criteria

    col = document.queryCollection()
    criteria = structuralNavigationObject.criteria(col, arg)
    rule = col.createMatchRule(criteria.states.raw(),
                               criteria.matchStates,
                               criteria.objAttrs,
                               criteria.matchObjAttrs,
                               criteria.roles,
                               criteria.matchRoles,
                               criteria.interfaces,
                               criteria.matchInterfaces,
                               criteria.invert)
    matches = col.getMatches(rule, col.SORT_ORDER_CANONICAL, 0, True)
    col.freeMatchRule(rule)

    rv = matches.copy(), criteria
    cache[key] = matches, criteria
    self._objectCache[hash(document)] = cache
    return rv
def goObject(self, structuralNavigationObject, isNext, obj=None, arg=None):
    """The method used for navigation among StructuralNavigationObjects
    which are not table cells.

    Arguments:
    - structuralNavigationObject: the StructuralNavigationObject which
      represents the object of interest.
    - isNext: If True, we're interested in the next accessible object
      which matches structuralNavigationObject. If False, we're
      interested in the previous accessible object which matches.
    - obj: the current object (typically the locusOfFocus).
    - arg: optional arguments which may need to be passed along to
      the predicate, presentation method, etc. For instance, in the
      case of navigating amongst headings at a given level, the level
      is needed and passed in as arg.
    """
    matches, criteria = list(self._getAll(structuralNavigationObject, arg))
    if not matches:
        structuralNavigationObject.present(None, arg)
        return

    if not isNext:
        matches.reverse()

    def _isValidMatch(obj):
        # Hidden or empty objects are never navigated to; the
        # predicate is applied only when the criteria request it.
        if self._script.utilities.isHidden(obj) or self._script.utilities.isEmpty(obj):
            return False
        if not criteria.applyPredicate:
            return True
        return structuralNavigationObject.predicate(obj)

    def _getMatchingObjAndIndex(obj):
        # Walk up the ancestry until we hit an object in matches.
        while obj:
            if obj in matches:
                return obj, matches.index(obj)
            obj = obj.parent
        return None, -1

    if not obj:
        obj, offset = self._script.utilities.getCaretContext()
    # NOTE(review): offset is only bound when obj was NOT supplied by
    # the caller; the offset comparison below appears to assume the
    # caret-context path -- confirm callers never pass obj explicitly
    # in a way that reaches the match.parent == obj branch.
    thisObj, index = _getMatchingObjAndIndex(obj)
    if thisObj:
        # Start the scan from the current match onward.
        matches = matches[index:]
        obj = thisObj

    currentPath = pyatspi.utils.getPath(obj)
    for i, match in enumerate(matches):
        if not _isValidMatch(match):
            continue
        if match.parent == obj:
            # Sibling-in-parent case: compare character offsets.
            comparison = self._script.utilities.characterOffsetInParent(match) - offset
        else:
            # General case: compare accessible tree paths.
            path = pyatspi.utils.getPath(match)
            comparison = self._script.utilities.pathComparison(path, currentPath)
        if (comparison > 0 and isNext) or (comparison < 0 and not isNext):
            structuralNavigationObject.present(match, arg)
            return

    # Nothing ahead (or behind): optionally wrap around the document.
    if not settings.wrappedStructuralNavigation:
        structuralNavigationObject.present(None, arg)
        return

    if not isNext:
        self._script.presentMessage(messages.WRAPPING_TO_BOTTOM)
    else:
        self._script.presentMessage(messages.WRAPPING_TO_TOP)

    matches, criteria = list(self._getAll(structuralNavigationObject, arg))
    if not isNext:
        matches.reverse()

    for match in matches:
        if _isValidMatch(match):
            structuralNavigationObject.present(match, arg)
            return

    structuralNavigationObject.present(None, arg)
#########################################################################
# #
# Methods for Presenting Objects #
# #
#########################################################################
def _getTableCaption(self, obj):
"""Returns a string which contains the table caption, or
None if a caption could not be found.
Arguments:
- obj: the accessible table whose caption we want.
"""
caption = obj.queryTable().caption
try:
caption.queryText()
except:
return None
else:
return self._script.utilities.displayedText(caption)
def _getTableDescription(self, obj):
    """Returns a string which describes the table."""
    prefix = ""
    if self._script.utilities.isNonUniformTable(obj):
        prefix = messages.TABLE_NON_UNIFORM + " "
    table = obj.queryTable()
    return prefix + messages.tableSize(table.nRows, table.nColumns)
def getCellForObj(self, obj):
    """Looks for a table cell in the ancestry of obj, if obj is not a
    table cell.

    Arguments:
    - obj: the accessible object of interest.
    """
    if not obj:
        return obj

    cellRoles = [pyatspi.ROLE_TABLE_CELL,
                 pyatspi.ROLE_COLUMN_HEADER,
                 pyatspi.ROLE_ROW_HEADER]

    def isCell(x):
        return x and x.getRole() in cellRoles

    if isCell(obj):
        return obj
    return pyatspi.utils.findAncestor(obj, isCell)
def getTableForCell(self, obj):
    """Looks for a table in the ancestry of obj, if obj is not a table.

    Arguments:
    - obj: the accessible object of interest.
    """
    if not obj:
        return obj

    def isTable(x):
        return x and x.getRole() == pyatspi.ROLE_TABLE

    if isTable(obj):
        return obj
    return pyatspi.utils.findAncestor(obj, isTable)
def _isBlankCell(self, obj):
"""Returns True if the table cell is empty or consists of whitespace.
Arguments:
- obj: the accessible table cell to examime
"""
if obj and (obj.name or obj.childCount):
return False
try:
text = obj.queryText()
except:
pass
else:
if text.getText(0, -1).strip():
return False
return True
def _getCellText(self, obj):
"""Looks at the table cell and tries to get its text.
Arguments:
- obj: the accessible table cell to examime
"""
text = ""
if obj and not obj.childCount:
text = self._script.utilities.displayedText(obj)
else:
for child in obj:
childText = self._script.utilities.displayedText(child)
text = self._script.utilities.appendString(text, childText)
return text
def _presentCellHeaders(self, cell, oldCellInfo):
"""Speaks the headers of the accessible table cell, cell.
Arguments:
- cell: the accessible table cell whose headers we wish to
present.
- oldCellInfo: [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
"""
if not cell or not oldCellInfo:
return
rowDiff, colDiff, oldRowHeaders, oldColHeaders = oldCellInfo
if not (oldRowHeaders or oldColHeaders):
return
if rowDiff:
rowHeaders = self._script.utilities.rowHeadersForCell(cell)
for header in rowHeaders:
if not header in oldRowHeaders:
text = self._getCellText(header)
speech.speak(text)
if colDiff:
colHeaders = self._script.utilities.columnHeadersForCell(cell)
for header in colHeaders:
if not header in oldColHeaders:
text = self._getCellText(header)
speech.speak(text)
def getCellCoordinates(self, obj):
    """Returns the [row, col] of a ROLE_TABLE_CELL or [-1, -1]
    if the coordinates cannot be found.

    Arguments:
    - obj: the accessible table cell whose coordinates we want.
    """
    cell = self.getCellForObj(obj)
    table = self.getTableForCell(cell)
    row, col = self._script.utilities.coordinatesForCell(cell)

    # A cell spanning multiple rows/columns reports its upper-left
    # coordinates. If this is the cell we last navigated to, prefer
    # the coordinates stored from "our perspective" so that movement
    # stays linear.
    lastRow, lastCol = self.lastTableCell
    lastCell = self._script.utilities.cellForCoordinates(table, lastRow, lastCol)
    if lastCell == cell:
        return lastRow, lastCol
    return row, col
def _getCaretPosition(self, obj):
"""Returns the [obj, characterOffset] where the caret should be
positioned. For most scripts, the object should not change and
the offset should be 0. That's not always the case with Gecko.
Arguments:
- obj: the accessible object in which the caret should be
positioned.
"""
return [obj, 0]
def _setCaretPosition(self, obj, characterOffset):
    """Sets the caret at the specified offset within obj.

    Arguments:
    - obj: the accessible object in which to position the caret.
    - characterOffset: the offset at which to position it.
    """
    self._script.utilities.setCaretPosition(obj, characterOffset)
def _presentLine(self, obj, offset):
"""Presents the first line of the object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
if not obj:
return
if self._presentWithSayAll(obj, offset):
return
self._script.updateBraille(obj)
self._script.sayLine(obj)
def _presentObject(self, obj, offset):
"""Presents the entire object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
if not obj:
return
if self._presentWithSayAll(obj, offset):
return
self._script.presentObject(obj, offset)
def _presentWithSayAll(self, obj, offset):
if self._script.inSayAll() \
and _settingsManager.getSetting('structNavInSayAll'):
self._script.sayAll(obj, offset)
return True
return False
def _getRoleName(self, obj):
    """Returns the localized role name of obj for presentation."""
    # Another case where we'll do this for now, and clean it up when
    # object presentation is refactored.
    return self._script.speechGenerator.getLocalizedRoleName(obj)
def _getSelectedItem(self, obj):
    """Returns the first selected child of obj (descending into the
    contained list when obj is a combo box), or None if obj does not
    support selection."""
    # Another case where we'll do this for now, and clean it up when
    # object presentation is refactored.
    if obj.getRole() == pyatspi.ROLE_COMBO_BOX:
        obj = obj[0]
    try:
        selection = obj.querySelection()
    except NotImplementedError:
        return None
    return selection.getSelectedChild(0)
def _getText(self, obj):
    """Returns a best-effort textual representation of obj, trying in
    order: displayed text, expanded embedded-object content, the
    selected item's name, image description, link basename, and the
    joined text of list items."""
    # Another case where we'll do this for now, and clean it up when
    # object presentation is refactored.
    text = self._script.utilities.displayedText(obj)
    if not text:
        text = self._script.utilities.expandEOCs(obj)
    if not text:
        item = self._getSelectedItem(obj)
        if item:
            text = item.name
    if not text and obj.getRole() == pyatspi.ROLE_IMAGE:
        try:
            image = obj.queryImage()
        except:
            text = obj.description
        else:
            text = image.imageDescription or obj.description
    if not text and obj.parent.getRole() == pyatspi.ROLE_LINK:
        text = self._script.utilities.linkBasename(obj.parent)
    if not text and obj.getRole() == pyatspi.ROLE_LIST:
        children = [x for x in obj if x.getRole() == pyatspi.ROLE_LIST_ITEM]
        # Recurse into each list item and join the results.
        text = " ".join(list(map(self._getText, children)))
    return text
def _getLabel(self, obj):
# Another case where we'll do this for now, and clean it up when
# object presentation is refactored.
label = self._script.utilities.displayedLabel(obj)
if not label:
label, objects = self._script.labelInference.infer(
obj, focusedOnly=False)
return label
def _getState(self, obj):
    """Returns the spoken state indicator (checked/selected/visited)
    for obj, or '' when the role has no such indicator or the object
    is no longer valid."""
    # Another case where we'll do this for now, and clean it up when
    # object presentation is refactored.
    try:
        state = obj.getState()
        role = obj.getRole()
    except RuntimeError:
        # Defunct/dead accessible.
        return ''

    # For now, we'll just grab the spoken indicator from settings.
    # When object presentation is refactored, we can clean this up.
    if role == pyatspi.ROLE_CHECK_BOX:
        unchecked, checked, partially = object_properties.CHECK_BOX_INDICATORS_SPEECH
        if state.contains(pyatspi.STATE_INDETERMINATE):
            return partially
        if state.contains(pyatspi.STATE_CHECKED):
            return checked
        return unchecked

    if role == pyatspi.ROLE_RADIO_BUTTON:
        unselected, selected = object_properties.RADIO_BUTTON_INDICATORS_SPEECH
        if state.contains(pyatspi.STATE_CHECKED):
            return selected
        return unselected

    if role == pyatspi.ROLE_LINK:
        if state.contains(pyatspi.STATE_VISITED):
            return object_properties.STATE_VISITED
        else:
            return object_properties.STATE_UNVISITED

    return ''
def _getValue(self, obj):
# Another case where we'll do this for now, and clean it up when
# object presentation is refactored.
return self._getState(obj) or self._getText(obj)
#########################################################################
# #
# Objects #
# #
#########################################################################
# All structural navigation objects have the following essential
# characteristics:
#
# 1. Keybindings for goPrevious, goNext, and other such methods
# 2. A means of identification (at least a predicate and possibly
# also criteria for generating a collection match rule)
# 3. A definition of how the object should be presented (both
# when another instance of that object is found as well as
# when it is not)
#
# Convenience methods have been put into place whereby one can
# create an object (FOO = "foo"), and then provide the following
# methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
# _fooPresentation(). With these in place, and with the object
# FOO included among the StructuralNavigation.enabledTypes for
# the script, the structural navigation object should be created
# and set up automagically. At least that is the idea. :-) This
# hopefully will also enable easy re-definition of existing
# objects on a script-by-script basis. For instance, in the
# StarOffice script, overriding the _blockquotePredicate should
# be all that is needed to implement navigation by blockquote
# in OOo Writer documents.
#
########################
# #
# Blockquotes #
# #
########################
def _blockquoteBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating among blockquotes.
    """
    return {
        "previous": ["q", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.BLOCKQUOTE_PREV],
        "next": ["q", keybindings.NO_MODIFIER_MASK,
                 cmdnames.BLOCKQUOTE_NEXT],
        "list": ["q", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.BLOCKQUOTE_LIST],
    }
def _blockquoteCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating blockquotes
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(collection, objAttrs=['tag:BLOCKQUOTE'])
def _blockquotePredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a blockquote.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not obj:
return False
attributes = obj.getAttributes()
if attributes:
for attribute in attributes:
if attribute == "tag:BLOCKQUOTE":
return True
return False
def _blockquotePresentation(self, obj, arg=None):
"""Presents the blockquote or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
full = messages.NO_MORE_BLOCKQUOTES
brief = messages.STRUCTURAL_NAVIGATION_NOT_FOUND
self._script.presentMessage(full, brief)
def _blockquoteDialogData(self):
    """Returns the (title, column headers, row-data function) used by
    the blockquote-list dialog."""
    def rowData(obj):
        return [self._getText(obj)]
    return (guilabels.SN_TITLE_BLOCKQUOTE,
            [guilabels.SN_HEADER_BLOCKQUOTE],
            rowData)
########################
# #
# Buttons #
# #
########################
def _buttonBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst buttons.
    """
    return {
        "previous": ["b", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.BUTTON_PREV],
        "next": ["b", keybindings.NO_MODIFIER_MASK,
                 cmdnames.BUTTON_NEXT],
        "list": ["b", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.BUTTON_LIST],
    }
def _buttonCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating buttons
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(
        collection,
        states=[pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE],
        matchStates=collection.MATCH_ALL,
        roles=[pyatspi.ROLE_PUSH_BUTTON])
def _buttonPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a button.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_PUSH_BUTTON:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _buttonPresentation(self, obj, arg=None):
"""Presents the button or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
full = messages.NO_MORE_BUTTONS
brief = messages.STRUCTURAL_NAVIGATION_NOT_FOUND
self._script.presentMessage(full, brief)
def _buttonDialogData(self):
    """Returns the (title, column headers, row-data function) used by
    the button-list dialog."""
    def rowData(obj):
        return [self._getText(obj)]
    return (guilabels.SN_TITLE_BUTTON,
            [guilabels.SN_HEADER_BUTTON],
            rowData)
########################
# #
# Check boxes #
# #
########################
def _checkBoxBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst check boxes.
    """
    return {
        "previous": ["x", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.CHECK_BOX_PREV],
        "next": ["x", keybindings.NO_MODIFIER_MASK,
                 cmdnames.CHECK_BOX_NEXT],
        "list": ["x", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.CHECK_BOX_LIST],
    }
def _checkBoxCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating check boxes
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(
        collection,
        states=[pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE],
        matchStates=collection.MATCH_ALL,
        roles=[pyatspi.ROLE_CHECK_BOX])
def _checkBoxPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a check box.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_CHECK_BOX:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _checkBoxPresentation(self, obj, arg=None):
"""Presents the check box or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
full = messages.NO_MORE_CHECK_BOXES
brief = messages.STRUCTURAL_NAVIGATION_NOT_FOUND
self._script.presentMessage(full, brief)
def _checkBoxDialogData(self):
    """Returns the (title, column headers, row-data function) used by
    the check-box-list dialog."""
    def rowData(obj):
        return [self._getLabel(obj), self._getState(obj)]
    return (guilabels.SN_TITLE_CHECK_BOX,
            [guilabels.SN_HEADER_CHECK_BOX, guilabels.SN_HEADER_STATE],
            rowData)
########################
# #
# Chunks/Large Objects #
# #
########################
def _chunkBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst chunks/large objects.
    """
    return {
        "previous": ["o", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LARGE_OBJECT_PREV],
        "next": ["o", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LARGE_OBJECT_NEXT],
        "list": ["o", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.LARGE_OBJECT_LIST],
    }
def _chunkCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating chunks/
    large objects by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(
        collection,
        roles=self.OBJECT_ROLES,
        matchRoles=collection.MATCH_ANY,
        applyPredicate=True)
def _chunkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a chunk.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj and obj.getRole() in self.OBJECT_ROLES:
text = self._script.utilities.queryNonEmptyText(obj)
if not (text and text.characterCount > settings.largeObjectTextLength):
return False
string = text.getText(0, -1)
eocs = string.count(self._script.EMBEDDED_OBJECT_CHARACTER)
if eocs/text.characterCount < 0.05:
return True
return False
def _chunkPresentation(self, obj, arg=None):
"""Presents the chunk or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
full = messages.NO_MORE_CHUNKS
brief = messages.STRUCTURAL_NAVIGATION_NOT_FOUND
self._script.presentMessage(full, brief)
def _chunkDialogData(self):
    """Returns the (title, column headers, row-data function) used by
    the large-object-list dialog."""
    def rowData(obj):
        return [self._getText(obj), self._getRoleName(obj)]
    return (guilabels.SN_TITLE_LARGE_OBJECT,
            [guilabels.SN_HEADER_OBJECT, guilabels.SN_HEADER_ROLE],
            rowData)
########################
# #
# Combo Boxes #
# #
########################
def _comboBoxBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst combo boxes.
    """
    return {
        "previous": ["c", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.COMBO_BOX_PREV],
        "next": ["c", keybindings.NO_MODIFIER_MASK,
                 cmdnames.COMBO_BOX_NEXT],
        "list": ["c", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.COMBO_BOX_LIST],
    }
def _comboBoxCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate combo boxes via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # A combo box must be both focusable and sensitive to count.
    return MatchCriteria(
        collection,
        states=[pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE],
        matchStates=collection.MATCH_ALL,
        roles=[pyatspi.ROLE_COMBO_BOX])
def _comboBoxPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a combo box.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() != pyatspi.ROLE_COMBO_BOX:
        return False
    state = obj.getState()
    return state.contains(pyatspi.STATE_FOCUSABLE) \
        and state.contains(pyatspi.STATE_SENSITIVE)
def _comboBoxPresentation(self, obj, arg=None):
    """Presents the combo box obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_COMBO_BOXES,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _comboBoxDialogData(self):
    """Returns the title, column headers, and row-data function for
    the combo-box list dialog."""
    headers = [guilabels.SN_HEADER_COMBO_BOX,
               guilabels.SN_HEADER_SELECTED_ITEM]

    def rowData(obj):
        return [self._getLabel(obj), self._getText(obj)]

    return guilabels.SN_TITLE_COMBO_BOX, headers, rowData
########################
# #
# Entries #
# #
########################
def _entryBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst entries."""
    return {
        "previous": ["e", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.ENTRY_PREV],
        "next": ["e", keybindings.NO_MODIFIER_MASK,
                 cmdnames.ENTRY_NEXT],
        "list": ["e", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.ENTRY_LIST],
    }
def _entryCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate entries via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Any of these roles may be an editable text entry; the state
    # filter (focusable + sensitive + editable) narrows them down.
    roles = [pyatspi.ROLE_DOCUMENT_FRAME,
             pyatspi.ROLE_ENTRY,
             pyatspi.ROLE_PASSWORD_TEXT,
             pyatspi.ROLE_TEXT]
    states = [pyatspi.STATE_FOCUSABLE,
              pyatspi.STATE_SENSITIVE,
              pyatspi.STATE_EDITABLE]
    return MatchCriteria(collection,
                         states=states,
                         matchStates=collection.MATCH_ALL,
                         roles=roles,
                         matchRoles=collection.MATCH_ANY,
                         applyPredicate=True)
def _entryPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is an entry.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    entryRoles = (pyatspi.ROLE_DOCUMENT_FRAME,
                  pyatspi.ROLE_ENTRY,
                  pyatspi.ROLE_PASSWORD_TEXT,
                  pyatspi.ROLE_TEXT)
    if not obj or obj.getRole() not in entryRoles:
        return False
    state = obj.getState()
    return state.contains(pyatspi.STATE_FOCUSABLE) \
        and state.contains(pyatspi.STATE_SENSITIVE) \
        and state.contains(pyatspi.STATE_EDITABLE)
def _entryPresentation(self, obj, arg=None):
    """Presents the entry obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_ENTRIES,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _entryDialogData(self):
    """Returns the title, column headers, and row-data function for
    the entries list dialog."""
    headers = [guilabels.SN_HEADER_LABEL, guilabels.SN_HEADER_TEXT]

    def rowData(obj):
        return [self._getLabel(obj), self._getText(obj)]

    return guilabels.SN_TITLE_ENTRY, headers, rowData
########################
# #
# Form Fields #
# #
########################
def _formFieldBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst form fields."""
    return {
        "previous": ["Tab", keybindings.ORCA_SHIFT_MODIFIER_MASK,
                     cmdnames.FORM_FIELD_PREV],
        "next": ["Tab", keybindings.ORCA_MODIFIER_MASK,
                 cmdnames.FORM_FIELD_NEXT],
        "list": ["f", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.FORM_FIELD_LIST],
    }
def _formFieldCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate form fields via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(
        collection,
        states=[pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE],
        matchStates=collection.MATCH_ALL,
        roles=self.FORM_ROLES,
        matchRoles=collection.MATCH_ANY,
        applyPredicate=True)
def _formFieldPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a form field.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() not in self.FORM_ROLES:
        return False
    state = obj.getState()
    if not (state.contains(pyatspi.STATE_FOCUSABLE)
            and state.contains(pyatspi.STATE_SENSITIVE)):
        return False
    # A document frame only counts as a form field when it is an
    # editable document (e.g. a rich-text editing area).
    if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
        return state.contains(pyatspi.STATE_EDITABLE)
    return True
def _formFieldPresentation(self, obj, arg=None):
    """Presents the form field obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_FORM_FIELDS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    # Descend into the first child of a text container so the caret
    # lands on the actual field.
    if obj.getRole() == pyatspi.ROLE_TEXT and obj.childCount:
        obj = obj[0]
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _formFieldDialogData(self):
    """Returns the title, column headers, and row-data function for
    the form-fields list dialog."""
    headers = [guilabels.SN_HEADER_LABEL,
               guilabels.SN_HEADER_ROLE,
               guilabels.SN_HEADER_VALUE]

    def rowData(obj):
        return [self._getLabel(obj),
                self._getRoleName(obj),
                self._getValue(obj)]

    return guilabels.SN_TITLE_FORM_FIELD, headers, rowData
########################
# #
# Headings #
# #
########################
def _headingBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst headings, including per-level
    bindings (keys 1..6 by default)."""
    bindings = {
        "previous": ["h", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.HEADING_PREV],
        "next": ["h", keybindings.NO_MODIFIER_MASK,
                 cmdnames.HEADING_NEXT],
        "list": ["h", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.HEADING_LIST],
    }
    minLevel, maxLevel = self._headingLevels()
    levels = range(minLevel, maxLevel + 1)
    # The digit keys navigate/list headings at that specific level.
    bindings["previousAtLevel"] = [
        [str(i), keybindings.SHIFT_MODIFIER_MASK,
         cmdnames.HEADING_AT_LEVEL_PREV % i]
        for i in levels]
    bindings["nextAtLevel"] = [
        [str(i), keybindings.NO_MODIFIER_MASK,
         cmdnames.HEADING_AT_LEVEL_NEXT % i]
        for i in levels]
    bindings["listAtLevel"] = [
        [str(i), keybindings.SHIFT_ALT_MODIFIER_MASK,
         cmdnames.HEADING_AT_LEVEL_LIST % i]
        for i in levels]
    return bindings
def _headingLevels(self):
"""Returns the [minimum heading level, maximum heading level]
which should be navigable via structural navigation.
"""
return [1, 6]
def _headingCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate headings via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # When a specific level is requested, filter on the 'level'
    # object attribute as well.
    attrs = ['level:%d' % arg] if arg else []
    return MatchCriteria(collection,
                         roles=[pyatspi.ROLE_HEADING],
                         objAttrs=attrs)
def _headingPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a heading, optionally
    at the level given by arg.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() != pyatspi.ROLE_HEADING:
        return False
    if not arg:
        return True
    return arg == self._script.utilities.headingLevel(obj)
def _headingPresentation(self, obj, arg=None):
    """Presents the heading obj, or announces that none was found
    (mentioning the requested level when one was given).

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if obj:
        obj, characterOffset = self._getCaretPosition(obj)
        self._setCaretPosition(obj, characterOffset)
        self._presentObject(obj, characterOffset)
        return
    if arg:
        full = messages.NO_MORE_HEADINGS_AT_LEVEL % arg
    else:
        full = messages.NO_MORE_HEADINGS
    self._script.presentMessage(
        full, messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
def _headingDialogData(self, arg=None):
    """Returns the title, column headers, and row-data function for
    the headings list dialog. When arg is a level, the dialog is
    restricted to that level and omits the level column."""
    columnHeaders = [guilabels.SN_HEADER_HEADING]
    if arg:
        title = guilabels.SN_TITLE_HEADING_AT_LEVEL % arg

        def rowData(obj):
            return [self._getText(obj)]
    else:
        title = guilabels.SN_TITLE_HEADING
        columnHeaders.append(guilabels.SN_HEADER_LEVEL)

        def rowData(obj):
            return [self._getText(obj),
                    str(self._script.utilities.headingLevel(obj))]

    return title, columnHeaders, rowData
########################
# #
# Images #
# #
########################
def _imageBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst images."""
    return {
        "previous": ["g", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.IMAGE_PREV],
        "next": ["g", keybindings.NO_MODIFIER_MASK,
                 cmdnames.IMAGE_NEXT],
        "list": ["g", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.IMAGE_LIST],
    }
def _imageCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate images via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(collection, roles=self.IMAGE_ROLES)
def _imagePredicate(self, obj, arg=None):
    """The predicate used to verify that obj is an image.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        return obj
    return obj.getRole() in self.IMAGE_ROLES
def _imagePresentation(self, obj, arg=None):
    """Presents the image/graphic obj, or announces that none was
    found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_IMAGES,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    # Move the caret near the image, but present the image itself.
    newObj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(newObj, characterOffset)
    self._presentObject(obj, 0)
def _imageDialogData(self):
    """Returns the title, column headers, and row-data function for
    the images list dialog."""
    def rowData(obj):
        # Fall back on the role name for images with no text.
        return [self._getText(obj) or self._getRoleName(obj)]

    return guilabels.SN_TITLE_IMAGE, [guilabels.SN_HEADER_IMAGE], rowData
########################
# #
# Landmarks #
# #
########################
def _landmarkBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst landmarks."""
    return {
        "previous": ["m", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LANDMARK_PREV],
        "next": ["m", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LANDMARK_NEXT],
        "list": ["m", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.LANDMARK_LIST],
    }
def _landmarkCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate landmarks via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).

    NOTE: there is a limitation in the AT-SPI Collections interface
    when it comes to an attribute whose value can be a list. For
    example, the xml-roles attribute can be a space-separated list
    of roles. We'd like to make a match if the xml-roles attribute
    has one (or any) of the roles we care about. Instead, we're
    restricted to an exact match. So, the below will only work in
    the cases where the xml-roles attribute value consists solely
    of a single role. In practice, this seems to be the case that
    we run into for the landmark roles.
    """
    attrs = ['xml-roles:' + landmark
             for landmark in settings.ariaLandmarks]
    return MatchCriteria(collection, objAttrs=attrs)
def _landmarkPredicate(self, obj, arg=None):
    """The predicate to be used for verifying that the object
    obj is a landmark.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if obj is None:
        return False
    attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    try:
        # The xml-roles attribute value is a space-separated string of
        # role tokens, so split it before intersecting. The previous
        # code did set(attrs['xml-roles']), which produced a set of
        # individual CHARACTERS and therefore could never match a
        # multi-character landmark role name such as "banner".
        roles = set(attrs['xml-roles'].split())
        return bool(roles.intersection(set(settings.ariaLandmarks)))
    except KeyError:
        # No xml-roles attribute at all: not a landmark.
        return False
def _landmarkPresentation(self, obj, arg=None):
    """Presents the landmark obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_LANDMARK_FOUND,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _landmarkDialogData(self):
    """Returns the title, column headers, and row-data function for
    the landmarks list dialog."""
    def rowData(obj):
        return [self._getText(obj)]

    return (guilabels.SN_TITLE_LANDMARK,
            [guilabels.SN_HEADER_LANDMARK],
            rowData)
########################
# #
# Lists #
# #
########################
def _listBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst (un)ordered lists."""
    return {
        "previous": ["l", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LIST_PREV],
        "next": ["l", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LIST_NEXT],
        "list": ["l", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.LIST_LIST],
    }
def _listCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate (un)ordered lists via
    the collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Focusable lists are widgets (e.g. list boxes), not document
    # content lists, so exclude them via MATCH_NONE.
    return MatchCriteria(collection,
                         states=[pyatspi.STATE_FOCUSABLE],
                         matchStates=collection.MATCH_NONE,
                         roles=[pyatspi.ROLE_LIST])
def _listPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is an (un)ordered list.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() != pyatspi.ROLE_LIST:
        return False
    # Focusable lists are widgets rather than content lists.
    return not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
def _listPresentation(self, obj, arg=None):
    """Presents the (un)ordered list obj, or announces that none was
    found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_LISTS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    # Announce the list itself, then present its first line.
    speech.speak(self._script.speechGenerator.generateSpeech(obj))
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentLine(obj, characterOffset)
def _listDialogData(self):
    """Returns the title, column headers, and row-data function for
    the lists dialog."""
    def rowData(obj):
        return [self._getText(obj)]

    return guilabels.SN_TITLE_LIST, [guilabels.SN_HEADER_LIST], rowData
########################
# #
# List Items #
# #
########################
def _listItemBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst items in an (un)ordered list."""
    return {
        "previous": ["i", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LIST_ITEM_PREV],
        "next": ["i", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LIST_ITEM_NEXT],
        "list": ["i", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.LIST_ITEM_LIST],
    }
def _listItemCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate items of (un)ordered
    lists via the collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Focusable list items belong to widgets, not content lists.
    return MatchCriteria(collection,
                         states=[pyatspi.STATE_FOCUSABLE],
                         matchStates=collection.MATCH_NONE,
                         roles=[pyatspi.ROLE_LIST_ITEM])
def _listItemPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is an item in an
    (un)ordered list.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() != pyatspi.ROLE_LIST_ITEM:
        return False
    return not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
def _listItemPresentation(self, obj, arg=None):
    """Presents the (un)ordered list item obj, or announces that none
    was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_LIST_ITEMS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentLine(obj, characterOffset)
def _listItemDialogData(self):
    """Returns the title, column headers, and row-data function for
    the list-items dialog."""
    def rowData(obj):
        return [self._getText(obj)]

    return (guilabels.SN_TITLE_LIST_ITEM,
            [guilabels.SN_HEADER_LIST_ITEM],
            rowData)
########################
# #
# Live Regions #
# #
########################
def _liveRegionBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst live regions."""
    return {
        "previous": ["d", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LIVE_REGION_PREV],
        "next": ["d", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LIVE_REGION_NEXT],
        # "last" reviews the most recent live region message.
        "last": ["y", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LIVE_REGION_LAST],
    }
def _liveRegionCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate live regions via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).

    Matches based on object attributes assume unique name-value pairs
    because pyatspi creates a dictionary from the list. In addition,
    wildcard matching is not possible. As a result, we cannot search
    for any object which has an attribute named container-live, so
    we rely entirely on the predicate.
    """
    return MatchCriteria(collection, applyPredicate=True)
def _liveRegionPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a live region.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    manager = self._script.liveRegionManager
    if manager.matchLiveRegion(obj):
        return True
    # Also count regions whose politeness is currently "none".
    return obj in manager.getLiveNoneObjects()
def _liveRegionPresentation(self, obj, arg=None):
    """Presents the live region obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_LIVE_REGIONS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
########################
# #
# Paragraphs #
# #
########################
def _paragraphBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst paragraphs."""
    return {
        "previous": ["p", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.PARAGRAPH_PREV],
        "next": ["p", keybindings.NO_MODIFIER_MASK,
                 cmdnames.PARAGRAPH_NEXT],
        "list": ["p", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.PARAGRAPH_LIST],
    }
def _paragraphCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate paragraphs via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(collection,
                         roles=[pyatspi.ROLE_PARAGRAPH],
                         applyPredicate=True)
def _paragraphPredicate(self, obj, arg=None):
    """The predicate to be used for verifying that the object
    obj is a paragraph.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).

    Returns True only for paragraphs with more than two characters
    of text; returns False (via fall-through) otherwise.
    """
    isMatch = False
    if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
        try:
            text = obj.queryText()
            # We're choosing 3 characters as the minimum because some
            # paragraphs contain a single image or link and a text
            # of length 2: An embedded object character and a space.
            # We want to skip these.
            #
            isMatch = text.characterCount > 2
        except:
            # Object does not implement the text interface (or the
            # query failed); treat it as a non-match.
            pass
    return isMatch
def _paragraphPresentation(self, obj, arg=None):
    """Presents the paragraph obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_PARAGRAPHS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    # Move the caret into the paragraph, but present the paragraph
    # from its beginning.
    newObj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(newObj, characterOffset)
    self._presentObject(obj, 0)
def _paragraphDialogData(self):
    """Returns the title, column headers, and row-data function for
    the paragraphs list dialog."""
    def rowData(obj):
        return [self._getText(obj)]

    return (guilabels.SN_TITLE_PARAGRAPH,
            [guilabels.SN_HEADER_PARAGRAPH],
            rowData)
########################
# #
# Radio Buttons #
# #
########################
def _radioButtonBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst radio buttons."""
    return {
        "previous": ["r", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.RADIO_BUTTON_PREV],
        "next": ["r", keybindings.NO_MODIFIER_MASK,
                 cmdnames.RADIO_BUTTON_NEXT],
        "list": ["r", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.RADIO_BUTTON_LIST],
    }
def _radioButtonCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate radio buttons via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(
        collection,
        states=[pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE],
        matchStates=collection.MATCH_ALL,
        roles=[pyatspi.ROLE_RADIO_BUTTON])
def _radioButtonPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a radio button.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj or obj.getRole() != pyatspi.ROLE_RADIO_BUTTON:
        return False
    state = obj.getState()
    return state.contains(pyatspi.STATE_FOCUSABLE) \
        and state.contains(pyatspi.STATE_SENSITIVE)
def _radioButtonPresentation(self, obj, arg=None):
    """Presents the radio button obj, or announces that none was
    found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_RADIO_BUTTONS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    obj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _radioButtonDialogData(self):
    """Returns the title, column headers, and row-data function for
    the radio-buttons list dialog."""
    headers = [guilabels.SN_HEADER_RADIO_BUTTON,
               guilabels.SN_HEADER_STATE]

    def rowData(obj):
        return [self._getLabel(obj), self._getState(obj)]

    return guilabels.SN_TITLE_RADIO_BUTTON, headers, rowData
########################
# #
# Separators #
# #
########################
def _separatorBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst separators. Note that separators
    have no "list" command."""
    return {
        "previous": ["s", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.SEPARATOR_PREV],
        "next": ["s", keybindings.NO_MODIFIER_MASK,
                 cmdnames.SEPARATOR_NEXT],
    }
def _separatorCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate separators via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(collection,
                         roles=[pyatspi.ROLE_SEPARATOR],
                         applyPredicate=False)
def _separatorPredicate(self, obj, arg=None):
    """The predicate used to verify that obj is a separator.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        return obj
    return obj.getRole() == pyatspi.ROLE_SEPARATOR
def _separatorPresentation(self, obj, arg=None):
    """Presents the separator obj, or announces that none was found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_SEPARATORS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    newObj, characterOffset = self._getCaretPosition(obj)
    self._setCaretPosition(newObj, characterOffset)
    self._presentObject(obj, 0)
########################
# #
# Tables #
# #
########################
def _tableBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst tables."""
    return {
        "previous": ["t", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.TABLE_PREV],
        "next": ["t", keybindings.NO_MODIFIER_MASK,
                 cmdnames.TABLE_NEXT],
        "list": ["t", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.TABLE_LIST],
    }
def _tableCriteria(self, collection, arg=None):
    """Returns the MatchCriteria used to locate tables via the
    collection interface.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    return MatchCriteria(collection,
                         roles=[pyatspi.ROLE_TABLE],
                         applyPredicate=True)
def _tablePredicate(self, obj, arg=None):
    """The predicate to be used for verifying that the object
    obj is a (non-layout, non-empty) data table.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Must be a table with at least one child.
    if not (obj and obj.childCount and obj.getRole() == pyatspi.ROLE_TABLE):
        return False
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        # Attributes unavailable; we cannot rule out a layout table,
        # so treat it as a non-match.
        return False
    # Skip tables the toolkit believes are for layout, not data.
    if attrs.get('layout-guess') == 'true':
        return False
    try:
        # Only tables with at least one row are interesting.
        return obj.queryTable().nRows > 0
    except:
        # No table interface (or the query failed).
        pass
    return False
def _tablePresentation(self, obj, arg=None):
    """Presents the table, or indicates that one was not found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if obj:
        # Speak the caption (if any) and the table description first.
        caption = self._getTableCaption(obj)
        if caption:
            self._script.presentMessage(caption)
        self._script.presentMessage(self._getTableDescription(obj))
        # Then land on and present the table's first cell (0, 0),
        # remembering it for subsequent cell navigation.
        cell = obj.queryTable().getAccessibleAt(0, 0)
        self.lastTableCell = [0, 0]
        # NOTE: the cell is presented before the caret is moved;
        # keep this ordering.
        self._presentObject(cell, 0)
        [cell, characterOffset] = self._getCaretPosition(cell)
        self._setCaretPosition(cell, characterOffset)
    else:
        full = messages.NO_MORE_TABLES
        brief = messages.STRUCTURAL_NAVIGATION_NOT_FOUND
        self._script.presentMessage(full, brief)
def _tableDialogData(self):
    """Returns the title, column headers, and row-data function for
    the tables list dialog."""
    headers = [guilabels.SN_HEADER_CAPTION,
               guilabels.SN_HEADER_DESCRIPTION]

    def rowData(obj):
        # Tables need not have a caption; show an empty string then.
        return [self._getTableCaption(obj) or '',
                self._getTableDescription(obj)]

    return guilabels.SN_TITLE_TABLE, headers, rowData
########################
# #
# Table Cells #
# #
########################
def _tableCellBindings(self):
    """Returns the dictionary of [keysymstring, modifiers, description]
    lists for navigating spatially amongst table cells."""
    mask = keybindings.SHIFT_ALT_MODIFIER_MASK
    return {
        "left": ["Left", mask, cmdnames.TABLE_CELL_LEFT],
        "right": ["Right", mask, cmdnames.TABLE_CELL_RIGHT],
        "up": ["Up", mask, cmdnames.TABLE_CELL_UP],
        "down": ["Down", mask, cmdnames.TABLE_CELL_DOWN],
        "first": ["Home", mask, cmdnames.TABLE_CELL_FIRST],
        "last": ["End", mask, cmdnames.TABLE_CELL_LAST],
    }
def _tableCellCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating table cells
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Headers count as cells for navigation purposes.
    cellRoles = [pyatspi.ROLE_TABLE_CELL,
                 pyatspi.ROLE_COLUMN_HEADER,
                 pyatspi.ROLE_ROW_HEADER]
    return MatchCriteria(collection, roles=cellRoles)
def _tableCellPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a table cell.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
return (obj and obj.getRole() in [pyatspi.ROLE_COLUMN_HEADER,
pyatspi.ROLE_ROW_HEADER,
pyatspi.ROLE_TABLE_CELL])
def _tableCellPresentation(self, cell, arg):
"""Presents the table cell or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not cell:
return
if settings.speakCellHeaders:
self._presentCellHeaders(cell, arg)
[obj, characterOffset] = self._getCaretPosition(cell)
self._setCaretPosition(obj, characterOffset)
self._script.updateBraille(obj)
blank = self._isBlankCell(cell)
if not blank:
self._presentObject(cell, 0)
else:
speech.speak(messages.BLANK)
if settings.speakCellCoordinates:
[row, col] = self.getCellCoordinates(cell)
self._script.presentMessage(messages.TABLE_CELL_COORDINATES \
% {"row" : row + 1, "column" : col + 1})
rowspan, colspan = self._script.utilities.rowAndColumnSpan(cell)
spanString = messages.cellSpan(rowspan, colspan)
if spanString and settings.speakCellSpan:
self._script.presentMessage(spanString)
########################
# #
# Unvisited Links #
# #
########################
def _unvisitedLinkBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst unvisited links.
    """
    return {
        "previous": ["u", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.UNVISITED_LINK_PREV],
        "next": ["u", keybindings.NO_MODIFIER_MASK,
                 cmdnames.UNVISITED_LINK_NEXT],
        "list": ["u", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.UNVISITED_LINK_LIST],
    }
def _unvisitedLinkCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating unvisited links
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # MATCH_NONE on STATE_VISITED selects links lacking the visited
    # state; the predicate is also applied to the matches.
    return MatchCriteria(collection,
                         states=[pyatspi.STATE_VISITED],
                         matchStates=collection.MATCH_NONE,
                         roles=[pyatspi.ROLE_LINK],
                         applyPredicate=True)
def _unvisitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an unvisited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
state = obj.getState()
isMatch = not state.contains(pyatspi.STATE_VISITED) \
and state.contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _unvisitedLinkPresentation(self, obj, arg=None):
    """Presents the unvisited link or indicates that one was not
    found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_UNVISITED_LINKS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    [obj, characterOffset] = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _unvisitedLinkDialogData(self):
    """Returns the title, column headers, and row-data function used to
    build the summary dialog for unvisited links."""
    columnHeaders = [guilabels.SN_HEADER_LINK, guilabels.SN_HEADER_URI]

    def rowData(obj):
        # One row per link: displayed text and target URI.
        return [self._getText(obj), self._script.utilities.uri(obj)]

    return guilabels.SN_TITLE_UNVISITED_LINK, columnHeaders, rowData
########################
# #
# Visited Links #
# #
########################
def _visitedLinkBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst visited links.
    """
    return {
        "previous": ["v", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.VISITED_LINK_PREV],
        "next": ["v", keybindings.NO_MODIFIER_MASK,
                 cmdnames.VISITED_LINK_NEXT],
        "list": ["v", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.VISITED_LINK_LIST],
    }
def _visitedLinkCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating visited links
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # MATCH_ALL: the link must be both visited and focusable.
    return MatchCriteria(collection,
                         states=[pyatspi.STATE_VISITED,
                                 pyatspi.STATE_FOCUSABLE],
                         matchStates=collection.MATCH_ALL,
                         roles=[pyatspi.ROLE_LINK])
def _visitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a visited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_VISITED) \
and state.contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _visitedLinkPresentation(self, obj, arg=None):
    """Presents the visited link or indicates that one was not
    found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_VISITED_LINKS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    [obj, characterOffset] = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _visitedLinkDialogData(self):
    """Returns the title, column headers, and row-data function used to
    build the summary dialog for visited links."""
    columnHeaders = [guilabels.SN_HEADER_LINK, guilabels.SN_HEADER_URI]

    def rowData(obj):
        # One row per link: displayed text and target URI.
        return [self._getText(obj), self._script.utilities.uri(obj)]

    return guilabels.SN_TITLE_VISITED_LINK, columnHeaders, rowData
########################
# #
# Plain ol' Links #
# #
########################
def _linkBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst links.
    """
    return {
        "previous": ["k", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.LINK_PREV],
        "next": ["k", keybindings.NO_MODIFIER_MASK,
                 cmdnames.LINK_NEXT],
        "list": ["k", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.LINK_LIST],
    }
def _linkCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating links
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Any focusable link, visited or not.
    return MatchCriteria(collection,
                         states=[pyatspi.STATE_FOCUSABLE],
                         matchStates=collection.MATCH_ALL,
                         roles=[pyatspi.ROLE_LINK])
def _linkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
state = obj.getState()
isMatch = not state.contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _linkPresentation(self, obj, arg=None):
    """Presents the link or indicates that one was not found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if not obj:
        self._script.presentMessage(
            messages.NO_MORE_LINKS,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
        return
    [obj, characterOffset] = self._getCaretPosition(obj)
    self._setCaretPosition(obj, characterOffset)
    self._presentObject(obj, characterOffset)
def _linkDialogData(self):
    """Returns the title, column headers, and row-data function used to
    build the summary dialog for links."""
    columnHeaders = [guilabels.SN_HEADER_LINK,
                     guilabels.SN_HEADER_STATE,
                     guilabels.SN_HEADER_URI]

    def rowData(obj):
        # One row per link: displayed text, state, and target URI.
        return [self._getText(obj),
                self._getState(obj),
                self._script.utilities.uri(obj)]

    return guilabels.SN_TITLE_LINK, columnHeaders, rowData
########################
# #
# Clickables #
# #
########################
def _clickableBindings(self):
    """Returns a dictionary of [keysymstring, modifiers, description]
    lists for navigating amongst "clickable" objects."""
    return {
        "previous": ["a", keybindings.SHIFT_MODIFIER_MASK,
                     cmdnames.CLICKABLE_PREV],
        "next": ["a", keybindings.NO_MODIFIER_MASK,
                 cmdnames.CLICKABLE_NEXT],
        "list": ["a", keybindings.SHIFT_ALT_MODIFIER_MASK,
                 cmdnames.CLICKABLE_LIST],
    }
def _clickableCriteria(self, collection, arg=None):
    """Returns the MatchCriteria to be used for locating clickables
    by collection.

    Arguments:
    - collection: the collection interface for the document
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # TODO - JD: At the moment, matching via interface crashes Orca.
    # Until that's addressed, we'll just use the predicate approach.
    # See https://bugzilla.gnome.org/show_bug.cgi?id=734805.
    return MatchCriteria(collection, applyPredicate=True)
def _clickablePredicate(self, obj, arg=None):
    """The predicate to be used for verifying that the object
    obj is a clickable.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    # Delegate entirely to the script utilities' notion of "clickable".
    utilities = self._script.utilities
    return utilities.isClickableElement(obj)
def _clickablePresentation(self, obj, arg=None):
    """Presents the clickable or indicates that one was not found.

    Arguments:
    - obj: the accessible object under consideration.
    - arg: an optional argument which may need to be included in
      the criteria (e.g. the level of a heading).
    """
    if obj:
        [obj, characterOffset] = self._getCaretPosition(obj)
        self._setCaretPosition(obj, characterOffset)
        self._presentObject(obj, characterOffset)
    elif not arg:
        # Unlike the other presenters, failure is only announced when
        # no arg was supplied — preserved as-is; the reason is not
        # visible from this method alone.
        self._script.presentMessage(
            messages.NO_MORE_CLICKABLES,
            messages.STRUCTURAL_NAVIGATION_NOT_FOUND)
def _clickableDialogData(self):
    """Returns the title, column headers, and row-data function used to
    build the summary dialog for clickables."""
    columnHeaders = [guilabels.SN_HEADER_CLICKABLE,
                     guilabels.SN_HEADER_ROLE]

    def rowData(obj):
        # One row per clickable: displayed text and role name.
        return [self._getText(obj), self._getRoleName(obj)]

    return guilabels.SN_TITLE_CLICKABLE, columnHeaders, rowData
| pvagner/orca | src/orca/structural_navigation.py | Python | lgpl-2.1 | 119,643 | [
"ORCA"
] | fa5fa5c7669593bed4c07f3033fccf33819e85e05227fd227bd4eeed2195aa5a |
# Doctest text covering basic generator behavior.
# NOTE(review): this doctest string uses Python 2 syntax (print statement,
# g.next()), and the leading indentation inside the string appears to have
# been stripped — the doctests will not run as-is; confirm against the
# original test_generators.py before relying on them.
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print i
1
2
>>> g = f()
>>> g.next()
1
>>> g.next()
2
"Falling off the end" stops the generator:
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> g.next() # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(g2())
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print "creator", r.next()
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print "caller", i
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
# NOTE(review): Python 2 era doctests; indentation inside the string appears
# stripped (see note on tutorial_tests) — verify before executing.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = me.next()
... yield i
>>> me = g()
>>> me.next()
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print list(f1())
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(f2())
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> k.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division by zero
>>> k.next() # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print list(f())
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
# NOTE(review): Python 2 era doctests; indentation inside the string appears
# stripped (see note on tutorial_tests) — verify before executing.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = range(1, 5)
>>> for k in range(len(seq) + 2):
... print "%d-combs of %s:" % (k, seq)
... for c in gcomb(seq, k):
... print " ", c
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<type 'function'>
>>> i = g()
>>> type(i)
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
>>> from test.test_support import HAVE_DOCSTRINGS
>>> print(i.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<type 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
TypeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> me.next()
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return self.generator.next()
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.WichmannHill(42)
>>> while 1:
... for s in sets:
... print "%s->%s" % (s, s.find()),
... print
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print "merged", s1, "into", s2
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged D into G
A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged C into F
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged L into A
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
merged H into E
A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged B into E
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged J into G
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
merged E into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
merged M into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
merged I into K
A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
merged K into A
A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
merged F into A
A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
# NOTE(review): Python 2 era doctests; indentation inside the string appears
# stripped (see note on tutorial_tests) — verify before executing.
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [g.next() for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = ints.next()
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = g.next()
... nh = h.next()
... while 1:
... if ng < nh:
... yield ng
... ng = g.next()
... elif ng > nh:
... yield nh
... nh = h.next()
... else:
... yield ng
... ng = g.next()
... nh = h.next()
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print firstn(result, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print [m235[j] for j in range(15*i, 15*(i+1))]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def tail(g):
... g.next() # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print firstn(it, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def _fib():
... yield 1
... yield 2
... fibTail.next() # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
# NOTE(review): Python 2 era doctests; indentation inside the string appears
# stripped (see note on tutorial_tests) — verify before executing.
syntax_tests = """
>>> def f():
... return 22
... yield 1
Traceback (most recent call last):
..
File "<doctest test.test_generators.__test__.syntax[0]>", line 3
SyntaxError: 'return' with argument inside generator
>>> def f():
... yield 1
... return 22
Traceback (most recent call last):
..
File "<doctest test.test_generators.__test__.syntax[1]>", line 3
SyntaxError: 'return' with argument inside generator
"return None" is not the same as "return" in a generator:
>>> def f():
... yield 1
... return None
Traceback (most recent call last):
..
File "<doctest test.test_generators.__test__.syntax[2]>", line 3
SyntaxError: 'return' with argument inside generator
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<type 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<type 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.syntax[24]>", line 10
SyntaxError: 'return' with argument inside generator
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print g.next()
0
>>> print g.next()
1
>>> print g.next()
2
>>> print g.next()
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.func_code
True
>>> g.next()
5
>>> g.next()
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.func_code
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().next
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
# generates the possiblities for the columns in that row.
self.rowgenerators = []
for i in rangen:
rowuses = [(1L << j) | # column ordinal
(1L << (n + i-j + n-1)) | # NW-SE ordinal
(1L << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print sep
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print "|" + "|".join(squares) + "|"
print sep
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
# Finelly, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print sep
for i in range(m):
row = squares[i]
print "|" + "|".join(row) + "|"
print sep
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print c
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print count, "solutions in all."
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
>>> from test.test_support import gc_collect
Sending a value into a started generator:
>>> def f():
... print (yield 1)
... yield 2
>>> g = f()
>>> g.next()
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<type 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> c.next()
>>> print seq
[]
>>> c.send(10)
>>> print seq
[10]
>>> c.send(10)
>>> print seq
[10, 20]
>>> c.send(10)
>>> print seq
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[21]>", line 1
SyntaxError: 'yield' outside function
>>> def f(): return lambda x=(yield): 1
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[22]>", line 1
SyntaxError: 'return' with argument inside generator
>>> def f(): x = yield = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[23]>", line 1
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[24]>", line 1
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
File "<doctest test.test_generators.__test__.coroutine[25]>", line 1
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print (yield)
... except ValueError,v:
... print "caught ValueError (%s)" % (v),
>>> import sys
>>> g = f()
>>> g.next()
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print g.gi_frame
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
>>> f().throw("abc") # throw on just-opened generator
Traceback (most recent call last):
...
TypeError: exceptions must be old-style classes or derived from BaseException, not str
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print "exiting"
>>> g = f()
>>> g.next()
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> g.next()
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print "exiting"
>>> g = f()
>>> g.next()
>>> del g; gc_collect()
exiting
>>> class context(object):
... def __enter__(self): pass
... def __exit__(self, *args): print 'exiting'
>>> def f():
... with context():
... yield
>>> g = f()
>>> g.next()
>>> del g; gc_collect()
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception: print 'except'
... finally: print 'finally'
>>> g = f()
>>> g.next()
>>> del g; gc_collect()
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, StringIO
>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
>>> g = f()
>>> g.next()
>>> del g
>>> gc_collect()
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError: 'generator ignored GeneratorExit' in "
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<type 'generator'>
>>> def f(): x = yield
>>> type(f())
<type 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<type 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<type 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<type 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> from test.test_support import gc_collect
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def next(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = it.next()
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, StringIO
>>> old = sys.stderr
>>> try:
... sys.stderr = StringIO.StringIO()
... class Leaker:
... def __del__(self):
... raise RuntimeError
...
... l = Leaker()
... del l
... gc_collect()
... err = sys.stderr.getvalue().strip()
... err.startswith(
... "Exception RuntimeError: RuntimeError() in "
... )
... err.endswith("> ignored")
... len(err.splitlines())
... finally:
... sys.stderr = old
True
True
1
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import test_support, test_generators
test_support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
| jedie/pypyjs-standalone | website/js/pypy.js-0.3.0/lib/modules/test/test_generators.py | Python | mit | 50,985 | [
"VisIt"
] | a4ca60ea1e759d77973514174ab0fe8369c0faef9369b20bc990e8b02ccf8efc |
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.test_depmgr
=========================
Test definitions for plainbox.impl.depmgr module
"""
from unittest import TestCase
from plainbox.impl.depmgr import DependencyCycleError
from plainbox.impl.depmgr import DependencyDuplicateError
from plainbox.impl.depmgr import DependencyMissingError
from plainbox.impl.depmgr import DependencySolver
from plainbox.impl.job import JobDefinition
from plainbox.impl.testing_utils import make_job
class DependencyCycleErrorTests(TestCase):
def setUp(self):
self.A = make_job("A", depends="B")
self.B = make_job("B", depends="A")
self.exc = DependencyCycleError([self.A, self.B, self.A])
def test_job_list(self):
self.assertEqual(self.exc.job_list, [self.A, self.B, self.A])
def test_affected_job(self):
self.assertIs(self.exc.affected_job, self.A)
def test_str(self):
expected = "dependency cycle detected: A -> B -> A"
observed = str(self.exc)
self.assertEqual(expected, observed)
def test_repr(self):
expected = ("<DependencyCycleError job_list:["
"<JobDefinition name:'A' plugin:'dummy'>, "
"<JobDefinition name:'B' plugin:'dummy'>, "
"<JobDefinition name:'A' plugin:'dummy'>]>")
observed = repr(self.exc)
self.assertEqual(expected, observed)
class DependencyMissingErrorTests(TestCase):
def setUp(self):
self.A = make_job("A")
self.exc_direct = DependencyMissingError(
self.A, 'B', DependencyMissingError.DEP_TYPE_DIRECT)
self.exc_resource = DependencyMissingError(
self.A, 'B', DependencyMissingError.DEP_TYPE_RESOURCE)
def test_job(self):
self.assertIs(self.exc_direct.job, self.A)
self.assertIs(self.exc_resource.job, self.A)
def test_affected_job(self):
self.assertIs(self.exc_direct.affected_job, self.A)
self.assertIs(self.exc_resource.affected_job, self.A)
def test_missing_job_name(self):
self.assertEqual(self.exc_direct.missing_job_name, 'B')
self.assertEqual(self.exc_resource.missing_job_name, 'B')
def test_str_direct(self):
expected = "missing dependency: 'B' (direct)"
observed = str(self.exc_direct)
self.assertEqual(expected, observed)
def test_str_resoucee(self):
expected = "missing dependency: 'B' (resource)"
observed = str(self.exc_resource)
self.assertEqual(expected, observed)
def test_repr_direct(self):
expected = ("<DependencyMissingError "
"job:<JobDefinition name:'A' plugin:'dummy'> "
"missing_job_name:'B' "
"dep_type:'direct'>")
observed = repr(self.exc_direct)
self.assertEqual(expected, observed)
def test_repr_resource(self):
expected = ("<DependencyMissingError "
"job:<JobDefinition name:'A' plugin:'dummy'> "
"missing_job_name:'B' "
"dep_type:'resource'>")
observed = repr(self.exc_resource)
self.assertEqual(expected, observed)
class DependencyDuplicateErrorTests(TestCase):
def setUp(self):
self.A = make_job("A")
self.another_A = make_job("A")
self.exc = DependencyDuplicateError(self.A, self.another_A)
def test_job(self):
self.assertIs(self.exc.job, self.A)
def test_duplicate_job(self):
self.assertIs(self.exc.duplicate_job, self.another_A)
def test_affected_job(self):
self.assertIs(self.exc.affected_job, self.A)
def test_str(self):
expected = "duplicate job name: 'A'"
observed = str(self.exc)
self.assertEqual(expected, observed)
def test_repr(self):
expected = ("<DependencyDuplicateError "
"job:<JobDefinition name:'A' plugin:'dummy'> "
"duplicate_job:<JobDefinition name:'A' plugin:'dummy'>>")
observed = repr(self.exc)
self.assertEqual(expected, observed)
class DependencySolverInternalsTests(TestCase):
def test_get_dependency_set_empty(self):
A = make_job('A')
expected = set()
observed = DependencySolver._get_dependency_set(A)
self.assertEqual(expected, observed)
def test_get_dependency_set_direct_one(self):
A = make_job('A', depends='B')
expected = set([("direct", 'B')])
observed = DependencySolver._get_dependency_set(A)
self.assertEqual(expected, observed)
def test_get_dependency_set_direct_two(self):
A = make_job('A', depends='B, C')
expected = set([("direct", 'B'), ("direct", 'C')])
observed = DependencySolver._get_dependency_set(A)
self.assertEqual(expected, observed)
def test_get_job_map_produces_map(self):
A = make_job('A')
B = make_job('B')
expected = {'A': A, 'B': B}
observed = DependencySolver._get_job_map([A, B])
self.assertEqual(expected, observed)
def test_get_job_map_find_duplicates(self):
A = make_job('A')
another_A = make_job('A')
with self.assertRaises(DependencyDuplicateError) as call:
DependencySolver._get_job_map([A, another_A])
self.assertIs(call.exception.job, A)
self.assertIs(call.exception.duplicate_job, another_A)
class TestDependencySolver(TestCase):
def test_empty(self):
observed = DependencySolver.resolve_dependencies([])
expected = []
self.assertEqual(expected, observed)
def test_direct_deps(self):
# This tests the following simple job chain
# A -> B -> C
A = make_job(name='A', depends='B')
B = make_job(name='B', depends='C')
C = make_job(name='C')
job_list = [A, B, C]
expected = [C, B, A]
observed = DependencySolver.resolve_dependencies(job_list)
self.assertEqual(expected, observed)
def test_independent_groups_deps(self):
# This tests two independent job chains
# A1 -> B1
# A2 -> B2
A1 = make_job(name='A1', depends='B1')
B1 = make_job(name='B1',)
A2 = make_job(name='A2', depends='B2')
B2 = make_job(name='B2')
job_list = [A1, B1, A2, B2]
expected = [B1, A1, B2, A2]
observed = DependencySolver.resolve_dependencies(job_list)
self.assertEqual(expected, observed)
def test_visiting_blackend_node(self):
# This tests a visit to already visited job
# A
# B -> A
# A will be visited twice
A = make_job(name='A')
B = make_job(name='B', depends='A')
job_list = [A, B]
expected = [A, B]
observed = DependencySolver.resolve_dependencies(job_list)
self.assertEqual(expected, observed)
def test_resource_deps(self):
# This tests resource deps
# A ~> R
A = make_job(name='A', requires='R.foo == "bar"')
R = make_job(name='R', plugin='resource')
job_list = [A, R]
expected = [R, A]
observed = DependencySolver.resolve_dependencies(job_list)
self.assertEqual(expected, observed)
def test_missing_direct_dependency(self):
# This tests missing dependencies
# A -> (inexisting B)
A = make_job(name='A', depends='B')
job_list = [A]
with self.assertRaises(DependencyMissingError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertIs(call.exception.job, A)
self.assertEqual(call.exception.missing_job_name, 'B')
self.assertEqual(call.exception.dep_type,
call.exception.DEP_TYPE_DIRECT)
def test_missing_resource_dependency(self):
# This tests missing resource dependencies
# A ~> (inexisting R)
A = make_job(name='A', requires='R.attr == "value"')
job_list = [A]
with self.assertRaises(DependencyMissingError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertIs(call.exception.job, A)
self.assertEqual(call.exception.missing_job_name, 'R')
self.assertEqual(call.exception.dep_type,
call.exception.DEP_TYPE_RESOURCE)
def test_dependency_cycle_self(self):
# This tests dependency loops
# A -> A
A = make_job(name='A', depends='A')
job_list = [A]
with self.assertRaises(DependencyCycleError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertEqual(call.exception.job_list, [A, A])
def test_dependency_cycle_simple(self):
# This tests dependency loops
# A -> B -> A
A = make_job(name='A', depends='B')
B = make_job(name='B', depends='A')
job_list = [A, B]
with self.assertRaises(DependencyCycleError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertEqual(call.exception.job_list, [A, B, A])
def test_dependency_cycle_longer(self):
# This tests dependency loops
# A -> B -> C -> D -> B
A = make_job(name='A', depends='B')
B = make_job(name='B', depends='C')
C = make_job(name='C', depends='D')
D = make_job(name='D', depends='B')
job_list = [A, B, C, D]
with self.assertRaises(DependencyCycleError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertEqual(call.exception.job_list, [B, C, D, B])
def test_dependency_cycle_via_resource(self):
# This tests dependency loops
# A -> R -> A
A = make_job(name='A', requires='R.key == "value"')
R = make_job(name='R', depends='A', plugin="resource")
job_list = [A, R]
with self.assertRaises(DependencyCycleError) as call:
DependencySolver.resolve_dependencies(job_list)
self.assertEqual(call.exception.job_list, [A, R, A])
| zyga/plainbox | plainbox/impl/test_depmgr.py | Python | gpl-3.0 | 10,748 | [
"VisIt"
] | 4f730ee2d378b837830ae2b77d617e7382334fec3e50f09bb6e7b30ddf5856db |
# -*- coding: utf-8 -*-
"""
sphinx.cmdline
~~~~~~~~~~~~~~
sphinx-build command-line handling.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import optparse
import traceback
from os import path
from six import text_type, binary_type
from docutils.utils import SystemMessage
from sphinx import __display_version__
from sphinx.errors import SphinxError
from sphinx.application import Sphinx
from sphinx.util import Tee, format_exception_cut_frames, save_traceback
from sphinx.util.console import red, nocolor, color_terminal
from sphinx.util.osutil import abspath, fs_encoding
from sphinx.util.pycompat import terminal_safe
# optparse usage/epilog text.  %%prog is doubled so that the version
# interpolation performed here leaves a literal %prog for optparse to
# expand with the program name later.
USAGE = """\
Sphinx v%s
Usage: %%prog [options] sourcedir outdir [filenames...]
Filename arguments:
without -a and without filenames, write new and changed files.
with -a, write all files.
with filenames, write these.
""" % __display_version__
EPILOG = """\
For more information, visit <http://sphinx-doc.org/>.
"""
class MyFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that emits the usage string verbatim and builds the
    help text from just the description and option sections."""

    def format_usage(self, usage):
        # The USAGE constant is already fully formatted; return as-is.
        return usage

    def format_help(self, formatter):
        sections = [
            self.format_description(formatter) if self.description else None,
            self.format_option_help(formatter) if self.option_list else None,
        ]
        return "\n".join(s for s in sections if s is not None)
def main(argv):
    """Command-line entry point for sphinx-build.

    *argv* is the full argument vector (argv[0] is the program name).
    Parses options, validates the source/output directories, builds the
    configuration overrides, then instantiates a Sphinx application and
    runs the build.  Returns a shell exit status: 0 on success, 1 on
    error, or the application's status code.
    """
    if not color_terminal():
        nocolor()
    parser = optparse.OptionParser(USAGE, epilog=EPILOG, formatter=MyFormatter())
    parser.add_option('--version', action='store_true', dest='version',
                      help='show version information and exit')
    group = parser.add_option_group('General options')
    group.add_option('-b', metavar='BUILDER', dest='builder', default='html',
                     help='builder to use; default is html')
    group.add_option('-a', action='store_true', dest='force_all',
                     help='write all files; default is to only write new and '
                     'changed files')
    group.add_option('-E', action='store_true', dest='freshenv',
                     help='don\'t use a saved environment, always read '
                     'all files')
    group.add_option('-d', metavar='PATH', default=None, dest='doctreedir',
                     help='path for the cached environment and doctree files '
                     '(default: outdir/.doctrees)')
    group.add_option('-j', metavar='N', default=1, type='int', dest='jobs',
                     help='build in parallel with N processes where possible')
    # this option never gets through to this point (it is intercepted earlier)
    # group.add_option('-M', metavar='BUILDER', dest='make_mode',
    #                 help='"make" mode -- as used by Makefile, like '
    #                 '"sphinx-build -M html"')
    group = parser.add_option_group('Build configuration options')
    group.add_option('-c', metavar='PATH', dest='confdir',
                     help='path where configuration file (conf.py) is located '
                     '(default: same as sourcedir)')
    group.add_option('-C', action='store_true', dest='noconfig',
                     help='use no config file at all, only -D options')
    group.add_option('-D', metavar='setting=value', action='append',
                     dest='define', default=[],
                     help='override a setting in configuration file')
    group.add_option('-A', metavar='name=value', action='append',
                     dest='htmldefine', default=[],
                     help='pass a value into HTML templates')
    group.add_option('-t', metavar='TAG', action='append',
                     dest='tags', default=[],
                     help='define tag: include "only" blocks with TAG')
    group.add_option('-n', action='store_true', dest='nitpicky',
                     help='nit-picky mode, warn about all missing references')
    group = parser.add_option_group('Console output options')
    group.add_option('-v', action='count', dest='verbosity', default=0,
                     help='increase verbosity (can be repeated)')
    group.add_option('-q', action='store_true', dest='quiet',
                     help='no output on stdout, just warnings on stderr')
    group.add_option('-Q', action='store_true', dest='really_quiet',
                     help='no output at all, not even warnings')
    group.add_option('-N', action='store_true', dest='nocolor',
                     help='do not emit colored output')
    group.add_option('-w', metavar='FILE', dest='warnfile',
                     help='write warnings (and errors) to given file')
    group.add_option('-W', action='store_true', dest='warningiserror',
                     help='turn warnings into errors')
    group.add_option('-T', action='store_true', dest='traceback',
                     help='show full traceback on exception')
    group.add_option('-P', action='store_true', dest='pdb',
                     help='run Pdb on exception')
    # parse options
    try:
        opts, args = parser.parse_args(list(argv[1:]))
    except SystemExit as err:
        return err.code
    # handle basic options
    if opts.version:
        print('Sphinx (sphinx-build) %s' % __display_version__)
        return 0
    # get paths (first and second positional argument)
    try:
        srcdir = abspath(args[0])
        confdir = abspath(opts.confdir or srcdir)
        if opts.noconfig:
            confdir = None
        if not path.isdir(srcdir):
            print('Error: Cannot find source directory `%s\'.' % srcdir,
                  file=sys.stderr)
            return 1
        if not opts.noconfig and not path.isfile(path.join(confdir, 'conf.py')):
            print('Error: Config directory doesn\'t contain a conf.py file.',
                  file=sys.stderr)
            return 1
        outdir = abspath(args[1])
        if srcdir == outdir:
            print('Error: source directory and destination directory are same.',
                  file=sys.stderr)
            return 1
    except IndexError:
        # fewer than two positional arguments given
        parser.print_help()
        return 1
    except UnicodeError:
        print(
            'Error: Multibyte filename not supported on this filesystem '
            'encoding (%r).' % fs_encoding, file=sys.stderr)
        return 1
    # handle remaining filename arguments
    filenames = args[2:]
    err = 0
    for filename in filenames:
        if not path.isfile(filename):
            print('Error: Cannot find file %r.' % filename, file=sys.stderr)
            err = 1
    if err:
        return 1
    # likely encoding used for command-line arguments
    try:
        locale = __import__('locale')  # due to submodule of the same name
        likely_encoding = locale.getpreferredencoding()
    except Exception:
        likely_encoding = None
    if opts.force_all and filenames:
        print('Error: Cannot combine -a option and filenames.', file=sys.stderr)
        return 1
    if opts.nocolor:
        nocolor()
    doctreedir = abspath(opts.doctreedir or path.join(outdir, '.doctrees'))
    # set up output streams; -q suppresses stdout, -Q also warnings
    status = sys.stdout
    warning = sys.stderr
    error = sys.stderr
    if opts.quiet:
        status = None
    if opts.really_quiet:
        status = warning = None
    if warning and opts.warnfile:
        try:
            warnfp = open(opts.warnfile, 'w')
        except Exception as exc:
            print('Error: Cannot open warning file %r: %s' %
                  (opts.warnfile, exc), file=sys.stderr)
            sys.exit(1)
        # duplicate warnings to the file as well as the console
        warning = Tee(warning, warnfp)
        error = warning
    # -D name=value overrides for conf.py settings
    confoverrides = {}
    for val in opts.define:
        try:
            key, val = val.split('=')
        except ValueError:
            print('Error: -D option argument must be in the form name=value.',
                  file=sys.stderr)
            return 1
        if likely_encoding and isinstance(val, binary_type):
            try:
                val = val.decode(likely_encoding)
            except UnicodeError:
                pass
        confoverrides[key] = val
    # -A name=value values exposed to HTML templates via html_context
    for val in opts.htmldefine:
        try:
            key, val = val.split('=')
        except ValueError:
            print('Error: -A option argument must be in the form name=value.',
                  file=sys.stderr)
            return 1
        try:
            val = int(val)
        except ValueError:
            if likely_encoding and isinstance(val, binary_type):
                try:
                    val = val.decode(likely_encoding)
                except UnicodeError:
                    pass
        confoverrides['html_context.%s' % key] = val
    if opts.nitpicky:
        confoverrides['nitpicky'] = True
    app = None
    try:
        app = Sphinx(srcdir, confdir, outdir, doctreedir, opts.builder,
                     confoverrides, status, warning, opts.freshenv,
                     opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)
        app.build(opts.force_all, filenames)
        return app.statuscode
    except (Exception, KeyboardInterrupt) as err:
        if opts.pdb:
            import pdb
            print(red('Exception occurred while building, starting debugger:'),
                  file=error)
            traceback.print_exc()
            pdb.post_mortem(sys.exc_info()[2])
        else:
            print(file=error)
            if opts.verbosity or opts.traceback:
                traceback.print_exc(None, error)
                print(file=error)
            # give a user-friendly summary for the common failure classes
            if isinstance(err, KeyboardInterrupt):
                print('interrupted!', file=error)
            elif isinstance(err, SystemMessage):
                print(red('reST markup error:'), file=error)
                print(terminal_safe(err.args[0]), file=error)
            elif isinstance(err, SphinxError):
                print(red('%s:' % err.category), file=error)
                print(terminal_safe(text_type(err)), file=error)
            elif isinstance(err, UnicodeError):
                print(red('Encoding error:'), file=error)
                print(terminal_safe(text_type(err)), file=error)
                tbpath = save_traceback(app)
                print(red('The full traceback has been saved in %s, if you want '
                          'to report the issue to the developers.' % tbpath),
                      file=error)
            elif isinstance(err, RuntimeError) and 'recursion depth' in str(err):
                print(red('Recursion error:'), file=error)
                print(terminal_safe(text_type(err)), file=error)
                print(file=error)
                print('This can happen with very large or deeply nested source '
                      'files.  You can carefully increase the default Python '
                      'recursion limit of 1000 in conf.py with e.g.:', file=error)
                print('    import sys; sys.setrecursionlimit(1500)', file=error)
            else:
                print(red('Exception occurred:'), file=error)
                print(format_exception_cut_frames().rstrip(), file=error)
                tbpath = save_traceback(app)
                print(red('The full traceback has been saved in %s, if you '
                          'want to report the issue to the developers.' % tbpath),
                      file=error)
                print('Please also report this if it was a user error, so '
                      'that a better error message can be provided next time.',
                      file=error)
                print('A bug report can be filed in the tracker at '
                      '<https://github.com/sphinx-doc/sphinx/issues>. Thanks!',
                      file=error)
            return 1
| neerajvashistha/pa-dude | lib/python2.7/site-packages/sphinx/cmdline.py | Python | mit | 11,788 | [
"VisIt"
] | 62c09af83dcb905068162ff2828b51b74b4dfb628f62997e9886fc384961c6f4 |
"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy, rk, r2k, gemm
from gpaw.utilities.lapack import diagonalize, general_diagonalize
from gpaw.utilities import unpack
from gpaw.eigensolvers.eigensolver import Eigensolver
class Davidson(Eigensolver):
    """Simple Davidson eigensolver

    It is expected that the trial wave functions are orthonormal
    and the integrals of projector functions and wave functions
    ``nucleus.P_uni`` are already calculated.

    Solution steps are:

    * Subspace diagonalization
    * Calculate all residuals
    * Add preconditioned residuals to the subspace and diagonalize
    """

    def __init__(self, niter=2):
        # niter: number of Davidson iterations per k-point per SCF step.
        Eigensolver.__init__(self)
        self.niter = niter

    def initialize(self, wfs):
        """Allocate the (2n x 2n) subspace matrices for nbands bands."""
        Eigensolver.initialize(self, wfs)
        self.overlap = wfs.overlap
        # Allocate arrays
        # H_nn/S_nn: work blocks; H_2n2n/S_2n2n: full expanded subspace.
        self.H_nn = np.zeros((self.nbands, self.nbands), self.dtype)
        self.S_nn = np.zeros((self.nbands, self.nbands), self.dtype)
        self.H_2n2n = np.empty((2 * self.nbands, 2 * self.nbands),
                               self.dtype)
        self.S_2n2n = np.empty((2 * self.nbands, 2 * self.nbands),
                               self.dtype)
        self.eps_2n = np.empty(2 * self.nbands)

    def estimate_memory(self, mem, gd, dtype, mynbands, nbands):
        """Report the memory footprint of the arrays allocated above."""
        Eigensolver.estimate_memory(self, mem, gd, dtype, mynbands, nbands)
        itemsize = mem.itemsize[dtype]
        mem.subnode('H_nn', nbands * nbands * mem.itemsize[dtype])
        mem.subnode('S_nn', nbands * nbands * mem.itemsize[dtype])
        mem.subnode('H_2n2n', 4 * nbands * nbands * mem.itemsize[dtype])
        mem.subnode('S_2n2n', 4 * nbands * nbands * mem.itemsize[dtype])
        mem.subnode('eps_2n', 2 * nbands * mem.floatsize)

    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do Davidson iterations for the kpoint

        Returns the weighted squared residual norm summed over bands,
        used by the caller as a convergence error measure.
        """
        niter = self.niter
        nbands = self.nbands

        self.subspace_diagonalize(hamiltonian, wfs, kpt)

        H_2n2n = self.H_2n2n
        S_2n2n = self.S_2n2n
        eps_2n = self.eps_2n
        psit2_nG = wfs.matrixoperator.suggest_temporary_buffer()

        self.timer.start('Davidson')
        R_nG = self.Htpsit_nG
        self.calculate_residuals(kpt, wfs, hamiltonian, kpt.psit_nG,
                                 kpt.P_ani, kpt.eps_n, R_nG)
        for nit in range(niter):
            H_2n2n[:] = 0.0
            S_2n2n[:] = 0.0
            error = 0.0
            for n in range(nbands):
                # weight: occupation (or k-point weight) used to scale this
                # band's contribution to the convergence error.
                if kpt.f_n is None:
                    weight = kpt.weight
                else:
                    weight = kpt.f_n[n]
                if self.nbands_converge != 'occupied':
                    if n < self.nbands_converge:
                        weight = kpt.weight
                    else:
                        weight = 0.0
                error += weight * np.vdot(R_nG[n], R_nG[n]).real
                # Diagonal of the first n x n block is the current spectrum.
                H_2n2n[n,n] = kpt.eps_n[n]
                S_2n2n[n,n] = 1.0
                # Expansion vectors: preconditioned residuals.
                psit2_nG[n] = self.preconditioner(R_nG[n], kpt)

            # Calculate projections
            P2_ani = wfs.pt.dict(nbands)
            wfs.pt.integrate(psit2_nG, P2_ani, kpt.q)

            # Hamiltonian matrix
            # <psi2 | H | psi>
            wfs.kin.apply(psit2_nG, self.Htpsit_nG, kpt.phase_cd)
            hamiltonian.apply_local_potential(psit2_nG, self.Htpsit_nG, kpt.s)
            gemm(self.gd.dv, kpt.psit_nG, self.Htpsit_nG, 0.0, self.H_nn, 'c')

            for a, P_ni in kpt.P_ani.items():
                P2_ni = P2_ani[a]
                # PAW atomic corrections to the Hamiltonian matrix elements.
                dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                self.H_nn += np.dot(P2_ni, np.dot(dH_ii, P_ni.T.conj()))

            self.gd.comm.sum(self.H_nn, 0)
            H_2n2n[nbands:, :nbands] = self.H_nn

            # <psi2 | H | psi2>
            r2k(0.5 * self.gd.dv, psit2_nG, self.Htpsit_nG, 0.0, self.H_nn)
            for a, P2_ni in P2_ani.items():
                dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                self.H_nn += np.dot(P2_ni, np.dot(dH_ii, P2_ni.T.conj()))

            self.gd.comm.sum(self.H_nn, 0)
            H_2n2n[nbands:, nbands:] = self.H_nn

            # Overlap matrix
            # <psi2 | S | psi>
            gemm(self.gd.dv, kpt.psit_nG, psit2_nG, 0.0, self.S_nn, "c")

            for a, P_ni in kpt.P_ani.items():
                P2_ni = P2_ani[a]
                dO_ii = wfs.setups[a].dO_ii
                self.S_nn += np.dot(P2_ni, np.inner(dO_ii, P_ni.conj()))

            self.gd.comm.sum(self.S_nn, 0)
            S_2n2n[nbands:, :nbands] = self.S_nn

            # <psi2 | S | psi2>
            rk(self.gd.dv, psit2_nG, 0.0, self.S_nn)
            for a, P2_ni in P2_ani.items():
                dO_ii = wfs.setups[a].dO_ii
                self.S_nn += np.dot(P2_ni, np.dot(dO_ii, P2_ni.T.conj()))

            self.gd.comm.sum(self.S_nn, 0)
            S_2n2n[nbands:, nbands:] = self.S_nn

            # Solve the generalized eigenproblem on rank 0, broadcast result.
            if self.gd.comm.rank == 0:
                general_diagonalize(H_2n2n, eps_2n, S_2n2n)

            self.gd.comm.broadcast(H_2n2n, 0)
            self.gd.comm.broadcast(eps_2n, 0)

            kpt.eps_n[:] = eps_2n[:nbands]

            # Rotate psit_nG
            gemm(1.0, kpt.psit_nG, H_2n2n[:nbands, :nbands],
                 0.0, self.Htpsit_nG)
            gemm(1.0, psit2_nG, H_2n2n[:nbands, nbands:],
                 1.0, self.Htpsit_nG)

            kpt.psit_nG, self.Htpsit_nG = self.Htpsit_nG, kpt.psit_nG

            # Rotate P_uni:
            for a, P_ni in kpt.P_ani.items():
                P2_ni = P2_ani[a]
                gemm(1.0, P_ni.copy(), H_2n2n[:nbands, :nbands], 0.0, P_ni)
                gemm(1.0, P2_ni, H_2n2n[:nbands, nbands:], 1.0, P_ni)

            if nit < niter - 1 :
                # Recompute residuals for the next Davidson sweep.
                wfs.kin.apply(kpt.psit_nG, self.Htpsit_nG, kpt.phase_cd)
                hamiltonian.apply_local_potential(kpt.psit_nG, self.Htpsit_nG,
                                                  kpt.s)
                R_nG = self.Htpsit_nG
                self.calculate_residuals(kpt, wfs, hamiltonian, kpt.psit_nG,
                                         kpt.P_ani, kpt.eps_n, R_nG)

        self.timer.stop('Davidson')
        error = self.gd.comm.sum(error)
        return error
| qsnake/gpaw | gpaw/eigensolvers/davidson.py | Python | gpl-3.0 | 6,380 | [
"GPAW"
] | 8cf32b6f7bdfb4f16efc2e8693a1632d1343cd26784e020be3d6b90e316e316f |
#!/usr/bin/env python3
"""
Template by pypi-mobans
"""
import os
import sys
import codecs
import locale
import platform
from shutil import rmtree
from setuptools import Command, setup, find_packages
# Interpreter-version flags from the pypi-mobans template; kept even though
# this package is Python 3 only (see PYTHON_REQUIRES below).
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
PY33 = sys.version_info < (3, 4)
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# This work around is only if a project supports Python < 3.4
# Work around for locale not being set
# Ensure a usable locale: on non-Windows systems where no locale is set at
# all (common in minimal containers/CI), force C.UTF-8; if even that fails,
# fall back to en_US.UTF-8.
try:
    lc = locale.getlocale()
    pf = platform.system()
    if pf != "Windows" and lc == (None, None):
        locale.setlocale(locale.LC_ALL, "C.UTF-8")
except (ValueError, UnicodeError, locale.Error):
    locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# Package metadata consumed by setup() below.
NAME = "pyexcel"
AUTHOR = "chfw"
VERSION = "0.6.6"
EMAIL = "info@pyexcel.org"
LICENSE = "New BSD"
# NOTE: adjacent string literals concatenate; the original ended the first
# line with "write" and started the next with "data", producing "writedata".
# A trailing space restores the intended sentence.
DESCRIPTION = (
    "A wrapper library that provides one API to read, manipulate and write "
    "data in different excel formats"
)
URL = "https://github.com/pyexcel/pyexcel"
DOWNLOAD_URL = "%s/archive/0.6.6.tar.gz" % URL
FILES = ["README.rst", "CONTRIBUTORS.rst", "CHANGELOG.rst"]
# PyPI search keywords.  Every entry needs its own trailing comma: the
# original was missing one after 'tsvz', silently merging it with 'csv'
# into a single keyword 'tsvzcsv'.
KEYWORDS = [
    "python",
    "tsv",
    "tsvz",
    "csv",
    "csvz",
    "xls",
    "xlsx",
    "ods",
]
# Trove classifiers describing audience, maturity and supported versions.
CLASSIFIERS = [
    "Topic :: Software Development :: Libraries",
    "Programming Language :: Python",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    'Development Status :: 3 - Alpha',
]
PYTHON_REQUIRES = ">=3.6"
INSTALL_REQUIRES = [
    "lml>=0.0.4",
    "pyexcel-io>=0.6.2",
    "texttable>=0.8.2",
]
# Extra setup.py sub-commands; "publish" is registered further down.
SETUP_COMMANDS = {}

PACKAGES = find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"])
# Optional format backends, installable as e.g. ``pip install pyexcel[xls]``.
EXTRAS_REQUIRE = {
    "xls": ['pyexcel-xls>=0.6.0'],
    "xlsx": ['pyexcel-xlsx>=0.6.0'],
    "ods": ['pyexcel-ods3>=0.6.0'],
}
# You do not need to read beyond this line
PUBLISH_COMMAND = "{0} setup.py sdist bdist_wheel upload -r pypi".format(sys.executable)
HERE = os.path.abspath(os.path.dirname(__file__))
# Shell command for tagging a github release via gease (if installed).
GS_COMMAND = ("gease pyexcel v0.6.6 " +
              "Find 0.6.6 in changelog for more details")
NO_GS_MESSAGE = ("Automatic github release is disabled. " +
                 "Please install gease to enable it.")
UPLOAD_FAILED_MSG = (
    'Upload failed. please run "%s" yourself.' % PUBLISH_COMMAND)
class PublishCommand(Command):
    """``setup.py publish``: clean old builds, optionally tag a github
    release via gease, then build and upload distributions to pypi."""

    description = "Build and publish the package on github and pypi"
    user_options = []

    @staticmethod
    def status(s):
        """Print *s* in bold so build phases stand out in the log."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        """Nothing to initialize."""
        pass

    def finalize_options(self):
        """Nothing to finalize."""
        pass

    def run(self):
        try:
            self.status("Removing previous builds...")
            rmtree(os.path.join(HERE, "dist"))
            rmtree(os.path.join(HERE, "build"))
            rmtree(os.path.join(HERE, "pyexcel.egg-info"))
        except OSError:
            # Nothing left over to clean up.
            pass

        self.status("Building Source and Wheel (universal) distribution...")
        if has_gease():
            ok_to_upload = os.system(GS_COMMAND) == 0
        else:
            self.status(NO_GS_MESSAGE)
            ok_to_upload = True
        if ok_to_upload and os.system(PUBLISH_COMMAND) != 0:
            self.status(UPLOAD_FAILED_MSG)
        sys.exit()
# Register the custom command so ``python setup.py publish`` works.
SETUP_COMMANDS["publish"] = PublishCommand
def has_gease():
    """Return True when the gease github-release helper is importable.

    Visit http://github.com/moremoban/gease for more info.
    """
    try:
        __import__("gease")
    except ImportError:
        return False
    return True
def read_files(*files):
    """Concatenate the named files, each terminated by a newline."""
    return "".join(read(single) + "\n" for single in files)
def read(afile):
    """Read *afile* (relative to this directory) with doctest blocks and
    Sphinx substitution lines filtered out."""
    full_path = os.path.join(HERE, afile)
    with codecs.open(full_path, "r", "utf-8") as opened:
        return "".join(filter_out_test_code(opened))
def filter_out_test_code(file_handle):
    """Yield lines from *file_handle*, dropping ``.. testcode:`` blocks and
    any line containing the |version| or |today| Sphinx substitutions."""
    in_test_block = False
    for line in file_handle.readlines():
        if line.startswith(".. testcode:"):
            in_test_block = True
            continue
        if in_test_block:
            # Indented or blank lines still belong to the doctest block.
            if line.startswith(" ") or not line.strip():
                continue
            # First flush-left, non-blank line ends the block and is kept.
            in_test_block = False
            yield line
        elif "|version|" not in line and "|today|" not in line:
            yield line
if __name__ == "__main__":
    # Hand everything to setuptools; all metadata constants are defined
    # above and the custom "publish" command comes via SETUP_COMMANDS.
    setup(
        test_suite="tests",
        name=NAME,
        author=AUTHOR,
        version=VERSION,
        author_email=EMAIL,
        description=DESCRIPTION,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=read_files(*FILES),
        license=LICENSE,
        keywords=KEYWORDS,
        python_requires=PYTHON_REQUIRES,
        extras_require=EXTRAS_REQUIRE,
        tests_require=["nose"],
        install_requires=INSTALL_REQUIRES,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        classifiers=CLASSIFIERS,
        cmdclass=SETUP_COMMANDS
    )
| chfw/pyexcel | setup.py | Python | bsd-3-clause | 5,614 | [
"VisIt"
] | 30339fe3c47066a593292413d33c5209f3c92bf72b25b2b89780450f5bdcf944 |
# Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from collections import namedtuple
from pyang import plugin
_COPYRIGHT_NOTICE = """
// DO NOT EDIT
// generated by pyang using OpenConfig https://github.com/openconfig/public
//
// Copyright (C) 2014-2019 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by pyang. DO NOT EDIT.
"""
# How a generated Go Equal() method compares a given struct field:
EQUAL_TYPE_LEAF = 0       # scalar compare (== / bytes.Compare for []byte)
EQUAL_TYPE_ARRAY = 1      # element-wise slice compare
EQUAL_TYPE_MAP = 2        # keyed compare of list-of-struct fields
EQUAL_TYPE_CONTAINER = 3  # recursive Equal() on a nested struct
def pyang_plugin_init():
    """Entry point called by pyang to register this output plugin."""
    plugin.register_plugin(GolangPlugin())
class GolangPlugin(plugin.PyangPlugin):
    """pyang output plugin emitting Go configuration structs
    (selected with ``--format golang``)."""

    def __init__(self, name=None):
        super(GolangPlugin, self).__init__(name=name)
        # Accept several YANG modules on one command line.
        self.multiple_modules = True

    def add_output_format(self, fmts):
        fmts['golang'] = self

    def emit(self, ctx, modules, fd):
        # Per-run scratch state is hung off the pyang context object.
        ctx.golang_identity_map = {}
        ctx.golang_typedef_map = {}
        ctx.golang_struct_def = []
        ctx.golang_struct_names = {}
        ctx.emitted_type_names = {}
        ctx.prefix_rel = {}
        ctx.module_deps = []
        for module in modules:
            check_module_deps(ctx, module)

        # visit yang statements
        visit_modules(ctx)
        # emit bgp_configs
        emit_go(ctx, fd)
def visit_modules(ctx):
    """Walk all dependency modules: typedefs and identities first (so that
    containers can resolve them), then the container trees."""
    for dep in ctx.module_deps:
        visit_typedef(ctx, dep)
        visit_identity(ctx, dep)
    for dep in ctx.module_deps:
        visit_children(ctx, dep, dep.i_children)
def emit_go(ctx, fd):
    """Write the generated Go source (header, typedefs, identities and
    struct definitions) to *fd*."""
    # Emit innermost structs first so types exist before they are used.
    ctx.golang_struct_def.reverse()

    generate_header(fd)
    generate_common_functions(fd)

    for mod in ctx.module_deps:
        if mod not in _module_excluded:
            emit_typedef(ctx, mod, fd)
            emit_identity(ctx, mod, fd)

    # Emit each uniquely named struct exactly once.
    emitted = set()
    for struct in ctx.golang_struct_def:
        name = struct.uniq_name
        if name not in emitted:
            emitted.add(name)
            emit_class_def(ctx, struct, name, struct.module_prefix, fd)
def check_module_deps(ctx, mod):
    """Depth-first walk of *mod*'s imports, recording the mapping from a
    module's own prefix to the prefix used locally, and building an
    ordered list of dependency modules on the context."""
    own_prefix = mod.i_prefix
    for local_prefix, value in mod.i_prefixes.items():
        dep = ctx.get_module(value[0])
        if dep is None:
            continue
        if dep.i_prefix != own_prefix:
            # Recurse into modules imported under a different prefix.
            check_module_deps(ctx, dep)
        ctx.prefix_rel[dep.i_prefix] = local_prefix
        if (dep not in ctx.module_deps
                and dep.i_modulename not in _module_excluded):
            ctx.module_deps.append(dep)
def dig_leafref(type_obj):
    """Follow a chain of leafref types until a concrete type statement
    is reached, and return that statement."""
    reftype = type_obj.i_type_spec.i_target_node.search_one('type')
    while is_leafref(reftype):
        reftype = reftype.i_type_spec.i_target_node.search_one('type')
    return reftype
def emit_class_def(ctx, stmt, struct_name, prefix, fd):
    """Emit the Go struct for container *stmt* plus, for non-state
    containers, an ``Equal`` method comparing all fields.

    *struct_name* is the unique name chosen for the container,
    *prefix* the YANG module prefix used in generated comments,
    and *fd* the output stream.
    """
    # A container that wraps nothing but a list is flattened away.
    if len(stmt.i_children) == 1 and is_list(stmt.i_children[0]):
        return

    print('// struct for container %s:%s.' % (prefix, stmt.arg), file=fd)
    emit_description(stmt, fd)
    print('type %s struct {' % convert_to_golang(struct_name), file=fd)

    # (field name, Go type, EQUAL_TYPE_*, key data) per emitted field,
    # consumed below when generating Equal().
    equal_elems = []

    for child in stmt.i_children:

        if child.path in _path_exclude:
            continue

        container_or_list_name = child.uniq_name
        val_name_go = convert_to_golang(child.arg)
        child_prefix = get_orig_prefix(child.i_orig_module)
        tag_name = child.uniq_name.lower()
        equal_type = EQUAL_TYPE_LEAF
        equal_data = None
        print('// original -> %s:%s' % (child_prefix, container_or_list_name), file=fd)

        # case leaf
        if is_leaf(child):

            type_obj = child.search_one('type')
            type_name = type_obj.arg

            # case identityref
            if is_identityref(type_obj):
                emit_type_name = convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])

            # case leafref
            elif is_leafref(type_obj):
                if type_obj.search_one('path').arg.startswith('../config'):
                    continue

                t = dig_leafref(type_obj)
                if is_translation_required(t):
                    print('// %s:%s\'s original type is %s.' % (child_prefix, container_or_list_name, t.arg), file=fd)
                    emit_type_name = translate_type(t.arg)
                elif is_identityref(t):
                    emit_type_name = convert_to_golang(t.search_one('base').arg.split(':')[-1])
                else:
                    emit_type_name = t.arg

            # case embeded enumeration
            elif is_enum(type_obj):
                emit_type_name = val_name_go

            # case translation required
            elif is_translation_required(type_obj):
                print('// %s:%s\'s original type is %s.' % (child_prefix, container_or_list_name, type_name), file=fd)
                emit_type_name = translate_type(type_name)

            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = type_name

            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                # print(t.golang_name, file=sys.stderr)
                emit_type_name = t.golang_name

        # case 'case'
        if is_case(child):
            continue

        if is_choice(child) and is_enum_choice(child):
            emit_type_name = val_name_go

        # case leaflist
        if is_leaflist(child):
            type_obj = child.search_one('type')
            type_name = type_obj.arg
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            equal_type = EQUAL_TYPE_ARRAY

            # case leafref
            if is_leafref(type_obj):
                t = dig_leafref(type_obj)
                emit_type_name = '[]' + t.arg

            # case identityref
            elif is_identityref(type_obj):
                emit_type_name = '[]' + convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])

            # case translation required
            elif is_translation_required(type_obj):
                print('// original type is list of %s' % type_obj.arg, file=fd)
                emit_type_name = '[]' + translate_type(type_name)

            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = '[]' + type_name

            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                emit_type_name = '[]' + t.golang_name

        # case container
        elif is_container(child) or (is_choice(child) and not is_enum_choice(child)):
            key = child_prefix + ':' + container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = t.golang_name
            if len(t.i_children) == 1 and is_list(t.i_children[0]):
                # Flattened single-list container becomes a slice field
                # compared by key in Equal().
                c = t.i_children[0]
                emit_type_name = '[]' + c.golang_name
                equal_type = EQUAL_TYPE_MAP
                equal_data = c.search_one('key').arg
                leaf = c.search_one('leaf').search_one('type')
                if leaf.arg == 'leafref' and leaf.search_one('path').arg.startswith('../config'):
                    equal_data = 'config.' + equal_data
            else:
                emit_type_name = t.golang_name
                equal_type = EQUAL_TYPE_CONTAINER

        # case list
        elif is_list(child):
            key = child_prefix + ':' + container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            emit_type_name = '[]' + t.golang_name
            equal_type = EQUAL_TYPE_MAP
            equal_data = child.search_one('key').arg

        if is_container(child):
            name = emit_type_name
            if name.startswith(convert_to_golang(struct_name)) and name.endswith("Config"):
                tag_name = 'config'
                val_name_go = 'Config'
            elif name.startswith(convert_to_golang(struct_name)) and name.endswith("State"):
                tag_name = 'state'
                val_name_go = 'State'

        emit_description(child, fd=fd)
        print('  {0}\t{1} `mapstructure:"{2}" json:"{2},omitempty"`'.format(val_name_go, emit_type_name, tag_name), file=fd)

        equal_elems.append((val_name_go, emit_type_name, equal_type, equal_data))

    print('}', file=fd)

    # Generate Equal() for everything except *-state structs.
    if not struct_name.endswith('state'):
        print('func (lhs *{0}) Equal(rhs *{0}) bool {{'.format(convert_to_golang(struct_name)), file=fd)
        print('if lhs == nil || rhs == nil {', file=fd)
        print('return false', file=fd)
        print('}', file=fd)

        for val_name, type_name, typ, elem in equal_elems:
            if val_name == 'State':
                continue
            if typ == EQUAL_TYPE_LEAF:
                if type_name == '[]byte':
                    print('if bytes.Compare(lhs.{0}, rhs.{0}) != 0 {{'.format(val_name), file=fd)
                else:
                    print('if lhs.{0} != rhs.{0} {{'.format(val_name), file=fd)
                print('return false', file=fd)
                print('}', file=fd)
            elif typ == EQUAL_TYPE_CONTAINER:
                print('if !lhs.{0}.Equal(&(rhs.{0})) {{'.format(val_name), file=fd)
                print('return false', file=fd)
                print('}', file=fd)
            elif typ == EQUAL_TYPE_ARRAY:
                print('if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name), file=fd)
                print('return false', file=fd)
                print('}', file=fd)
                print('for idx, l := range lhs.{0} {{'.format(val_name), file=fd)
                if type_name == '[][]byte':
                    print('if bytes.Compare(l, rhs.{0}[idx]) != 0 {{'.format(val_name), file=fd)
                else:
                    print('if l != rhs.{0}[idx] {{'.format(val_name), file=fd)
                print('return false', file=fd)
                print('}', file=fd)
                print('}', file=fd)
            elif typ == EQUAL_TYPE_MAP:
                # Build a lookup map keyed by the list key(s), then require
                # every rhs entry to match an lhs entry.
                print('if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name), file=fd)
                print('return false', file=fd)
                print('}', file=fd)
                print('{', file=fd)
                print('lmap := make(map[string]*{0})'.format(type_name[2:]), file=fd)
                print('for i, l := range lhs.{0} {{'.format(val_name), file=fd)
                print('lmap[mapkey(i, string({0}))] = &lhs.{1}[i]'.format(' + '.join('l.{0}'.format(convert_to_golang(v)) for v in elem.split(' ')), val_name), file=fd)
                print('}', file=fd)
                print('for i, r := range rhs.{0} {{'.format(val_name), file=fd)
                print('if l, y := lmap[mapkey(i, string({0}))]; !y {{'.format('+'.join('r.{0}'.format(convert_to_golang(v)) for v in elem.split(' '))), file=fd)
                print('return false', file=fd)
                print('} else if !r.Equal(l) {', file=fd)
                print('return false', file=fd)
                print('}', file=fd)
                print('}', file=fd)
                print('}', file=fd)
            else:
                sys.stderr.write("invalid equal type %s", typ)

        print('return true', file=fd)
        print('}', file=fd)
def get_orig_prefix(mod):
    """Return the prefix of the original (non-augmenting) module of *mod*.

    Follows ``i_orig_module`` links up to the root module and returns that
    module's ``i_prefix``.  The original code dropped the ``return`` on the
    recursive call, so any module with an ``i_orig_module`` yielded None;
    fixed here.
    """
    orig = mod.i_orig_module
    if orig:
        return get_orig_prefix(orig)
    return mod.i_prefix
def get_path(c):
    """Return the slash-separated, prefix-qualified YANG path of *c*.

    The root statement (no parent) maps to the empty string.
    """
    if c.parent is None:
        return ''
    qualifier = ''
    if hasattr(c, 'i_module'):
        prefix_stmt = c.i_module.search_one('prefix')
        if prefix_stmt:
            qualifier = prefix_stmt.arg + ":"
    return get_path(c.parent) + "/" + qualifier + c.arg
# define container embedded enums
def define_enum(ctx, mod, c):
    """Register the container-embedded enumeration *c* in the typedef map
    under *mod*'s prefix, stamping its path and Go name."""
    c.path = get_path(c)
    c.golang_name = convert_to_golang(c.arg)
    ctx.golang_typedef_map.setdefault(mod.i_prefix, {})[c.arg] = c
def visit_children(ctx, mod, children):
    """Recursively walk *children* of module *mod*, assigning unique names
    and collecting struct/enum definitions on the context.

    Disambiguates the very common 'config'/'state' child names by
    prefixing the parent's unique name, and keeps only the richest
    definition when the same prefixed name is seen more than once.
    """
    for c in children:
        if is_case(c):
            prefix = get_orig_prefix(c.parent.i_orig_module)
            c.i_orig_module = c.parent.i_orig_module
        else:
            prefix = get_orig_prefix(c.i_orig_module)

        c.uniq_name = c.arg
        if c.arg == 'config':
            c.uniq_name = c.parent.uniq_name + '-config'

        elif c.arg == 'state':
            c.uniq_name = c.parent.uniq_name + '-state'

        elif c.arg == 'graceful-restart' and prefix == 'bgp-mp':
            # hand-picked rename to avoid clashing with bgp's own
            # graceful-restart container
            c.uniq_name = 'mp-graceful-restart'

        if is_leaf(c) and is_enum(c.search_one('type')):
            define_enum(ctx, mod, c)

        elif is_list(c) or is_container(c) or is_choice(c):
            c.golang_name = convert_to_golang(c.uniq_name)

            if is_choice(c):
                # Replace the choice node's children by the flattened
                # children of all its cases.
                picks = pickup_choice(c)
                c.i_children = picks
                if is_enum_choice(c):
                    define_enum(ctx, mod, c)
                    continue

            prefix_name = prefix + ':' + c.uniq_name
            if prefix_name in ctx.golang_struct_names:
                # Same name seen before: keep whichever definition has
                # more children (the richer/augmented one).
                ext_c = ctx.golang_struct_names.get(prefix_name)
                ext_c_child_count = len(getattr(ext_c, "i_children"))
                current_c_child_count = len(getattr(c, "i_children"))
                if ext_c_child_count < current_c_child_count:
                    c.module_prefix = prefix
                    ctx.golang_struct_names[prefix_name] = c
                    idx = ctx.golang_struct_def.index(ext_c)
                    ctx.golang_struct_def[idx] = c
            else:
                c.module_prefix = prefix
                ctx.golang_struct_names[prefix_name] = c
                ctx.golang_struct_def.append(c)

        c.path = get_path(c)
        # print(c.path, file=sys.stderr)
        if hasattr(c, 'i_children'):
            visit_children(ctx, mod, c.i_children)
def pickup_choice(c):
    """Return the flattened children of every 'case' statement under the
    choice statement *c*."""
    picked = []
    for child in c.i_children:
        if is_case(child):
            picked.extend(child.i_children)
    return picked
def get_type_spec(stmt):
    """Return the type-spec name of *stmt*'s first substatement that
    carries one, or None when no substatement is typed."""
    return next(
        (s.i_type_spec.name for s in stmt.substmts
         if hasattr(s, 'i_type_spec')),
        None)
def visit_typedef(ctx, mod):
    """Index every typedef of *mod*, keyed both by the module's own prefix
    and by the prefix dependent modules use to refer to it."""
    typedefs = {}
    for stmt in mod.substmts:
        if is_typedef(stmt):
            stmt.path = get_path(stmt)
            stmt.golang_name = convert_to_golang(stmt.arg)
            typedefs[stmt.arg] = stmt

    own_prefix = mod.i_prefix
    ctx.golang_typedef_map[own_prefix] = typedefs
    # Also register under the relative prefix seen from importers.
    ctx.golang_typedef_map[ctx.prefix_rel[own_prefix]] = typedefs
def visit_identity(ctx, mod):
    """Index every identity of *mod* and attach each derived identity to
    its base identity's substatement list (mutates the statement tree).

    The map is registered under both the module's own prefix and the
    relative prefix used by importing modules.
    """
    prefix = mod.i_prefix
    child_map = {}
    for stmt in mod.substmts:
        if is_identity(stmt):
            name = stmt.arg
            stmt.golang_name = convert_to_golang(name)
            # print('stmt.golang_name = "%s"' % stmt.golang_name, file=sys.stderr)
            child_map[name] = stmt

            base = stmt.search_one('base')
            if base:
                base_name = base.arg
                if ':' in base_name:
                    # Base lives in another module: attach there if that
                    # module was already visited.
                    base_prefix, base_name = base_name.split(':', 1)
                    if base_prefix in ctx.golang_identity_map:
                        ctx.golang_identity_map[base_prefix][base_name].substmts.append(stmt)
                else:
                    # Base is in this module; it must appear before its
                    # derivations for this lookup to succeed.
                    child_map[base_name].substmts.append(stmt)

    ctx.golang_identity_map[prefix] = child_map
    # print('ctx.golang_identity_map["%s"] = %s\n' % (prefix, child_map), file=sys.stderr)
    prefix_rel = ctx.prefix_rel[prefix]
    ctx.golang_identity_map[prefix_rel] = child_map
    # print('ctx.golang_identity_map["%s"] = %s\n' % (prefix_rel, child_map), file=sys.stderr)
def lookup_identity(ctx, default_prefix, identity_name):
    """Resolve *identity_name* (optionally 'prefix:name') via the
    context's identity map."""
    return lookup(ctx.golang_identity_map, default_prefix, identity_name)
def lookup_typedef(ctx, default_prefix, type_name):
    """Resolve *type_name* (optionally 'prefix:name') to its typedef
    statement through ctx.golang_typedef_map."""
    return lookup(ctx.golang_typedef_map, default_prefix, type_name)
def lookup(basemap, default_prefix, key):
    """Look up *key* in the two-level map *basemap*.

    *key* may be qualified as 'prefix:name'; otherwise *default_prefix* is
    used. Splitting is bounded to the first ':' so names that themselves
    contain ':' (produced elsewhere with split(':', 1)) resolve correctly --
    the previous unbounded split raised ValueError for such keys.

    Returns the mapped statement, None when the prefix is known but the name
    is not, and the raw *key* when the prefix is unknown (historic behavior
    preserved for existing callers).
    """
    if ':' in key:
        # maxsplit=1 mirrors the base-name handling in visit_identity().
        pref, name = key.split(':', 1)
    else:
        pref, name = default_prefix, key
    if pref in basemap:
        return basemap[pref].get(name, None)
    return key
def emit_description(stmt, fd):
    """Write the YANG 'description' of *stmt* to *fd* as a Go comment.

    Ensures the text ends with a period; continuation lines are prefixed
    with '// '. Returns None (also when no description exists).
    """
    desc = stmt.search_one('description')
    if desc is None:
        return None
    text = desc.arg
    if not text.endswith('.'):
        text += '.'
    print('// %s' % text.replace('\n', '\n// '), file=fd)
def emit_enum(prefix, name, stmt, substmts, fd):
    """Emit a Go string-based enum for a YANG identity/enumeration/choice.

    Writes to *fd*: the type declaration, the value constants, the
    value<->int maps, and the Validate/Default/DefaultAsNeeded/ToInt
    helpers.

    - Inputs:
        + **prefix**: module prefix, used only in the header comment.
        + **name**: original YANG name of the type.
        + **stmt**: the statement carrying golang_name (and the optional
          'default' substatement).
        + **substmts**: statements providing the enum values (via .arg).
        + **fd**: output file object.
    """
    type_name_org = name
    type_name = stmt.golang_name
    print('// typedef for identity %s:%s.' % (prefix, type_name_org), file=fd)
    emit_description(stmt, fd)
    print('type %s string' % type_name, file=fd)
    const_prefix = convert_const_prefix(type_name_org)
    print('const (', file=fd)
    m = {}
    if is_choice(stmt) and is_enum_choice(stmt):
        # An enum-like choice gets an implicit leading "none" value.
        # Instantiate the namedtuple instead of mutating the class object
        # (the old code set a class attribute on an un-instantiated type).
        none_stmt = namedtuple('Statement', ['arg'])('none')
        substmts = [none_stmt] + substmts
    for sub in substmts:
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        m[sub.arg.lower()] = enum_name
        print(' %s %s = "%s"' % (enum_name, type_name, sub.arg.lower()), file=fd)
    print(')\n', file=fd)
    print('var %sToIntMap = map[%s]int {' % (type_name, type_name), file=fd)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print(' %s: %d,' % (enum_name, i), file=fd)
    print('}\n', file=fd)
    print('var IntTo%sMap = map[int]%s {' % (type_name, type_name), file=fd)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print(' %d: %s,' % (i, enum_name), file=fd)
    print('}\n', file=fd)
    print('func (v %s) Validate() error {' % type_name, file=fd)
    print('if _, ok := %sToIntMap[v]; !ok {' % type_name, file=fd)
    print('return fmt.Errorf("invalid %s: %%s", v)' % type_name, file=fd)
    print('}', file=fd)
    print('return nil', file=fd)
    print('}\n', file=fd)
    if stmt.search_one('default'):
        default = stmt.search_one('default')
        print('func (v %s) Default() %s {' % (type_name, type_name), file=fd)
        print('return %s' % m[default.arg.lower()], file=fd)
        print('}\n', file=fd)
        print('func (v %s) DefaultAsNeeded() %s {' % (type_name, type_name), file=fd)
        print(' if string(v) == "" {', file=fd)
        print(' return v.Default()', file=fd)
        print('}', file=fd)
        print(' return v', file=fd)
        print('}', file=fd)
        print('func (v %s) ToInt() int {' % type_name, file=fd)
        # BUG FIX: this line previously lacked file=fd, so the Go statement
        # went to stdout and the generated source was missing a line here.
        print('_v := v.DefaultAsNeeded()', file=fd)
        print('i, ok := %sToIntMap[_v]' % type_name, file=fd)
    else:
        print('func (v %s) ToInt() int {' % type_name, file=fd)
        print('i, ok := %sToIntMap[v]' % type_name, file=fd)
    # Shared tail of ToInt() for both variants.
    print('if !ok {', file=fd)
    print('return -1', file=fd)
    print('}', file=fd)
    print('return i', file=fd)
    print('}', file=fd)
def emit_typedef(ctx, mod, fd):
    """Emit a Go type declaration for every typedef of module *mod*.

    Enum-like typedefs are delegated to emit_enum(); leafrefs are followed
    to their target type; builtin and known-translated types map directly
    onto a Go spelling; anything else is resolved through
    ctx.golang_typedef_map. Duplicate Go type names are emitted once only.
    """
    prefix = mod.i_prefix
    t_map = ctx.golang_typedef_map[prefix]
    for name, stmt in t_map.items():
        # Hand-written Go implementations exist for these paths.
        if stmt.path in _typedef_exclude:
            continue
        # skip identityref type because currently skip identity
        if get_type_spec(stmt) == 'identityref':
            continue
        type_name_org = name
        type_name = stmt.golang_name
        # Distinct YANG names can camel-case to the same Go name; keep the
        # first occurrence and warn about the rest.
        if type_name in ctx.emitted_type_names:
            print("warning %s: %s has already been emitted from %s."
                  % (prefix + ":" + type_name_org, type_name_org, ctx.emitted_type_names[type_name]),
                  file=sys.stderr)
            continue
        ctx.emitted_type_names[type_name] = prefix + ":" + type_name_org
        t = stmt.search_one('type')
        if not t and is_choice(stmt):
            # A choice without a 'type' acts as an enum over its cases.
            emit_enum(prefix, type_name_org, stmt, stmt.i_children, fd)
        elif is_enum(t):
            emit_enum(prefix, type_name_org, stmt, t.substmts, fd)
        elif is_union(t):
            # Unions are flattened to Go string.
            print('// typedef for typedef %s:%s.' % (prefix, type_name_org), file=fd)
            emit_description(t, fd)
            print('type %s string' % type_name, file=fd)
        else:
            if is_leafref(t):
                # Follow the leafref chain to the concrete target type.
                t = dig_leafref(t)
            print('// typedef for typedef %s:%s.' % (prefix, type_name_org), file=fd)
            if is_builtin_type(t):
                emit_description(t, fd)
                print('type %s %s' % (type_name, t.arg), file=fd)
            elif is_translation_required(t):
                print('// %s:%s\'s original type is %s.' % (prefix, name, t.arg), file=fd)
                emit_description(t, fd)
                print('type %s %s' % (type_name, translate_type(t.arg)), file=fd)
            else:
                # Alias of another generated typedef: resolve its Go name.
                m = ctx.golang_typedef_map
                for k in t.arg.split(':'):
                    m = m[k]
                emit_description(t, fd)
                print('type %s %s' % (type_name, m.golang_name), file=fd)
def emit_identity(ctx, mod, fd):
    """Emit a Go enum for every identity of *mod* that has derived
    identities attached (collected earlier by visit_identity)."""
    for ident_name, ident_stmt in ctx.golang_identity_map[mod.i_prefix].items():
        derived = ident_stmt.search('identity')
        if len(derived) > 0:
            emit_enum(mod.i_prefix, ident_name, ident_stmt, derived, fd)
# --- YANG statement / type predicates ---------------------------------------

def is_reference(s):
    """True for types resolved through another node."""
    return s.arg in ('leafref', 'identityref')

def is_leafref(s):
    return s.arg == 'leafref'

def is_identityref(s):
    return s.arg == 'identityref'

def is_enum(s):
    return s.arg == 'enumeration'

def is_union(s):
    return s.arg == 'union'

def is_typedef(s):
    return s.keyword == 'typedef'

def is_identity(s):
    return s.keyword == 'identity'

def is_leaf(s):
    return s.keyword == 'leaf'

def is_leaflist(s):
    return s.keyword == 'leaf-list'

def is_list(s):
    return s.keyword == 'list'

def is_container(s):
    return s.keyword == 'container'

def is_case(s):
    return s.keyword == 'case'

def is_choice(s):
    return s.keyword == 'choice'

def is_enum_choice(s):
    """True when every case of choice *s* uses a type from _type_enum_case."""
    return all(child.search_one('type').arg in _type_enum_case
               for child in s.i_children)

# Case types that make a choice enum-like.
_type_enum_case = [
    'empty',
]

def is_builtin_type(t):
    """True when *t* maps directly onto a Go builtin (see _type_builtin)."""
    return t.arg in _type_builtin

def is_translation_required(t):
    """True when *t* needs the _type_translation_map rewrite."""
    return t.arg in _type_translation_map

# YANG type name -> Go type spelling.
_type_translation_map = {
    'union': 'string',
    'decimal64': 'float64',
    'boolean': 'bool',
    'empty': 'bool',
    'inet:ip-address': 'string',
    'inet:ip-prefix': 'string',
    'inet:ipv4-address': 'string',
    'inet:as-number': 'uint32',
    'bgp-set-community-option-type': 'string',
    'inet:port-number': 'uint16',
    'yang:timeticks': 'int64',
    'ptypes:install-protocol-type': 'string',
    'binary': '[]byte',
    'route-family': 'bgp.RouteFamily',
    'bgp-capability': 'bgp.ParameterCapabilityInterface',
    'bgp-open-message': '*bgp.BGPMessage',
}

# YANG types shared verbatim with Go.
_type_builtin = [
    "union",
    "int8",
    "int16",
    "int32",
    "int64",
    "string",
    "uint8",
    "uint16",
    "uint32",
    "uint64",
]

# Modules that are never emitted.
_module_excluded = [
    "ietf-inet-types",
    "ietf-yang-types",
]

# Schema paths skipped during generation.
_path_exclude = [
    "/rpol:routing-policy/rpol:defined-sets/rpol:neighbor-sets/rpol:neighbor-set/rpol:neighbor",
    "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:community-sets/bgp-pol:community-set/bgp-pol:community-member",
    "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:ext-community-sets/bgp-pol:ext-community-set/bgp-pol:ext-community-member",
    "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:as-path-sets/bgp-pol:as-path-set/bgp-pol:as-path-set-member",
]

# Typedefs implemented by hand in Go.
_typedef_exclude = [
    "/gobgp:bgp-capability",
    "/gobgp:bgp-open-message",
]
def generate_header(fd):
    """Write the copyright banner, package clause and import block of the
    generated Go file to *fd*."""
    header_lines = (
        _COPYRIGHT_NOTICE,
        'package config',
        '',
        'import (',
        '"fmt"',
        '',
        '"github.com/osrg/gobgp/pkg/packet/bgp"',
        ')',
        '',
    )
    for line in header_lines:
        print(line, file=fd)
def generate_common_functions(fd):
    """Emit the shared Go helper mapkey(index, name): prefers the name and
    falls back to the stringified index for map-typed config fields."""
    for line in ('func mapkey(index int, name string) string {',
                 'if name != "" {',
                 'return name',
                 '}',
                 'return fmt.Sprintf("%v", index)',
                 '}'):
        print(line, file=fd)
def translate_type(key):
    """Map a YANG type name to its Go spelling; unknown names pass through
    unchanged."""
    return _type_translation_map.get(key, key)
def convert_to_golang(type_string):
    """CamelCase a hyphenated YANG name while preserving dots:
    'hoge-hoge' -> 'HogeHoge', 'a.b-c' -> 'A.BC'."""
    def camel(segment):
        return ''.join(word.capitalize() for word in segment.split('-'))
    return '.'.join(camel(segment) for segment in type_string.split('.'))
def convert_const_prefix(type_string):
    """Upper-snake a hyphenated name: 'hoge-hoge' -> 'HOGE_HOGE'."""
    return type_string.upper().replace('-', '_')
def chop_suf(s, suf):
    """Return *s* without the trailing *suf*; *s* unchanged when it does not
    end with *suf*.

    Guards against an empty suffix: ''.endswith('') is True for every
    string and s[:-0] evaluates to '', so the original silently destroyed
    *s* when suf was empty.
    """
    if suf and s.endswith(suf):
        return s[:-len(suf)]
    return s
| j-kato/gobgp | tools/pyang_plugins/bgpyang2golang.py | Python | apache-2.0 | 26,619 | [
"VisIt"
] | 8d853b691a0d53b8c4a374db5b9d3c482b5182d288a3afcaf8b9ab24be0a62e1 |
# Test-suite manifest consumed by rdkit.TestRunner.
# Each entry is (interpreter, script, extra-environment dict).
tests = [("python", "SimilarityPickers.py", {}), ]
# Long-running tests, excluded from the quick pass.
longTests = []
if __name__ == '__main__':
    import sys
    from rdkit import TestRunner
    # RunScript re-reads this manifest; exit status is the failure count.
    failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
    sys.exit(len(failed))
| greglandrum/rdkit | rdkit/SimDivFilters/test_list.py | Python | bsd-3-clause | 224 | [
"RDKit"
] | 4ee5b64ceb2e19bffb55d5f608fbd710f07e2c56944ae7ed929cc1b4d86ab2f7 |
'''
Neuromuscular simulator in Python.
Copyright (C) 2018 Renato Naville Watanabe
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact: renato.watanabe@ufabc.edu.br
'''
from CompartmentNoChannel import CompartmentNoChannel
import numpy as np
from AxonDelay import AxonDelay
import math
from scipy.sparse import lil_matrix
import time
def calcGCoupling(cytR, lComp1, lComp2, dComp1, dComp2):
    """Coupling conductance between two cylindrical compartments, in muS.

    g_c = 200 / (R_axis1 + R_axis2), where each axial resistance is
    R = cytR * length / (pi * radius^2).

    - Inputs:
        + **cytR**: cytoplasmatic resistivity in Ohm.cm.
        + **lComp1, lComp2**: compartment lengths in micrometers.
        + **dComp1, dComp2**: compartment diameters in micrometers.
    - Output:
        + coupling conductance in muS.
    """
    def axial_resistance(length, diameter):
        return (cytR * length) / (math.pi * (diameter / 2.0) ** 2)

    return 200 / (axial_resistance(lComp1, dComp1)
                  + axial_resistance(lComp2, dComp2))
def compGCouplingMatrix(gc):
    """Build the NxN tridiagonal coupling-conductance matrix GC.

    Row i couples compartment i to its neighbours: the diagonal carries
    the negated sum of the adjacent conductances and the off-diagonals
    the conductances themselves, following the GC formula documented in
    the original module.

    - Inputs:
        + **gc**: vector with the coupling conductance of each compartment.
    - Output:
        + the GC matrix (numpy array, shape (len(gc), len(gc))).

    Fixes: uses range() (xrange is Python-2 only) and handles the
    degenerate 0- and 1-compartment inputs, which previously raised on
    the 2-element slice assignment.
    """
    num = len(gc)
    GC = np.zeros((num, num))
    if num == 1:
        GC[0, 0] = -gc[0]  # single compartment: no neighbour to couple to
        return GC
    for i in range(num):
        if i == 0:
            GC[i, i:i+2] = [-gc[i], gc[i]]
        elif i == num - 1:
            GC[i, i-1:i+1] = [gc[i-1], -gc[i-1]]
        else:
            GC[i, i-1:i+2] = [gc[i-1], -gc[i-1] - gc[i], gc[i]]
    return GC
#@profile
def runge_kutta(derivativeFunction, t, x, timeStep, timeStepByTwo, timeStepBySix):
    """One fourth-order Runge-Kutta step for dx/dt = f(t, x).

    - Inputs:
        + **derivativeFunction**: f(t, x), the right-hand side.
        + **t**: current instant.
        + **x**: current state value.
        + **timeStep**: integration step, same unit as t.
        + **timeStepByTwo**, **timeStepBySix**: timeStep/2 and timeStep/6,
          precomputed by the caller for efficiency in the simulation loop.

    Returns x(t + timeStep) using the classic weighting
    (k1 + 2*k2 + 2*k3 + k4) / 6.
    """
    f = derivativeFunction
    slope_start = f(t, x)
    slope_mid1 = f(t + timeStepByTwo, x + timeStepByTwo * slope_start)
    slope_mid2 = f(t + timeStepByTwo, x + timeStepByTwo * slope_mid1)
    slope_end = f(t + timeStep, x + timeStep * slope_mid2)
    # Summation kept in the original order to preserve floating-point
    # results bit-for-bit.
    return x + timeStepBySix * (slope_start + slope_mid1 + slope_mid1
                                + slope_mid2 + slope_mid2 + slope_end)
class MotorUnitNoChannel(object):
    '''
    Class that implements a motor unit model. Encompasses a motoneuron
    and a muscle unit.
    '''

    def __init__(self, conf, pool, index, kind, muscleThickness, skinThickness):
        '''
        Constructor

        - Inputs:
            + **conf**: Configuration object with the simulation parameters.

            + **pool**: string with Motor unit pool to which the motor
            unit belongs.

            + **index**: integer corresponding to the motor unit order in
            the pool, according to the Henneman's principle (size principle).

            + **kind**: string with the type of the motor unit. It can
            be *S* (slow), *FR* (fast and resistant), and
            *FF* (fast and fatigable).

            + **muscleThickness**: muscle thickness, in mm, used to draw
            the muscle-unit position for the surface-EMG model.

            + **skinThickness**: skin thickness, in mm, used in the EMG
            attenuation and widening computation.

        NOTE(review): this module targets Python 2 (xrange and the exec
        statement below) -- porting to Python 3 requires rewriting the
        exec-based stimulus-modulation function.
        '''
        ## Configuration object with the simulation parameters.
        self.conf = conf
        ## String with the type of the motor unit. It can be
        ## *S* (slow), *FR* (fast and resistant) and
        ## *FF** (fast and fatigable).
        self.kind = kind
        self.pool = pool
        # Neural compartments
        ## The instant of the last spike of the Motor unit
        ## at the Soma compartment.
        self.tSomaSpike = float("-inf")
        NumberOfAxonNodes = int(conf.parameterSet('NumberAxonNodes', pool, index))
        # Compartment layout: dendrite, soma, then alternating
        # internode/node pairs for each axon node.
        compartmentsList = ['dendrite', 'soma']
        for i in xrange(0, NumberOfAxonNodes):
            compartmentsList.append('internode')
            compartmentsList.append('node')
        ## Integer corresponding to the motor unit order in the pool, according to the Henneman's principle (size principle).
        self.index = int(index)
        ## Dictionary of Compartment of the Motor Unit.
        self.compartment = dict()
        ## Value of the membrane potential, in mV, that is considered a spike.
        self.threshold_mV = conf.parameterSet('threshold', pool, index)
        ## Anatomical position of the neuron, in mm.
        self.position_mm = conf.parameterSet('position', pool, index)
        # EMG data
        self.MUSpatialDistribution = conf.parameterSet('MUSpatialDistribution', pool, 0)
        # NOTE(review): the EMG attributes below depend on the 'random'
        # spatial distribution (attenuationToSkin etc.); this assumes
        # 'random' is the only supported distribution -- verify.
        if self.MUSpatialDistribution == 'random':
            # Draw a uniformly random point within the muscle cross-section.
            radius = (muscleThickness/2) * np.random.uniform(0.0, 1.0)
            angle = 2.0 * math.pi * np.random.uniform(0.0, 1.0)
            x = radius * math.sin(angle)
            y = radius * math.cos(angle)
            ## Anatomical coordinate of the muscle unit in a muscle section, in (mm,mm).
            self.muSectionPosition_mm = [x, y]
            ## Distance of the MU to the EMG elctrode, in mm.
            self.distance_mm = math.sqrt((x + muscleThickness/2.0 +
                                          skinThickness)**2 + y**2)
            ## Attenuation of the MUAP amplitude, as measured in the electrode.
            self.attenuationToSkin = math.exp(-self.distance_mm / conf.EMGAttenuation_mm1)
            ## Widening of the MUAP duration, as measured in the electrode.
            self.timeWidening = 1 + conf.EMGWidening_mm1 * self.distance_mm
            ## Type of the Hermitez-Rodiguez curve. It can be 1 or 2.
            self.hrType = np.random.random_integers(1, 2)
            ## MUAP amplitude in mV.
            self.ampEMG_mV = conf.parameterSet('EMGAmplitude', pool, index)
            self.ampEMG_mV = self.ampEMG_mV * self.attenuationToSkin
            ## MUAP time constant, in ms.
            self.timeCteEMG_ms = conf.parameterSet('EMGDuration', pool, index)
            self.timeCteEMG_ms = self.timeCteEMG_ms * self.timeWidening
        # Build the compartment objects (no ionic channels in this model).
        for i in xrange(len(compartmentsList)):
            self.compartment[i] = CompartmentNoChannel(compartmentsList[i], conf, pool, index, self.kind)
        ## Number of compartments.
        self.compNumber = len(self.compartment)
        ## Vector with membrane potential,in mV, of all compartments.
        self.v_mV = np.zeros((self.compNumber), dtype = np.float64)
        ## Vector with the last instant of spike of all compartments.
        self.tSpikes = np.zeros((self.compNumber), dtype = np.float64)
        # Axial coupling conductances between neighbouring compartments.
        gCoupling_muS = np.zeros_like(self.v_mV, dtype = 'd')
        for i in xrange(len(self.compartment)-1):
            gCoupling_muS[i] = calcGCoupling(float(conf.parameterSet('cytR', pool, index)),
                                             self.compartment[i].length_mum,
                                             self.compartment[i + 1].length_mum,
                                             self.compartment[i].diameter_mum,
                                             self.compartment[i + 1].diameter_mum)
        # Per-compartment passive electrical properties.
        gLeak = np.zeros_like(self.v_mV, dtype = 'd')
        capacitance_nF = np.zeros_like(self.v_mV, dtype = 'd')
        EqPot = np.zeros_like(self.v_mV, dtype = 'd')
        IPump = np.zeros_like(self.v_mV, dtype = 'd')
        compLength = np.zeros_like(self.v_mV, dtype = 'd')
        for i in xrange(len(self.compartment)):
            capacitance_nF[i] = self.compartment[i].capacitance_nF
            gLeak[i] = self.compartment[i].gLeak_muS
            EqPot[i] = self.compartment[i].EqPot_mV
            IPump[i] = self.compartment[i].IPump_nA
            compLength[i] = self.compartment[i].length_mum
            # Start every compartment at its equilibrium potential.
            self.v_mV[i] = self.compartment[i].EqPot_mV
        ## Vector with the inverse of the capacitance of all compartments.
        self.capacitanceInv = 1.0 / capacitance_nF
        ## Vector with current, in nA, of each compartment coming from other elements of the model. For example
        ## from ionic channels and synapses.
        self.iIonic = np.full_like(self.v_mV, 0.0)
        ## Vector with the current, in nA, injected in each compartment.
        self.iInjected = np.zeros_like(self.v_mV, dtype = 'd')
        #self.iInjected = np.array([0, 10.0])
        GC = compGCouplingMatrix(gCoupling_muS)
        GL = -np.diag(gLeak)
        ## Matrix of the conductance of the motoneuron. Multiplied by the vector self.v_mV,
        ## results in the passive currents of each compartment.
        self.G = np.float64(GC + GL)
        # Constant current term restoring each compartment toward EqPot.
        self.EqCurrent_nA = np.dot(-GL, EqPot) + IPump
        ## index of the soma compartment.
        self.somaIndex = compartmentsList.index('soma')
        ## index of the last compartment.
        self.lastCompIndex = self.compNumber - 1
        ## Refractory period, in ms, of the motoneuron.
        self.MNRefPer_ms = float(conf.parameterSet('MNSomaRefPer', pool, index))
        # delay
        ## String with type of the nerve. It can be PTN (posterior tibial nerve) or CPN
        ## (common peroneal nerve).
        if pool == 'SOL' or pool == 'MG' or pool == 'LG':
            self.nerve = 'PTN'
        elif pool == 'TA':
            self.nerve = 'CPN'
        ## Distance, in m, of the stimulus position to the terminal.
        self.stimulusPositiontoTerminal = float(conf.parameterSet('stimDistToTerm_' + self.nerve, pool, index))
        ## AxonDelay object of the motor unit.
        if NumberOfAxonNodes == 0:
            dynamicNerveLength = 0
        else:
            # Explicitly simulated axon length (internodes/nodes), in m.
            dynamicNerveLength = np.sum(compLength[2:-1]) * 1e-6
        self.nerveLength = float(conf.parameterSet('nerveLength_' + self.nerve, pool, index))
        delayLength = self.nerveLength - dynamicNerveLength
        # When the stimulation site lies within the delay line, the delay
        # object also models the stimulus entry point.
        if self.stimulusPositiontoTerminal < delayLength:
            self.Delay = AxonDelay(conf, self.nerve, pool, delayLength, self.stimulusPositiontoTerminal, index)
            self.stimulusCompartment = 'delay'
        else:
            self.Delay = AxonDelay(conf, self.nerve, pool, delayLength, -1, index)
            self.stimulusCompartment = -1
        # Nerve stimulus function
        self.stimulusMeanFrequency_Hz = float(conf.parameterSet('stimFrequency_' + self.nerve, pool, 0))
        self.stimulusPulseDuration_ms = float(conf.parameterSet('stimPulseDuration_' + self.nerve, pool, 0))
        self.stimulusIntensity_mA = float(conf.parameterSet('stimIntensity_' + self.nerve, pool, 0))
        self.stimulusStart_ms = float(conf.parameterSet('stimStart_' + self.nerve, pool, 0))
        self.stimulusStop_ms = float(conf.parameterSet('stimStop_' + self.nerve, pool, 0))
        self.stimulusModulationStart_ms = float(conf.parameterSet('stimModulationStart_' + self.nerve, pool, 0))
        self.stimulusModulationStop_ms = float(conf.parameterSet('stimModulationStop_' + self.nerve, pool, 0))
        # Builds the modulation function from a configuration-supplied
        # expression (Python 2 exec statement; the expression is trusted
        # configuration, not external input).
        exec 'def axonStimModulation(t): return ' + conf.parameterSet('stimModulation_' + self.nerve, pool, 0)
        startStep = int(np.rint(self.stimulusStart_ms / self.conf.timeStep_ms))
        self.axonStimModulation = axonStimModulation
        ## Vector with the nerve stimulus, in mA.
        self.nerveStimulus_mA = np.zeros((int(np.rint(conf.simDuration_ms/conf.timeStep_ms)), 1), dtype = float)
        # Precompute the stimulus waveform: rectangular pulses at the
        # (possibly modulated) stimulation frequency.
        for i in xrange(len(self.nerveStimulus_mA)):
            if (i * self.conf.timeStep_ms >= self.stimulusStart_ms and i * self.conf.timeStep_ms <= self.stimulusStop_ms):
                if (i * self.conf.timeStep_ms > self.stimulusModulationStart_ms and i * self.conf.timeStep_ms < self.stimulusModulationStop_ms):
                    stimulusFrequency_Hz = self.stimulusMeanFrequency_Hz + axonStimModulation(i * self.conf.timeStep_ms)
                else:
                    stimulusFrequency_Hz = self.stimulusMeanFrequency_Hz
                if stimulusFrequency_Hz > 0:
                    stimulusPeriod_ms = 1000.0 / stimulusFrequency_Hz
                    numberOfSteps = int(np.rint(stimulusPeriod_ms / self.conf.timeStep_ms))
                    if ((i - startStep) % numberOfSteps == 0):
                        self.nerveStimulus_mA[i:int(np.rint(i+self.stimulusPulseDuration_ms / self.conf.timeStep_ms))] = self.stimulusIntensity_mA
        #
        ## Vector with the instants of spikes at the soma.
        self.somaSpikeTrain = []
        ## Vector with the instants of spikes at the last compartment.
        self.lastCompSpikeTrain = []
        ## Vector with the instants of spikes at the terminal.
        self.terminalSpikeTrain = []
        # contraction DataMUnumber_S = int(conf.parameterSet('MUnumber_S_' + pool, pool, 0))
        activationModel = conf.parameterSet('activationModel', pool, 0)
        ## Contraction time of the twitch muscle unit, in ms.
        self.TwitchTc_ms = conf.parameterSet('twitchTimePeak', pool, index)
        ## Amplitude of the muscle unit twitch, in N.
        self.TwitchAmp_N = conf.parameterSet('twitchPeak', pool, index)
        ## Parameter of the saturation.
        self.bSat = conf.parameterSet('bSat' + activationModel, pool, index)
        ## Twitch- tetanus relationship
        self.twTet = conf.parameterSet('twTet' + activationModel, pool, index)
        ## EMG data
        ## Build synapses
        self.SynapsesOut = []
        self.transmitSpikesThroughSynapses = []
        self.indicesOfSynapsesOnTarget = []
def atualizeMotorUnit(self, t, v_mV):
    '''
    Update the whole motor unit at instant *t* (ms): first the dynamical
    neural compartments with the new membrane potentials *v_mV*, then
    the non-dynamical axonal delay.
    '''
    self.atualizeCompartments(t, v_mV)
    self.atualizeDelay(t)
#@profile
def atualizeCompartments(self, t, v_mV):
    '''
    Store the new membrane potentials and detect threshold crossings in
    the soma and axonal compartments (the dendrite does not spike).

    - Inputs:
        + **t**: current instant, in ms.
    '''
    self.v_mV[:] = v_mV
    for comp in range(self.somaIndex, self.compNumber):
        crossed = self.v_mV[comp] > self.threshold_mV
        if crossed and t - self.tSpikes[comp] > self.MNRefPer_ms:
            self.addCompartmentSpike(t, comp)
            # Reset the potential once the spike has been registered.
            self.v_mV[comp] = -10
#@profile
def addCompartmentSpike(self, t, comp):
    '''
    Register a spike at instant *t* (ms) in compartment *comp*.

    Records the spike time; a soma spike is also appended to the soma
    spike train and transmitted through the efferent synapses, and a
    last-compartment spike enters the axonal delay line. (With no axon
    nodes the soma IS the last compartment, so both branches may fire.)
    '''
    self.tSpikes[comp] = t
    if comp == self.somaIndex:
        self.somaSpikeTrain.append([t, int(self.index)])
        self.transmitSpikes(t)
    if comp == self.lastCompIndex:
        self.lastCompSpikeTrain.append([t, int(self.index)])
        self.Delay.addSpinalSpike(t)
def atualizeDelay(self, t):
    '''
    Atualize the terminal spike train, by considering the Delay of the nerve.

    - Inputs:
        + **t**: current instant, in ms.
    '''
    # Orthodromic spike reaching the nerve terminal within this time step.
    if -1e-3 < (t - self.Delay.terminalSpikeTrain) < 1e-3:
        self.terminalSpikeTrain.append([t, self.index])
    # Check whether there is antidromic impulse reaching soma or RC
    if self.Delay.indexAntidromicSpike < len(self.Delay.antidromicSpikeTrain) and -1e-2 < (t - self.Delay.antidromicSpikeTrain[self.Delay.indexAntidromicSpike]) < 1e-2:
        # Considers only MN-RC connections
        self.transmitSpikes(t)
        # Refractory period of MN soma
        if t-self.tSpikes[self.somaIndex] > self.MNRefPer_ms:
            self.tSpikes[self.somaIndex] = t
            self.somaSpikeTrain.append([t, int(self.index)])
        self.Delay.indexAntidromicSpike += 1
        # NOTE(review): CompartmentNoChannel is the channel-free variant;
        # verify it exposes a (possibly empty) Channels list, otherwise
        # this raises AttributeError on the first antidromic spike.
        for channel in self.compartment[self.somaIndex].Channels:
            for channelState in channel.condState:
                channelState.changeState(t)
    # Inject the nerve stimulus into the delay line when the stimulation
    # site lies within it.
    if self.stimulusCompartment == 'delay':
        self.Delay.atualizeStimulus(t, self.nerveStimulus_mA[int(np.rint(t/self.conf.timeStep_ms))])
def transmitSpikes(self, t):
    '''
    Deliver a spike at instant *t* (ms) to every efferent synapse of
    this motor unit.
    '''
    for i, targetIndices in enumerate(self.indicesOfSynapsesOnTarget):
        self.transmitSpikesThroughSynapses[i].receiveSpike(t, targetIndices)
def getEMG(self, t):
    '''
    Surface EMG contribution of this motor unit at instant *t*, in mV.

    Sums one Hermite-Rodriguez shaped MUAP (first or second order,
    chosen by self.hrType) per terminal spike that occurred before *t*,
    centered 3 time constants after the spike and truncated 6 time
    constants later.
    '''
    emg = 0
    for spike in self.terminalSpikeTrain:
        if spike[0] >= t:
            continue
        ta = t - spike[0] - 3 * self.timeCteEMG_ms
        if ta > 6 * self.timeCteEMG_ms:
            continue
        shape = math.exp(-(ta / self.timeCteEMG_ms) ** 2)
        if self.hrType == 1:
            emg += 1.19 * self.ampEMG_mV * ta * shape / self.timeCteEMG_ms
        elif self.hrType == 2:
            emg += 0.69 * self.ampEMG_mV * (1 - 2 * ((ta / self.timeCteEMG_ms) ** 2)) * shape
    return emg
def createStimulus(self):
    '''
    Rebuild the nerve stimulus waveform from the (possibly updated)
    stimulus parameters in the configuration.

    Fix: the pulse width was hard-coded to 1 ms here while __init__ uses
    stimulusPulseDuration_ms (which this method even reloads); both now
    honor the configured pulse duration, keeping the two waveform
    builders consistent.
    '''
    self.stimulusMeanFrequency_Hz = float(self.conf.parameterSet('stimFrequency_' + self.nerve, self.pool, 0))
    self.stimulusPulseDuration_ms = float(self.conf.parameterSet('stimPulseDuration_' + self.nerve, self.pool, 0))
    self.stimulusIntensity_mA = float(self.conf.parameterSet('stimIntensity_' + self.nerve, self.pool, 0))
    self.stimulusStart_ms = float(self.conf.parameterSet('stimStart_' + self.nerve, self.pool, 0))
    self.stimulusStop_ms = float(self.conf.parameterSet('stimStop_' + self.nerve, self.pool, 0))
    self.stimulusModulationStart_ms = float(self.conf.parameterSet('stimModulationStart_' + self.nerve, self.pool, 0))
    self.stimulusModulationStop_ms = float(self.conf.parameterSet('stimModulationStop_' + self.nerve, self.pool, 0))
    startStep = int(np.rint(self.stimulusStart_ms / self.conf.timeStep_ms))
    # range() instead of xrange(): identical iteration, portable to py3.
    for i in range(len(self.nerveStimulus_mA)):
        if (i * self.conf.timeStep_ms >= self.stimulusStart_ms and i * self.conf.timeStep_ms <= self.stimulusStop_ms):
            if (i * self.conf.timeStep_ms > self.stimulusModulationStart_ms and i * self.conf.timeStep_ms < self.stimulusModulationStop_ms):
                stimulusFrequency_Hz = self.stimulusMeanFrequency_Hz + self.axonStimModulation(i * self.conf.timeStep_ms)
            else:
                stimulusFrequency_Hz = self.stimulusMeanFrequency_Hz
            if stimulusFrequency_Hz > 0:
                stimulusPeriod_ms = 1000.0 / stimulusFrequency_Hz
                numberOfSteps = int(np.rint(stimulusPeriod_ms / self.conf.timeStep_ms))
                if ((i - startStep) % numberOfSteps == 0):
                    self.nerveStimulus_mA[i:int(np.rint(i + self.stimulusPulseDuration_ms / self.conf.timeStep_ms))] = self.stimulusIntensity_mA
def reset(self):
    '''
    Restore the motor unit to its initial resting state: membrane
    potentials back to equilibrium, compartments and delay reset, spike
    times and spike trains cleared.
    '''
    self.tSomaSpike = float("-inf")
    self.v_mV = np.zeros((self.compNumber), dtype=np.float64)
    for idx in range(len(self.compartment)):
        self.v_mV[idx] = self.compartment[idx].EqPot_mV
        self.compartment[idx].reset()
    self.Delay.reset()
    self.tSpikes = np.zeros((self.compNumber), dtype=np.float64)
    self.iIonic = np.full_like(self.v_mV, 0.0)
    self.iInjected = np.zeros_like(self.v_mV, dtype='d')
    # Clear all recorded spike trains.
    self.somaSpikeTrain = []
    self.lastCompSpikeTrain = []
    self.terminalSpikeTrain = []
| rnwatanabe/projectPR | MotorUnitNoChannel.py | Python | gpl-3.0 | 22,823 | [
"NEURON"
] | fb71ae5bf38001d421cf310cc2098ddc19d0719d6ea819d13c7e832ee87fd22d |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""WebsiteTest testing class."""
import logging
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import environment
# Verbosity level used for step-by-step action tracing.
SCRIPT_DEBUG = 9  # TODO(vabr) -- make this consistent with run_tests.py.


class WebsiteTest:
    """WebsiteTest testing class.

    Represents one website, defines some generic operations on that site.
    To customise for a particular website, this class needs to be inherited
    and the Login() method overridden.
    """

    # Possible values of self.autofill_expectation.
    AUTOFILLED = 1  # Expect password and username to be autofilled.
    NOT_AUTOFILLED = 2  # Expect password and username not to be autofilled.

    # The maximal accumulated time to spend in waiting for website UI
    # interaction.
    MAX_WAIT_TIME_IN_SECONDS = 200

    # Types of test to be passed to self.RunTest().
    TEST_TYPE_PROMPT_FAIL = 1
    TEST_TYPE_PROMPT_SUCCESS = 2
    TEST_TYPE_SAVE_AND_AUTOFILL = 3
def __init__(self, name, username_not_auto=False):
"""Creates a new WebsiteTest.
Args:
name: The website name, identifying it in the test results.
username_not_auto: Expect that the tested website fills username field
on load, and Chrome cannot autofill in that case.
"""
self.name = name
self.username = None
self.password = None
self.username_not_auto = username_not_auto
# Specify, whether it is expected that credentials get autofilled.
self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
self.remaining_seconds_to_wait = WebsiteTest.MAX_WAIT_TIME_IN_SECONDS
# The testing Environment, if added to any.
self.environment = None
# The webdriver from the environment.
self.driver = None
# Mouse/Keyboard actions.
def Click(self, selector):
  """Clicks on the element described by |selector|, waiting for it to
  become displayed first.

  Args:
    selector: The clicked element's CSS selector.
  """
  logging.log(SCRIPT_DEBUG, "action: Click %s" % selector)
  self.WaitUntilDisplayed(selector).click()
def ClickIfClickable(self, selector):
  """Clicks on the element described by |selector| if it is clickable.

  An element is clickable when webdriver's click() does not throw: it must
  exist in the DOM, not be covered by another element and lie inside the
  visible area. Transparency does not influence clickability.

  Args:
    selector: The clicked element's CSS selector.

  Returns:
    Whether the element was clickable (and therefore clicked).
  """
  logging.log(SCRIPT_DEBUG, "action: ClickIfVisible %s" % selector)
  target = self.WaitUntilDisplayed(selector)
  try:
    target.click()
  except Exception:
    return False
  return True
def GoTo(self, url):
  """Navigates the main frame to |url|.

  Args:
    url: The URL of where to go to.
  """
  logging.log(SCRIPT_DEBUG, "action: GoTo %s" % self.name)
  self.driver.get(url)
def HoverOver(self, selector):
  """Hovers over the element described by |selector|, waiting for it to
  become displayed first.

  Args:
    selector: The CSS selector of the element to hover over.
  """
  logging.log(SCRIPT_DEBUG, "action: Hover %s" % selector)
  target = self.WaitUntilDisplayed(selector)
  ActionChains(self.driver).move_to_element(target).perform()
# Waiting/Displaying actions.
def _ReturnElementIfDisplayed(self, selector):
  """Returns the element described by |selector|, or None if not displayed.

  Note: This takes neither overlapping among elements nor position with
  regards to the visible area into account.

  Args:
    selector: The CSS selector of the checked element.
  """
  try:
    candidate = self.driver.find_element_by_css_selector(selector)
    return candidate if candidate.is_displayed() else None
  except Exception:
    return None
def IsDisplayed(self, selector):
  """Checks whether the element described by |selector| is displayed.

  Note: This takes neither overlapping among elements nor position with
  regards to the visible area into account.

  Args:
    selector: The CSS selector of the checked element.

  Returns:
    True iff the element is in the DOM and less than 100% transparent.
  """
  logging.log(SCRIPT_DEBUG, "action: IsDisplayed %s" % selector)
  element = self._ReturnElementIfDisplayed(selector)
  return element is not None
def Wait(self, duration):
  """Waits |duration| seconds, drawing from the test's global wait budget.

  The per-test budget (MAX_WAIT_TIME_IN_SECONDS) prevents a stuck page
  from hanging the whole run.

  Args:
    duration: The time to wait in seconds.

  Raises:
    Exception: When the accumulated waiting limit is exceeded.
  """
  logging.log(SCRIPT_DEBUG, "action: Wait %s" % duration)
  self.remaining_seconds_to_wait -= duration
  if self.remaining_seconds_to_wait < 0:
    raise Exception("Waiting limit exceeded for website: %s" % self.name)
  time.sleep(duration)
# TODO(vabr): Pull this out into some website-utils and use in Environment
# also?
def WaitUntilDisplayed(self, selector):
  """Polls (1 s period, against the Wait() budget) until the element
  described by |selector| is displayed.

  Args:
    selector: The CSS selector of the element to wait for.

  Returns:
    The displayed element.
  """
  while True:
    element = self._ReturnElementIfDisplayed(selector)
    if element:
      return element
    self.Wait(1)
# Form actions.

def FillPasswordInto(self, selector):
  """Ensures that the selected element's value is the saved password.

  Depending on self.autofill_expectation, this either checks that the
  element already has the password autofilled, or checks that the value
  is empty and replaces it with the password.

  Args:
    selector: The CSS selector for the filled element.

  Raises:
    Exception: An exception is raised if the element's value is different
        from the expectation.
  """
  logging.log(SCRIPT_DEBUG, "action: FillPasswordInto %s" % selector)
  password_element = self.WaitUntilDisplayed(selector)
  # Chrome protects the password inputs and doesn't fill them until
  # the user interacts with the page. To be sure that such thing has
  # happened we perform |Keys.CONTROL| keypress.
  action_chains = ActionChains(self.driver)
  action_chains.key_down(Keys.CONTROL).key_up(Keys.CONTROL).perform()
  self.Wait(2)  # TODO(vabr): Detect when autofill finished.
  if self.autofill_expectation == WebsiteTest.AUTOFILLED:
    if password_element.get_attribute("value") != self.password:
      raise Exception("Error: autofilled password is different from the saved"
                      " one on website: %s" % self.name)
  elif self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
    if password_element.get_attribute("value"):
      # Fixed message: the two adjacent literals previously concatenated
      # to "...not empty onwebsite:" (missing space).
      raise Exception("Error: password value unexpectedly not empty on"
                      " website: %s" % self.name)
    password_element.send_keys(self.password)
def FillUsernameInto(self, selector):
  """Ensures that the selected element's value is the saved username.

  Depending on self.autofill_expectation, this either checks that the
  element already has the username autofilled, or checks that the value
  is empty and replaces it with the username. If self.username_not_auto
  is true, it skips the checks and just overwrites the value with the
  username.

  Args:
    selector: The CSS selector for the filled element.

  Raises:
    Exception: An exception is raised if the element's value is different
        from the expectation.
  """
  logging.log(SCRIPT_DEBUG, "action: FillUsernameInto %s" % selector)
  username_element = self.WaitUntilDisplayed(selector)
  self.Wait(2)  # TODO(vabr): Detect when autofill finished.
  if not self.username_not_auto:
    if self.autofill_expectation == WebsiteTest.AUTOFILLED:
      if username_element.get_attribute("value") != self.username:
        raise Exception("Error: filled username different from the saved"
                        " one on website: %s" % self.name)
      return
    if self.autofill_expectation == WebsiteTest.NOT_AUTOFILLED:
      if username_element.get_attribute("value"):
        # Fixed message: the two adjacent literals previously concatenated
        # to "...not empty onwebsite:" (missing space).
        raise Exception("Error: username value unexpectedly not empty on"
                        " website: %s" % self.name)
  username_element.clear()
  username_element.send_keys(self.username)
def Submit(self, selector):
  """Finds an element using CSS |selector| and calls its submit() handler.

  Args:
    selector: The CSS selector for the element to call submit() on.
  """
  logging.log(SCRIPT_DEBUG, "action: Submit %s" % selector)
  self.WaitUntilDisplayed(selector).submit()
# Login/Logout methods

def Login(self):
  """Logs in; concrete WebsiteTest subclasses must override this."""
  raise NotImplementedError("Login is not implemented.")
def LoginWhenAutofilled(self):
  """Logs in, expecting the stored password to have been autofilled."""
  self.autofill_expectation = WebsiteTest.AUTOFILLED
  self.Login()
def LoginWhenNotAutofilled(self):
  """Logs in, expecting the password field to start out empty."""
  self.autofill_expectation = WebsiteTest.NOT_AUTOFILLED
  self.Login()
def Logout(self):
  """Logs out by deleting the browser cookies via the test environment."""
  self.environment.DeleteCookies()
# Test scenarios

def PromptFailTest(self):
  """Checks that prompt is not shown on a failed login attempt.

  Tries to login with a wrong password and checks that the password
  is not offered for saving.

  Raises:
    Exception: An exception is raised if the test fails.
  """
  logging.log(SCRIPT_DEBUG, "PromptFailTest for %s" % self.name)
  correct_password = self.password
  # Hardcoded random wrong password. Chosen by fair `pwgen` call.
  # For details, see: http://xkcd.com/221/.
  self.password = "ChieF2ae"
  # Attempt the login with the wrong password, then restore the real one.
  self.LoginWhenNotAutofilled()
  self.password = correct_password
  # Neither the "ask" nor the "save" message may appear after a failure.
  self.environment.CheckForNewString(
      [environment.MESSAGE_ASK, environment.MESSAGE_SAVE],
      False,
      "Error: did not detect wrong login on website: %s" % self.name)
def PromptSuccessTest(self):
  """Checks that prompt is shown on a successful login attempt.

  Tries to login with a correct password and checks that the password
  is offered for saving. Chrome cannot have the auto-save option on
  when running this test.

  Raises:
    Exception: An exception is raised if the test fails.
  """
  logging.log(SCRIPT_DEBUG, "PromptSuccessTest for %s" % self.name)
  # The save prompt is only shown when auto-save is off, so insist on it.
  if not self.environment.show_prompt:
    raise Exception("Switch off auto-save during PromptSuccessTest.")
  self.LoginWhenNotAutofilled()
  # The "ask" message signals that the save prompt was displayed.
  self.environment.CheckForNewString(
      [environment.MESSAGE_ASK],
      True,
      "Error: did not detect login success on website: %s" % self.name)
def SaveAndAutofillTest(self):
  """Checks that a correct password is saved and autofilled.

  Tries to login with a correct password and checks that the password
  is saved and autofilled on next visit. Chrome must have the auto-save
  option on when running this test.

  Raises:
    Exception: An exception is raised if the test fails.
  """
  logging.log(SCRIPT_DEBUG, "SaveAndAutofillTest for %s" % self.name)
  if self.environment.show_prompt:
    # This test needs auto-save enabled (no prompt shown). The previous
    # message was copy-pasted from PromptSuccessTest and gave the
    # opposite instruction for the wrong test.
    raise Exception("Switch on auto-save during SaveAndAutofillTest.")
  self.LoginWhenNotAutofilled()
  self.environment.CheckForNewString(
      [environment.MESSAGE_SAVE],
      True,
      "Error: did not detect login success on website: %s" % self.name)
  self.Logout()
  self.LoginWhenAutofilled()
  self.environment.CheckForNewString(
      [environment.MESSAGE_SAVE],
      True,
      "Error: failed autofilled login on website: %s" % self.name)
def RunTest(self, test_type):
  """Dispatches to the test scenario selected by |test_type|.

  Raises:
    Exception: If |test_type| is not one of the TEST_TYPE_* constants.
  """
  # Guard-clause dispatch: run the matching scenario and return.
  if test_type == WebsiteTest.TEST_TYPE_PROMPT_FAIL:
    self.PromptFailTest()
    return
  if test_type == WebsiteTest.TEST_TYPE_PROMPT_SUCCESS:
    self.PromptSuccessTest()
    return
  if test_type == WebsiteTest.TEST_TYPE_SAVE_AND_AUTOFILL:
    self.SaveAndAutofillTest()
    return
  raise Exception("Unknown test type {}.".format(test_type))
| mou4e/zirconium | components/test/data/password_manager/automated_tests/websitetest.py | Python | bsd-3-clause | 12,787 | [
"VisIt"
] | 092946f1bcd39392710f9b528f1a5ff04da1c7aae073ba652b2777017825b5ce |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis
from MDAnalysisTests import module_not_found
from numpy.testing import TestCase, assert_equal, assert_almost_equal, dec
import numpy as np
from MDAnalysisTests.datafiles import Martini_membrane_gro
class TestLeafletFinder(TestCase):
    """Regression tests for MDAnalysis.analysis.leaflet on a Martini membrane."""

    # NOTE(review): the skip decorator is applied only to setUp; presumably
    # numpy.testing's decorator machinery propagates the skip to the test
    # methods -- verify against the test runner in use.
    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        # The same GRO file serves as both topology and coordinates.
        self.universe = MDAnalysis.Universe(Martini_membrane_gro, Martini_membrane_gro)
        # "name PO4" selects the head-group beads used for leaflet detection.
        self.lipid_heads = self.universe.select_atoms("name PO4")
        self.lipid_head_string = "name PO4"

    def tearDown(self):
        # Drop references so each test starts from a fresh universe.
        del self.universe
        del self.lipid_heads
        del self.lipid_head_string

    def test_leaflet_finder(self):
        """LeafletFinder splits the membrane into the two expected leaflets."""
        from MDAnalysis.analysis.leaflet import LeafletFinder
        lfls = LeafletFinder(self.universe, self.lipid_heads, pbc=True)
        top_heads, bottom_heads = lfls.groups()
        # Make top be... on top.
        if top_heads.center_of_geometry()[2] < bottom_heads.center_of_geometry()[2]:
            top_heads, bottom_heads = (bottom_heads, top_heads)
        # Expected indices are every 12th atom within each leaflet's range,
        # matching the fixed layout of the reference membrane file.
        assert_equal(top_heads.indices, np.arange(1, 2150, 12), err_msg="Found wrong leaflet lipids")
        assert_equal(bottom_heads.indices, np.arange(2521, 4670, 12), err_msg="Found wrong leaflet lipids")

    def test_string_vs_atomgroup_proper(self):
        """A selection string and an AtomGroup input give identical leaflets."""
        from MDAnalysis.analysis.leaflet import LeafletFinder
        lfls_ag = LeafletFinder(self.universe, self.lipid_heads, pbc=True)
        lfls_string = LeafletFinder(self.universe, self.lipid_head_string, pbc=True)
        groups_ag = lfls_ag.groups()
        groups_string = lfls_string.groups()
        assert_equal(groups_string[0].indices, groups_ag[0].indices)
        assert_equal(groups_string[1].indices, groups_ag[1].indices)

    def test_optimize_cutoff(self):
        """optimize_cutoff finds two leaflets at the documented cutoff."""
        from MDAnalysis.analysis.leaflet import optimize_cutoff
        cutoff, N = optimize_cutoff(self.universe, self.lipid_heads, pbc=True)
        assert_equal(N, 2)
        assert_almost_equal(cutoff, 10.5, decimal=4)
| alejob/mdanalysis | testsuite/MDAnalysisTests/analysis/test_leaflet.py | Python | gpl-2.0 | 3,088 | [
"MDAnalysis"
] | eb54bb094f3282547dc696d3e2e7004e26f6adc2cfa0036b3d27b008fc44ee13 |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and University of
# of Connecticut School of Medicine.
# All rights reserved.
# Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., University of Heidelberg, and The University
# of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
import math
class Test_CEvent(unittest.TestCase):
    """Unit tests for the CEvent class of the COPASI Python bindings."""

    def setUp(self):
        """Builds a minimal model: one compartment, one metabolite, one event."""
        self.datamodel = COPASI.CRootContainer.addDatamodel()
        self.model = self.datamodel.getModel()
        self.compartment = self.model.createCompartment("Comp1")
        self.metab = self.model.createMetabolite("metab1", "Comp1")
        self.event1 = self.model.createEvent("Event_1")

    # All assertions below use assertTrue instead of the assert_ alias,
    # which was deprecated and removed in Python 3.12.

    def test_Base(self):
        """The created event exists and has the expected wrapper type."""
        self.assertTrue(self.event1 != None)
        self.assertTrue(self.event1.__class__ == COPASI.CEvent)

    def test_Order(self):
        """setOrder/getOrder round-trips the event order."""
        self.event1.setOrder(25)
        self.assertTrue(self.event1.getOrder() == 25)

    def test_DelayAssignment(self):
        """setDelayAssignment/getDelayAssignment round-trips both values."""
        self.event1.setDelayAssignment(True)
        self.assertTrue(self.event1.getDelayAssignment() == True)
        self.event1.setDelayAssignment(False)
        self.assertTrue(self.event1.getDelayAssignment() == False)

    def test_TriggerExpression(self):
        """The trigger expression is stored and returned verbatim."""
        expression = "<CN=Root,Model=New Model,Reference=Time> gt 3.0"
        self.event1.setTriggerExpression(expression)
        expression2 = self.event1.getTriggerExpression()
        self.assertTrue(expression == expression2)

    def test_DelayExpression(self):
        """The delay expression is accepted and returned verbatim."""
        expression = "7.0"
        result = self.event1.setDelayExpression(expression)
        self.assertTrue(result == True)
        expression2 = self.event1.getDelayExpression()
        self.assertTrue(expression == expression2)

    def test_Assignments(self):
        """Event assignments can be created, targeted and given expressions."""
        self.assertTrue(self.event1.getAssignments().size() == 0)
        assignment = self.event1.createAssignment()
        self.assertTrue(assignment.__class__ == COPASI.CEventAssignment)
        self.assertTrue(self.event1.getAssignments().size() == 1)
        assignment.setTargetKey(self.metab.getKey())
        self.assertTrue(assignment.getTargetKey() == self.metab.getKey())
        expression = "5.0+12.3"
        assignment.setExpression(expression)
        expression2 = assignment.getExpression()
        self.assertTrue(expression2 == expression)
def suite():
    """Builds a TestSuite containing every Test_CEvent test method."""
    test_names = [
        'test_Base',
        'test_Order',
        'test_DelayAssignment',
        'test_TriggerExpression',
        'test_DelayExpression',
        'test_Assignments',
    ]
    return unittest.TestSuite(Test_CEvent(name) for name in test_names)
# Run the suite directly when the file is executed as a script.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| jonasfoe/COPASI | copasi/bindings/python/unittests/Test_CEvent.py | Python | artistic-2.0 | 2,626 | [
"COPASI"
] | d72f18ea32cc3ca2b4760bdf9fbfbf8f4cdef264ad943c41e914ddf1e3274bd6 |
## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.2.112 (20150514) ##
## File: const.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from math import radians
from configparser import ConfigParser
from os.path import join, expanduser
# Import blender modules => at the bottom
# Read configuration
# All constants below are loaded once at import time from config.ini.
config = ConfigParser()
with open('config.ini', encoding='utf-8') as file:
    config.read_file(file)

# Internal details
INT_BLENDER_COUNTER = config['Internal']['blender_counter']
# Intervals are in seconds (converted from the INI strings).
INT_TEXT_INTERVAL = float(config['Internal']['message_interval'])
INT_AUTO_SAVE_INTERVAL = float(config['Internal']['auto_save_interval'])
INT_TEMPORARY_FOLDER = expanduser(config['Internal']['temp_base_dir'])
INT_PERMANENT_FOLDER = expanduser(config['Internal']['permanent_save_dir'])
# NOTE(review): INT_TEMPORARY_FOLDER is already expanded; the extra
# expanduser() calls below are harmless no-ops.
INT_TEMP_SAVE_FOLDER = expanduser(join(INT_TEMPORARY_FOLDER,
                                       config['Internal']['temp_save_folder']))
INT_TEMP_SAVE_FILE = join(INT_TEMP_SAVE_FOLDER,
                          config['Internal']['temp_save_file'])
INT_AUTO_SAVE_FOLDER = expanduser(join(INT_TEMPORARY_FOLDER,
                                       config['Internal']['temp_auto_save_dir']))
INT_AUTO_SAVE_FILE = join(INT_AUTO_SAVE_FOLDER,
                          config['Internal']['temp_auto_save_file'])
# Kept for reference: state/feedback marker files, currently unused.
#INT_STATE_SHUT_DOWN = join(config['Internal']['temp_base_dir'],
#                           config['Internal']['temp_states'],
#                           config['Internal']['state_shut_down'])
#INT_STATE_RESTART = join(config['Internal']['temp_base_dir'],
#                         config['Internal']['temp_states'],
#                         config['Internal']['state_restart'])
#INT_STATE_RECOVER_AUTO = join(config['Internal']['temp_base_dir'],
#                              config['Internal']['temp_states'],
#                              config['Internal']['state_recover_auto'])
#INT_STATE_DONE = join(config['Internal']['temp_base_dir'],
#                      config['Internal']['temp_feedbacks'],
#                      config['Internal']['state_done'])

# Render/window settings.
# NOTE(review): bool(eval(...)) executes text from config.ini -- acceptable
# for a trusted local file, but ConfigParser.getboolean would be safer.
WINDOW_FULL_SCREEN = bool(eval(config['Render']['full_screen']))
WINDOW_DISPLAY_X = int(config['Render']['display_x'])
WINDOW_DISPLAY_Y = int(config['Render']['display_y'])
WINDOW_RESOLUTION_X = int(config['Render']['resolution_x'])
WINDOW_RESOLUTION_Y = int(config['Render']['resolution_y'])

# Application constants
APP_RUNNING = 0
APP_ESCAPED = -1

# Blender object names
OBJ_PROTOTYPE_FINGER = config['Names']['finger_object']
OBJ_PROTOTYPE_SURFACE = config['Names']['armature_object']
OBJ_PROTOTYPE_VERTEX_ALL = config['Names']['armature_control']
# EXPERIMENTAL
OBJ_ARMATURE_CONTROL = config['Names']['armature_control']
OBJ_ARMATURE = config['Names']['armature_object']
OBJ_GEOMETRY = config['Names']['geometry_object']
# EXPERIMENTAL
OBJ_GLOBAL = config['Names']['logic']
OBJ_DOT = config['Names']['dot_object'] + INT_BLENDER_COUNTER
OBJ_TEXT_FIRST = config['Names']['text_first_object']
OBJ_TEXT_OTHER = config['Names']['text_other_object']
OBJ_HUD_SCENE = config['Names']['hud_scene']

# Blender properties
PROP_TEXT_TIMER = config['Scripts']['var_text_time']

# Communication settings
# NOTE(review): eval() again -- see the WINDOW_FULL_SCREEN note above.
COMM_IS_PAIRED = bool(eval(config['Communication']['paired']))
COMM_DEVICE_NAME = config['Communication']['device']
COMM_THIS_HOST = config['Communication']['this_host']
COMM_THIS_PORT = int(config['Communication']['this_port'])
COMM_OTHER_HOST = config['Communication']['other_host']
COMM_OTHER_PORT = int(config['Communication']['other_port'])
COMM_IS_MASTER = bool(eval(config['Communication']['master']))
COMM_RUNNING = 0
COMM_RESTART = -1

# Colors
# Four-component tuples in the 0..1 range (presumably RGBA -- the fourth
# component is used as opacity, e.g. 0.350 below).
COLOR_GEOMETRY_BASE = 0.000, 0.448, 0.205, 1.000
COLOR_GEOMETRY_DARK = 0.000, 0.073, 0.036, 1.000
COLOR_GEOMETRY_LITE = 0.000, 1.000, 0.448, 1.000
COLOR_FINGER_BASE = 1.000, 1.000, 1.000, 1.000
COLOR_GRAB_PINCH_BASE = COLOR_FINGER_BASE
COLOR_GRAB_PINCH_OKAY = 0.000, 1.000, 0.000, 0.350
COLOR_GRAB_PINCH_FAIL = 1.000, 0.000, 0.000, 1.000
COLOR_ROTATE_PINCH_BASE = COLOR_FINGER_BASE
COLOR_ROTATE_PINCH_OKAY = 0.000, 0.000, 1.000, 1.000
COLOR_GRAB_MOVE_BASE = COLOR_FINGER_BASE
COLOR_GRAB_MOVE_OKAY = 1.000, 1.000, 0.000, 1.000
COLOR_LOCKED = 1.000, 1.000, 0.000, 1.000
COLOR_UNLOCKED = COLOR_FINGER_BASE
COLOR_SELECTED = 0.000, 1.000, 1.000, 1.000
COLOR_DESELECTED = COLOR_FINGER_BASE

# Sizes
# Relative scale factors for the five finger objects.
SIZE_FINGER_THUMB = 1.00
SIZE_FINGER_INDEX = 0.60
SIZE_FINGER_MIDDLE = 0.70
SIZE_FINGER_RING = 0.60
SIZE_FINGER_PINKY = 0.55

# Hardware fine-tuning
LEAP_MULTIPLIER = 0.1
RIFT_MULTIPLIER = 10
RIFT_POSITION_SHIFT_Y = -20
RIFT_POSITION_SHIFT_Z = 10
# mathutils is only present inside Blender; outside of it the orientation
# shift constant is simply unavailable.
try:
    from mathutils import Quaternion
    RIFT_ORIENTATION_SHIFT = Quaternion((1, 0, 0), radians(80))
except ImportError:
    print('const.py => RIFT_ORIENTATION_SHIFT is not available.\n'
          '(Hint: Failed to load the mathutils module of blender)')
"VisIt"
] | 37df413db70c41ae5b032f4df37e56f89b3ae64bd4853b0a0189b0056002ea71 |
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mr Smith in Google CP Solver.
From an IF Prolog example (http://www.ifcomputer.de/)
'''
The Smith family and their three children want to pay a visit but they
do not all have the time to do so. Following are few hints who will go
and who will not:
o If Mr Smith comes, his wife will come too.
o At least one of their two sons Matt and John will come.
o Either Mrs Smith or Tim will come, but not both.
o Either Tim and John will come, or neither will come.
o If Matt comes, then John and his father will
also come.
'''
The answer should be:
Mr_Smith_comes = 0
Mrs_Smith_comes = 0
Matt_comes = 0
John_comes = 1
Tim_comes = 1
Compare with the following models:
* ECLiPSe: http://www.hakank.org/eclipse/mr_smith.ecl
* SICStus Prolog: http://www.hakank.org/sicstus/mr_smith.pl
* Gecode: http://www.hakank.org/gecode/mr_smith.cpp
* MiniZinc: http://www.hakank.org/minizinc/mr_smith.mzn
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main():
  """Builds the Mr Smith CSP, enumerates all solutions and prints stats."""
  # Create the solver.
  solver = pywrapcp.Solver('Mr Smith problem')

  # Five family members, one 0/1 decision variable each.
  n = 5
  x = [solver.IntVar(0, 1, 'x[%i]' % i) for i in range(n)]
  Mr_Smith, Mrs_Smith, Matt, John, Tim = x

  #
  # constraints
  #
  # The original MiniZinc formulations are kept as comments for clarity
  # and debugging.
  #
  # If Mr Smith comes then his wife will come too.
  # (Mr_Smith -> Mrs_Smith)
  solver.Add(Mr_Smith - Mrs_Smith <= 0)

  # At least one of their two sons Matt and John will come.
  # (Matt \/ John)
  solver.Add(Matt + John >= 1)

  # Either Mrs Smith or Tim will come but not both.
  # bool2int(Mrs_Smith) + bool2int(Tim) = 1 /\
  # (Mrs_Smith xor Tim)
  solver.Add(Mrs_Smith + Tim == 1)

  # Either Tim and John will come or neither will come.
  # (Tim = John)
  solver.Add(Tim == John)

  # If Matt comes /\ then John and his father will also come.
  # (Matt -> (John /\ Mr_Smith))
  solver.Add(Matt - (John * Mr_Smith) <= 0)

  # Search: enumerate every assignment satisfying the constraints.
  decision_builder = solver.Phase(x, solver.INT_VAR_DEFAULT,
                                  solver.INT_VALUE_DEFAULT)
  solver.NewSearch(decision_builder)
  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print('x:', [x[i].Value() for i in range(n)])

  print()
  print('num_solutions:', num_solutions)
  print('failures:', solver.Failures())
  print('branches:', solver.Branches())
  print('WallTime:', solver.WallTime(), 'ms')


if __name__ == '__main__':
  main()
| linsicai/or-tools | examples/python/mr_smith.py | Python | apache-2.0 | 3,363 | [
"VisIt"
] | eccefe58a16929e96a61e07a269982b79b2fc27c7e344661e20c2fb789a34ca9 |
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes corresponding to phyloXML elements.
See Also
--------
Official specification:
http://phyloxml.org/
Journal article:
Han and Zmasek (2009), doi:10.1186/1471-2105-10-356
"""
__docformat__ = "restructuredtext en"
import re
import warnings
from Bio._py3k import basestring
from Bio import Alphabet
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.SeqRecord import SeqRecord
from Bio import BiopythonWarning
from Bio.Phylo import BaseTree
class PhyloXMLWarning(BiopythonWarning):
    """Warning for non-compliance with the phyloXML specification."""
def _check_str(text, testfunc):
"""Check a string using testfunc, and warn if there's no match."""
if text is not None and not testfunc(text):
warnings.warn("String %s doesn't match the given regexp" % text,
PhyloXMLWarning, stacklevel=2)
# Core elements
class PhyloElement(BaseTree.TreeElement):
    """Common base class of every object defined by the phyloXML schema."""
class Phyloxml(PhyloElement):
    """Root node of the PhyloXML document.

    Contains an arbitrary number of Phylogeny elements, possibly followed by
    elements from other namespaces.

    :Parameters:
        attributes : dict
            (XML namespace definitions)
        phylogenies : list
            The phylogenetic trees
        other : list
            Arbitrary non-phyloXML elements, if any
    """

    def __init__(self, attributes, phylogenies=None, other=None):
        # Start from the standard phyloXML namespace declarations, then let
        # caller-supplied attributes extend or override them.
        namespaces = {
            "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "xmlns": "http://www.phyloxml.org",
            "xsi:schemaLocation": "http://www.phyloxml.org http://www.phyloxml.org/1.10/phyloxml.xsd",
        }
        if attributes:
            namespaces.update(attributes)
        self.attributes = namespaces
        self.phylogenies = phylogenies or []
        self.other = other or []

    def __getitem__(self, index):
        """Get a phylogeny by index or name."""
        if isinstance(index, (int, slice)):
            return self.phylogenies[index]
        if not isinstance(index, basestring):
            raise KeyError("can't use %s as an index" % type(index))
        for tree in self.phylogenies:
            if tree.name == index:
                return tree
        raise KeyError("no phylogeny found with name " + repr(index))

    def __iter__(self):
        """Iterate through the phylogenetic trees in this object."""
        return iter(self.phylogenies)

    def __len__(self):
        """Number of phylogenetic trees in this object."""
        return len(self.phylogenies)

    def __str__(self):
        trees = ',\n'.join(str(tree) for tree in self.phylogenies)
        return '%s([%s])' % (self.__class__.__name__, trees)
class Other(PhyloElement):
    """Container for non-phyloXML elements in the tree.

    Usually, an Other object will have either a 'value' or a non-empty list
    of 'children', but not both. This is not enforced here, though.

    :Parameters:
        tag : string
            local tag for the XML node
        namespace : string
            XML namespace for the node -- should not be the default phyloXML
            namespace.
        attributes : dict of strings
            attributes on the XML node
        value : string
            text contained directly within this XML node
        children : list
            child nodes, if any (also `Other` instances)
    """

    def __init__(self, tag, namespace=None, attributes=None, value=None,
                 children=None):
        self.tag = tag
        self.namespace = namespace
        self.value = value
        # Mutable containers default to fresh empty objects per instance.
        self.attributes = attributes or {}
        self.children = children or []

    def __iter__(self):
        """Iterate through the children of this object (if any)."""
        return iter(self.children)
class Phylogeny(PhyloElement, BaseTree.Tree):
    """A phylogenetic tree.

    :Parameters:
        root : Clade
            the root node/clade of this tree
        rooted : bool
            True if this tree is rooted
        rerootable : bool
            True if this tree is rerootable
        branch_length_unit : string
            unit for branch_length values on clades
        name : string
            identifier for this tree, not required to be unique
        id : Id
            unique identifier for this tree
        description : string
            plain-text description
        date : Date
            date for the root node of this tree
        confidences : list
            Confidence objects for this tree
        clade_relations : list
            CladeRelation objects
        sequence_relations : list
            SequenceRelation objects
        properties : list
            Property objects
        other : list
            non-phyloXML elements (type `Other`)
    """

    def __init__(self, root=None, rooted=True,
                 rerootable=None, branch_length_unit=None, type=None,
                 # Child nodes
                 name=None, id=None, description=None, date=None,
                 # Collections
                 confidences=None, clade_relations=None, sequence_relations=None,
                 properties=None, other=None,
                 ):
        # NOTE: 'type' and 'id' shadow builtins, but the names mirror the
        # phyloXML attributes and are part of the public interface.
        assert isinstance(rooted, bool)
        self.root = root
        self.rooted = rooted
        self.rerootable = rerootable
        self.branch_length_unit = branch_length_unit
        self.type = type
        self.name = name
        self.id = id
        self.description = description
        self.date = date
        # Collections default to fresh empty lists per instance.
        self.confidences = confidences or []
        self.clade_relations = clade_relations or []
        self.sequence_relations = sequence_relations or []
        self.properties = properties or []
        self.other = other or []

    @classmethod
    def from_tree(cls, tree, **kwargs):
        """Create a new Phylogeny given a Tree (from Newick/Nexus or BaseTree).

        Keyword arguments are the usual `Phylogeny` constructor parameters.
        """
        phy = cls(
            root=Clade.from_clade(tree.root),
            rooted=tree.rooted,
            name=tree.name,
            # Wrap a non-None tree id in a phyloXML Id element.
            id=(tree.id is not None) and Id(str(tree.id)) or None)
        phy.__dict__.update(kwargs)
        return phy

    @classmethod
    def from_clade(cls, clade, **kwargs):
        """Create a new Phylogeny given a Newick or BaseTree Clade object.

        Keyword arguments are the usual `PhyloXML.Clade` constructor parameters.
        """
        return Clade.from_clade(clade).to_phylogeny(**kwargs)

    def as_phyloxml(self):
        """Return this tree, a PhyloXML-compatible Phylogeny object.

        Overrides the `BaseTree` method.
        """
        return self

    def to_phyloxml_container(self, **kwargs):
        """Create a new Phyloxml object containing just this phylogeny."""
        return Phyloxml(kwargs, phylogenies=[self])

    def to_alignment(self):
        """Construct an alignment from the aligned sequences in this tree."""
        def is_aligned_seq(elem):
            # Only Sequence elements whose mol_seq is flagged as aligned
            # contribute to the alignment.
            if isinstance(elem, Sequence) and elem.mol_seq.is_aligned:
                return True
            return False
        seqs = self._filter_search(is_aligned_seq, 'preorder', True)
        try:
            first_seq = next(seqs)
        except StopIteration:
            # No aligned sequences were found --> empty MSA
            return MultipleSeqAlignment([])
        msa = MultipleSeqAlignment([first_seq.to_seqrecord()],
                                   first_seq.get_alphabet())
        msa.extend(seq.to_seqrecord() for seq in seqs)
        return msa

    # Singular property for plural attribute
    def _get_confidence(self):
        """Equivalent to self.confidences[0] if there is only 1 value.

        See also: `Clade.confidence`, `Clade.taxonomy`
        """
        if len(self.confidences) == 0:
            return None
        if len(self.confidences) > 1:
            raise AttributeError("more than 1 confidence value available; "
                                 "use Phylogeny.confidences")
        return self.confidences[0]

    def _set_confidence(self, value):
        # Setter for the singular 'confidence' property; accepts a plain
        # number or an existing Confidence instance.
        if value is None:
            # Special case: mirror the behavior of _get_confidence
            self.confidences = []
            return
        if isinstance(value, float) or isinstance(value, int):
            value = Confidence(value)
        elif not isinstance(value, Confidence):
            raise ValueError("value must be a number or Confidence instance")
        if len(self.confidences) == 0:
            self.confidences.append(value)
        elif len(self.confidences) == 1:
            self.confidences[0] = value
        else:
            raise ValueError("multiple confidence values already exist; "
                             "use Phylogeny.confidences instead")

    def _del_confidence(self):
        # Deleter for the singular 'confidence' property.
        self.confidences = []

    confidence = property(_get_confidence, _set_confidence, _del_confidence)
class Clade(PhyloElement, BaseTree.Clade):
"""Describes a branch of the current phylogenetic tree.
Used recursively, describes the topology of a phylogenetic tree.
Both ``color`` and ``width`` elements should be interpreted by client code
as applying to the whole clade, including all descendents, unless
overwritten in-sub clades. This module doesn't automatically assign these
attributes to sub-clades to achieve this cascade -- and neither should you.
:Parameters:
branch_length
parent branch length of this clade
id_source
link other elements to a clade (on the xml-level)
name : string
short label for this clade
confidences : list of Confidence objects
used to indicate the support for a clade/parent branch.
width : float
branch width for this clade (including branch from parent)
color : BranchColor
color used for graphical display of this clade
node_id
unique identifier for the root node of this clade
taxonomies : list
Taxonomy objects
sequences : list
Sequence objects
events : Events
describe such events as gene-duplications at the root node/parent
branch of this clade
binary_characters : BinaryCharacters
binary characters
distributions : list of Distribution objects
distribution(s) of this clade
date : Date
a date for the root node of this clade
references : list
Reference objects
properties : list
Property objects
clades : list Clade objects
Sub-clades
other : list of Other objects
non-phyloXML objects
"""
def __init__(self,
# Attributes
branch_length=None, id_source=None,
# Child nodes
name=None, width=None, color=None, node_id=None, events=None,
binary_characters=None, date=None,
# Collections
confidences=None, taxonomies=None, sequences=None,
distributions=None, references=None, properties=None, clades=None,
other=None,
):
self.branch_length = branch_length
self.id_source = id_source
self.name = name
self.width = width
self.color = color
self.node_id = node_id
self.events = events
self.binary_characters = binary_characters
self.date = date
self.confidences = confidences or []
self.taxonomies = taxonomies or []
self.sequences = sequences or []
self.distributions = distributions or []
self.references = references or []
self.properties = properties or []
self.clades = clades or []
self.other = other or []
@classmethod
def from_clade(cls, clade, **kwargs):
"""Create a new PhyloXML Clade from a Newick or BaseTree Clade object.
Keyword arguments are the usual PhyloXML Clade constructor parameters.
"""
new_clade = cls(branch_length=clade.branch_length,
name=clade.name)
new_clade.clades = [cls.from_clade(c) for c in clade]
new_clade.confidence = clade.confidence
new_clade.width = clade.width
new_clade.color = (BranchColor(
clade.color.red, clade.color.green, clade.color.blue)
if clade.color else None)
new_clade.__dict__.update(kwargs)
return new_clade
def to_phylogeny(self, **kwargs):
"""Create a new phylogeny containing just this clade."""
phy = Phylogeny(root=self, date=self.date)
phy.__dict__.update(kwargs)
return phy
# Shortcuts for list attributes that are usually only 1 item
# NB: Duplicated from Phylogeny class
def _get_confidence(self):
if len(self.confidences) == 0:
return None
if len(self.confidences) > 1:
raise AttributeError("more than 1 confidence value available; "
"use Clade.confidences")
return self.confidences[0]
def _set_confidence(self, value):
if value is None:
# Special case: mirror the behavior of _get_confidence
self.confidences = []
return
if isinstance(value, float) or isinstance(value, int):
value = Confidence(value)
elif not isinstance(value, Confidence):
raise ValueError("value must be a number or Confidence instance")
if len(self.confidences) == 0:
self.confidences.append(value)
elif len(self.confidences) == 1:
self.confidences[0] = value
else:
raise ValueError("multiple confidence values already exist; "
"use Phylogeny.confidences instead")
    def _del_confidence(self):
        """Deleter: clear all confidence values (``del clade.confidence``)."""
        self.confidences = []
    # Expose the single-value shortcut as a managed attribute.
    confidence = property(_get_confidence, _set_confidence, _del_confidence)
def _get_taxonomy(self):
if len(self.taxonomies) == 0:
return None
if len(self.taxonomies) > 1:
raise AttributeError("more than 1 taxonomy value available; "
"use Clade.taxonomies")
return self.taxonomies[0]
def _set_taxonomy(self, value):
if not isinstance(value, Taxonomy):
raise ValueError("assigned value must be a Taxonomy instance")
if len(self.taxonomies) == 0:
self.taxonomies.append(value)
elif len(self.taxonomies) == 1:
self.taxonomies[0] = value
else:
raise ValueError("multiple taxonomy values already exist; "
"use Phylogeny.taxonomies instead")
taxonomy = property(_get_taxonomy, _set_taxonomy)
# PhyloXML wrapper for a special BaseTree attribute
class BranchColor(PhyloElement, BaseTree.BranchColor):
    """PhyloXML wrapper around Bio.Phylo.BaseTree.BranchColor (RGB color)."""
    def __init__(self, *args, **kwargs):
        # Delegate storage of the red/green/blue components to BaseTree.
        BaseTree.BranchColor.__init__(self, *args, **kwargs)
# PhyloXML-specific complex types
class Accession(PhyloElement):
    """Captures the local part in a sequence identifier.

    Example: In ``UniProtKB:P17304``, the Accession instance attribute
    ``value`` is 'P17304' and the ``source`` attribute is 'UniProtKB'.
    """
    def __init__(self, value, source):
        self.value = value
        self.source = source

    def __str__(self):
        """Render as 'source:value', matching the original identifier."""
        return '{0}:{1}'.format(self.source, self.value)
class Annotation(PhyloElement):
    """The annotation of a molecular sequence.

    It is recommended to annotate by using the optional 'ref' attribute.

    :Parameters:
        ref : string
            reference string, e.g. 'GO:0008270',
            'KEGG:Tetrachloroethene degradation', 'EC:1.1.1.1'
        source : string
            plain-text source for this annotation
        evidence : str
            describe evidence as free text (e.g. 'experimental')
        desc : string
            free text description
        confidence : Confidence
            state the type and value of support (type Confidence)
        properties : list
            typed and referenced annotations from external resources
        uri : Uri
            link
    """
    # References must look like 'PREFIX:payload', e.g. 'GO:0008270'.
    re_ref = re.compile(r'[a-zA-Z0-9_]+:[a-zA-Z0-9_\.\-\s]+')

    def __init__(self, ref=None, source=None, evidence=None, type=None,
                 desc=None, confidence=None, uri=None, properties=None):
        # Validate the reference format before storing anything.
        _check_str(ref, self.re_ref.match)
        self.ref = ref
        self.source = source
        self.evidence = evidence
        self.type = type
        self.desc = desc
        self.confidence = confidence
        self.uri = uri
        # The collection attribute defaults to a fresh empty list.
        self.properties = properties or []
class BinaryCharacters(PhyloElement):
    """Binary characters at the root of a clade.

    The names and/or counts of binary characters present, gained, and
    lost at the root of a clade.
    """
    def __init__(self, type=None, gained_count=None, lost_count=None,
                 present_count=None, absent_count=None,
                 gained=None, lost=None, present=None, absent=None):
        # Scalar attributes: the character type and the four counts.
        self.type = type
        self.gained_count = gained_count
        self.lost_count = lost_count
        self.present_count = present_count
        self.absent_count = absent_count
        # Collection attributes: each defaults to its own empty list.
        self.gained = gained or []
        self.lost = lost or []
        self.present = present or []
        self.absent = absent or []
class CladeRelation(PhyloElement):
    """Expresses a typed relationship between two clades.

    For example, this could be used to describe multiple parents of a clade.

    :Parameters:
        id_ref_0 : str
            identifier reference for the first clade
        id_ref_1 : str
            identifier reference for the second clade
        distance : str
            distance between the two clades
        type : str
            type of the relationship
        confidence : Confidence
            support value for this relation
    """
    def __init__(self, type, id_ref_0, id_ref_1,
                 distance=None, confidence=None):
        self.distance = distance
        self.type = type
        self.id_ref_0 = id_ref_0
        self.id_ref_1 = id_ref_1
        self.confidence = confidence
class Confidence(PhyloElement):
    """A general purpose confidence element.
    For example, this can be used to express the bootstrap support value of a
    clade (in which case the `type` attribute is 'bootstrap').
    Instances proxy most numeric operations to the wrapped ``value`` so a
    Confidence can be used like a plain number in arithmetic.
    :Parameters:
        value : float
            confidence value
        type : string
            label for the type of confidence, e.g. 'bootstrap'
    """
    def __init__(self, value, type='unknown'):
        self.value = value
        self.type = type
    # Comparison operators
    def __hash__(self):
        """Return the hash value of the object.
        Hash values are integers. They are used to quickly compare dictionary
        keys during a dictionary lookup. Numeric values that compare equal have
        the same hash value (even if they are of different types, as is the
        case for 1 and 1.0).
        """
        # NOTE(review): hashing by identity while __eq__ compares by value
        # means two equal Confidence objects can hash differently; confirm
        # this is intentional before relying on set/dict membership.
        return id(self)
    def __eq__(self, other):
        # Compare by wrapped value; a bare number compares equal to a
        # Confidence holding the same value.
        if isinstance(other, Confidence):
            return self.value == other.value
        return self.value == other
    def __ne__(self, other):
        if isinstance(other, Confidence):
            return self.value != other.value
        return self.value != other
    # Ordering -- see functools.total_ordering in Py2.7
    def __lt__(self, other):
        if isinstance(other, Confidence):
            return self.value < other.value
        return self.value < other
    def __le__(self, other):
        return self < other or self == other
    def __gt__(self, other):
        return not (self <= other)
    def __ge__(self, other):
        return not (self.value < other)
    # Arithmetic operators, including reverse
    def __add__(self, other):
        return self.value + other
    def __radd__(self, other):
        return other + self.value
    def __sub__(self, other):
        return self.value - other
    def __rsub__(self, other):
        return other - self.value
    def __mul__(self, other):
        return self.value * other
    def __rmul__(self, other):
        return other * self.value
    def __div__(self, other):
        # Python 2 only: classic (possibly truncating) division.
        return self.value.__div__(other)
    def __rdiv__(self, other):
        # Python 2 only counterpart of __div__.
        return other.__div__(self.value)
    def __truediv__(self, other):
        """Rational-style division in Py3.0+.
        Also active in Py2.5+ with __future__.division import.
        """
        return self.value / other
    def __rtruediv__(self, other):
        return other / self.value
    def __floordiv__(self, other):
        """C-style and old-style division in Py3.0+.
        Also active in Py2.5+ with __future__.division import.
        """
        return self.value.__floordiv__(other)
    def __rfloordiv__(self, other):
        return other.__floordiv__(self.value)
    def __mod__(self, other):
        return self.value % other
    def __rmod__(self, other):
        return other % self.value
    def __divmod__(self, other):
        return divmod(self.value, other)
    def __rdivmod__(self, other):
        return divmod(other, self.value)
    def __pow__(self, other, modulo=None):
        # Three-argument pow() supports modular exponentiation.
        if modulo is not None:
            return pow(self.value, other, modulo)
        return pow(self.value, other)
    def __rpow__(self, other):
        return pow(other, self.value)
    # Unary arithmetic operations: -, +, abs()
    def __neg__(self):
        return -self.value
    def __pos__(self):
        return self.value
    def __abs__(self):
        return abs(self.value)
    # Explicit coercion to numeric types: int, long, float
    def __float__(self):
        return float(self.value)
    def __int__(self):
        return int(self.value)
    def __long__(self):
        # Python 2 only: the 'long' builtin does not exist on Python 3.
        return long(self.value)
class Date(PhyloElement):
    """A date associated with a clade/node.

    Its value can be numerical by using the 'value' element and/or free
    text with the 'desc' element' (e.g. 'Silurian'). If a numerical value
    is used, it is recommended to employ the 'unit' attribute.

    :Parameters:
        unit : string
            type of numerical value (e.g. 'mya' for 'million years ago')
        value : float
            the date value
        desc : string
            plain-text description of the date
        minimum : float
            lower bound on the date value
        maximum : float
            upper bound on the date value
    """
    def __init__(self, value=None, unit=None, desc=None,
                 minimum=None, maximum=None):
        self.value = value
        self.unit = unit
        self.desc = desc
        self.minimum = minimum
        self.maximum = maximum

    def __str__(self):
        """Return a human-readable representation of the date."""
        # Prefer the numeric form ("<value> <unit>") when both are set.
        if self.unit and self.value is not None:
            return '{0} {1}'.format(self.value, self.unit)
        # Fall back to the free-text description, then to the class name.
        if self.desc is not None:
            return self.desc
        return self.__class__.__name__
class Distribution(PhyloElement):
    """Geographic distribution of the items of a clade (species, sequences).

    Intended for phylogeographic applications.

    :Parameters:
        desc : string
            free-text description of the location
        points : list of `Point` objects
            coordinates (similar to the 'Point' element in Google's KML
            format)
        polygons : list of `Polygon` objects
            coordinate sets defining geographic regions
    """
    def __init__(self, desc=None, points=None, polygons=None):
        self.desc = desc
        # Each collection attribute gets its own fresh list when omitted.
        self.points = points or []
        self.polygons = polygons or []
class DomainArchitecture(PhyloElement):
    """Domain architecture of a protein.

    :Parameters:
        length : int
            total length of the protein sequence
        domains : list of ProteinDomain objects
            the domains within this protein
    """
    def __init__(self, length=None, domains=None):
        self.length = length
        # BUGFIX: default to an empty list -- as every other collection
        # attribute in this module does -- rather than storing None, which
        # would break any caller that iterates over ``domains`` (e.g.
        # Sequence.to_seqrecord).
        self.domains = domains or []
class Events(PhyloElement):
    """Events at the root node of a clade (e.g. one gene duplication).

    All attributes are set to None by default, but this object can also
    be treated as a dictionary, in which case None values are treated as
    missing keys and deleting a key resets that attribute's value back
    to None.
    """
    # Event types permitted by the phyloXML schema.
    ok_type = set(('transfer', 'fusion', 'speciation_or_duplication', 'other',
                   'mixed', 'unassigned'))

    def __init__(self, type=None, duplications=None, speciations=None,
                 losses=None, confidence=None):
        _check_str(type, self.ok_type.__contains__)
        self.type = type
        self.duplications = duplications
        self.speciations = speciations
        self.losses = losses
        self.confidence = confidence

    # --- Dictionary-style access; None values count as absent keys. ---
    def items(self):
        return [item for item in self.__dict__.items() if item[1] is not None]

    def keys(self):
        return [key for key, val in self.__dict__.items() if val is not None]

    def values(self):
        return [val for val in self.__dict__.values() if val is not None]

    def __len__(self):
        # Count only the attributes that are actually set.
        return len(self.values())

    def __getitem__(self, key):
        # Mirror attribute lookup (hasattr/getattr) so class attributes
        # behave the same as in plain attribute access.
        if not hasattr(self, key):
            raise KeyError(key)
        val = getattr(self, key)
        if val is None:
            raise KeyError("%s has not been set in this object" % repr(key))
        return val

    def __setitem__(self, key, val):
        setattr(self, key, val)

    def __delitem__(self, key):
        # Deleting resets the attribute to None rather than removing it.
        setattr(self, key, None)

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, key):
        return hasattr(self, key) and getattr(self, key) is not None
class Id(PhyloElement):
    """A general-purpose identifier element.

    Allows to indicate the provider (or authority) of an identifier,
    e.g. NCBI, along with the value itself.
    """
    def __init__(self, value, provider=None):
        self.value = value
        self.provider = provider

    def __str__(self):
        """Render as 'provider:value', or just the value if no provider."""
        if self.provider is None:
            return self.value
        return '%s:%s' % (self.provider, self.value)
class MolSeq(PhyloElement):
    """Store a molecular sequence.

    :Parameters:
        value : string
            the sequence itself
        is_aligned : bool
            True if this sequence is aligned with the others (usually
            meaning all aligned seqs are the same length and gaps may be
            present)
    """
    # Letters plus gap ('.', '-'), ambiguity ('?'), stop ('*') and '_'.
    re_value = re.compile(r'[a-zA-Z\.\-\?\*_]+')

    def __init__(self, value, is_aligned=None):
        # Validate the sequence characters before storing.
        _check_str(value, self.re_value.match)
        self.value = value
        self.is_aligned = is_aligned

    def __str__(self):
        """Return the raw sequence string."""
        return self.value
class Point(PhyloElement):
    """Geographic coordinates of a point, with an optional altitude.
    Used by element 'Distribution'.
    :Parameters:
        geodetic_datum : string, required
            the geodetic datum (also called 'map datum'). For example, Google's
            KML uses 'WGS84'.
        lat : numeric
            latitude
        long : numeric
            longitude
        alt : numeric
            altitude
        alt_unit : string
            unit for the altitude (e.g. 'meter')
    """
    # NOTE(review): the parameter name 'long' shadows a Python 2 builtin;
    # kept as-is for backward compatibility with existing callers.
    def __init__(self, geodetic_datum, lat, long, alt=None, alt_unit=None):
        self.geodetic_datum = geodetic_datum
        self.lat = lat
        self.long = long
        self.alt = alt
        self.alt_unit = alt_unit
class Polygon(PhyloElement):
    """A polygon defined by a list of 'Points' (used by element 'Distribution').

    :param points: list of 3 or more points representing vertices.
    """
    def __init__(self, points=None):
        self.points = points or []

    def __str__(self):
        """Show the class name and all vertices, one per line."""
        vertices = ',\n'.join(str(point) for point in self.points)
        return '%s([%s])' % (self.__class__.__name__, vertices)
class Property(PhyloElement):
    """A typed and referenced property from an external resources.

    Can be attached to `Phylogeny`, `Clade`, and `Annotation` objects.

    :Parameters:
        value : string
            the value of the property
        ref : string
            reference to an external resource, e.g. "NOAA:depth"
        applies_to : string
            indicates the item to which a property applies to (e.g. 'node'
            for the parent node of a clade, 'parent_branch' for the parent
            branch of a clade, or just 'clade').
        datatype : string
            the type of a property; limited to xsd-datatypes
            (e.g. 'xsd:string', 'xsd:boolean', 'xsd:integer', 'xsd:decimal',
            'xsd:float', 'xsd:double', 'xsd:date', 'xsd:anyURI').
        unit : string (optional)
            the unit of the property, e.g. "METRIC:m"
        id_ref : Id (optional)
            allows to attached a property specifically to one element (on
            the xml-level)
    """
    # 'prefix:payload' pattern shared by the 'ref' and 'unit' attributes.
    re_ref = re.compile(r'[a-zA-Z0-9_]+:[a-zA-Z0-9_\.\-\s]+')
    ok_applies_to = set(('phylogeny', 'clade', 'node', 'annotation',
                         'parent_branch', 'other'))
    # xsd datatypes permitted by the phyloXML schema.
    ok_datatype = set(('xsd:string', 'xsd:boolean', 'xsd:decimal', 'xsd:float',
        'xsd:double', 'xsd:duration', 'xsd:dateTime', 'xsd:time', 'xsd:date',
        'xsd:gYearMonth', 'xsd:gYear', 'xsd:gMonthDay', 'xsd:gDay',
        'xsd:gMonth', 'xsd:hexBinary', 'xsd:base64Binary', 'xsd:anyURI',
        'xsd:normalizedString', 'xsd:token', 'xsd:integer',
        'xsd:nonPositiveInteger', 'xsd:negativeInteger', 'xsd:long', 'xsd:int',
        'xsd:short', 'xsd:byte', 'xsd:nonNegativeInteger', 'xsd:unsignedLong',
        'xsd:unsignedInt', 'xsd:unsignedShort', 'xsd:unsignedByte',
        'xsd:positiveInteger'))

    def __init__(self, value, ref, applies_to, datatype,
                 unit=None, id_ref=None):
        # Validate every restricted-string argument before assigning.
        _check_str(ref, self.re_ref.match)
        _check_str(applies_to, self.ok_applies_to.__contains__)
        _check_str(datatype, self.ok_datatype.__contains__)
        _check_str(unit, self.re_ref.match)
        self.value = value
        self.ref = ref
        self.applies_to = applies_to
        self.datatype = datatype
        self.unit = unit
        self.id_ref = id_ref
class ProteinDomain(PhyloElement):
    """Represents an individual domain in a domain architecture.

    The locations use 0-based indexing, as most Python objects including
    SeqFeature do, rather than the usual biological convention starting
    at 1. This means the start and end attributes can be used directly
    as slice indexes on Seq objects.

    :Parameters:
        start : non-negative integer
            start of the domain on the sequence, using 0-based indexing
        end : non-negative integer
            end of the domain on the sequence
        confidence : float
            can be used to store e.g. E-values
        id : string
            unique identifier/name
    """
    def __init__(self, value, start, end, confidence=None, id=None):
        self.value = value
        self.start = start
        self.end = end
        self.confidence = confidence
        self.id = id

    @classmethod
    def from_seqfeature(cls, feat):
        """Create a ProteinDomain from a SeqFeature object."""
        # BUGFIX: use cls rather than the hard-coded class name so that
        # subclasses produce instances of themselves.
        return cls(feat.id,
                   feat.location.nofuzzy_start,
                   feat.location.nofuzzy_end,
                   confidence=feat.qualifiers.get('confidence'))

    def to_seqfeature(self):
        """Convert this domain back into a SeqFeature object."""
        feat = SeqFeature(location=FeatureLocation(self.start, self.end),
                          id=self.value)
        # BUGFIX: the original checked hasattr(self, 'confidence'), which is
        # always true because __init__ sets the attribute; only export a
        # 'confidence' qualifier when a value was actually provided.
        if self.confidence is not None:
            feat.qualifiers['confidence'] = self.confidence
        return feat
class Reference(PhyloElement):
    """Literature reference for a clade.
    NB: Whenever possible, use the ``doi`` attribute instead of the free-text
    ``desc`` element.
    """
    # DOIs look like 'prefix/suffix', e.g. '10.1093/bioinformatics/btq033'.
    re_doi = re.compile(r'[a-zA-Z0-9_\.]+/[a-zA-Z0-9_\.]+')
    def __init__(self, doi=None, desc=None):
        # Validate the DOI format before storing.
        _check_str(doi, self.re_doi.match)
        self.doi = doi
        self.desc = desc
class Sequence(PhyloElement):
    """A molecular sequence (Protein, DNA, RNA) associated with a node.
    One intended use for ``id_ref`` is to link a sequence to a taxonomy (via the
    taxonomy's ``id_source``) in case of multiple sequences and taxonomies per
    node.
    :Parameters:
        type : {'dna', 'rna', 'protein'}
            type of molecule this sequence represents
        id_ref : string
            reference to another resource
        id_source : string
            source for the reference
        symbol : string
            short symbol of the sequence, e.g. 'ACTM' (max. 10 chars)
        accession : Accession
            accession code for this sequence.
        name : string
            full name of the sequence, e.g. 'muscle Actin'
        location
            location of a sequence on a genome/chromosome.
        mol_seq : MolSeq
            the molecular sequence itself
        uri : Uri
            link
        annotations : list of Annotation objects
            annotations on this sequence
        domain_architecture : DomainArchitecture
            protein domains on this sequence
        other : list of Other objects
            non-phyloXML elements
    """
    # Maps each molecule type to the Biopython alphabet get_alphabet() uses.
    alphabets = {'dna': Alphabet.generic_dna,
                 'rna': Alphabet.generic_rna,
                 'protein': Alphabet.generic_protein}
    # Symbols are 1-10 non-whitespace characters.
    re_symbol = re.compile(r'\S{1,10}')
    def __init__(self,
            # Attributes
            type=None, id_ref=None, id_source=None,
            # Child nodes
            symbol=None, accession=None, name=None, location=None,
            mol_seq=None, uri=None, domain_architecture=None,
            # Collections
            annotations=None, other=None,
            ):
        # Validate the restricted-string arguments before assigning.
        _check_str(type, self.alphabets.__contains__)
        _check_str(symbol, self.re_symbol.match)
        self.type = type
        self.id_ref = id_ref
        self.id_source = id_source
        self.symbol = symbol
        self.accession = accession
        self.name = name
        self.location = location
        self.mol_seq = mol_seq
        self.uri = uri
        self.domain_architecture = domain_architecture
        self.annotations = annotations or []
        self.other = other or []
    @classmethod
    def from_seqrecord(cls, record, is_aligned=None):
        """Create a new PhyloXML Sequence from a SeqRecord object."""
        if is_aligned is None:
            # Treat a gapped alphabet as evidence the sequence is aligned.
            is_aligned = isinstance(record.seq.alphabet, Alphabet.Gapped)
        params = {
            # The accession 'source' is left blank: SeqRecord ids carry
            # no separate provider field.
            'accession': Accession(record.id, ''),
            'symbol': record.name,
            'name': record.description,
            'mol_seq': MolSeq(str(record.seq), is_aligned),
            }
        # Derive the molecule type from the record's alphabet, if known.
        if isinstance(record.seq.alphabet, Alphabet.DNAAlphabet):
            params['type'] = 'dna'
        elif isinstance(record.seq.alphabet, Alphabet.RNAAlphabet):
            params['type'] = 'rna'
        elif isinstance(record.seq.alphabet, Alphabet.ProteinAlphabet):
            params['type'] = 'protein'
        # Unpack record.annotations
        for key in ('id_ref', 'id_source', 'location'):
            if key in record.annotations:
                params[key] = record.annotations[key]
        if isinstance(record.annotations.get('uri'), dict):
            params['uri'] = Uri(**record.annotations['uri'])
        # Build a Sequence.annotation object
        if record.annotations.get('annotations'):
            params['annotations'] = []
            for annot in record.annotations['annotations']:
                ann_args = {}
                for key in ('ref', 'source', 'evidence', 'type', 'desc'):
                    if key in annot:
                        ann_args[key] = annot[key]
                # A 2-item list becomes Confidence(value, type).
                if isinstance(annot.get('confidence'), list):
                    ann_args['confidence'] = Confidence(
                            *annot['confidence'])
                # Silently skip any property entry that is not a dict.
                if isinstance(annot.get('properties'), list):
                    ann_args['properties'] = [Property(**prop)
                                              for prop in annot['properties']
                                              if isinstance(prop, dict)]
                params['annotations'].append(Annotation(**ann_args))
        # Unpack record.features
        if record.features:
            params['domain_architecture'] = DomainArchitecture(
                    length=len(record.seq),
                    domains=[ProteinDomain.from_seqfeature(feat)
                             for feat in record.features])
        return Sequence(**params)
    def to_seqrecord(self):
        """Create a SeqRecord object from this Sequence instance.
        The seqrecord.annotations dictionary is packed like so::
            { # Sequence attributes with no SeqRecord equivalent:
              'id_ref':     self.id_ref,
              'id_source':  self.id_source,
              'location':   self.location,
              'uri':        { 'value': self.uri.value,
                              'desc': self.uri.desc,
                              'type': self.uri.type },
              # Sequence.annotations attribute (list of Annotations)
              'annotations': [{'ref':      ann.ref,
                               'source':   ann.source,
                               'evidence': ann.evidence,
                               'type':     ann.type,
                               'confidence': [ann.confidence.value,
                                              ann.confidence.type],
                               'properties': [{'value':      prop.value,
                                                'ref':        prop.ref,
                                                'applies_to': prop.applies_to,
                                                'datatype':   prop.datatype,
                                                'unit':       prop.unit,
                                                'id_ref':     prop.id_ref}
                                               for prop in ann.properties],
                              } for ann in self.annotations],
              }
        """
        def clean_dict(dct):
            """Remove None-valued items from a dictionary."""
            return dict((key, val) for key, val in dct.items()
                        if val is not None)
        seqrec = SeqRecord(Seq(self.mol_seq.value, self.get_alphabet()),
                           **clean_dict({'id': str(self.accession),
                                         'name': self.symbol,
                                         'description': self.name,
                                         # 'dbxrefs': None,
                                         }))
        if self.domain_architecture:
            seqrec.features = [dom.to_seqfeature()
                               for dom in self.domain_architecture.domains]
        # Sequence attributes with no SeqRecord equivalent
        seqrec.annotations = clean_dict({
                'id_ref': self.id_ref,
                'id_source': self.id_source,
                'location': self.location,
                'uri': self.uri and clean_dict({
                    'value': self.uri.value,
                    'desc': self.uri.desc,
                    'type': self.uri.type,
                    }),
                'annotations': self.annotations and [
                    clean_dict({
                        'ref': ann.ref,
                        'source': ann.source,
                        'evidence': ann.evidence,
                        'type': ann.type,
                        'confidence': ann.confidence and [
                            ann.confidence.value,
                            ann.confidence.type],
                        'properties': [clean_dict({
                                            'value': prop.value,
                                            'ref': prop.ref,
                                            'applies_to': prop.applies_to,
                                            'datatype': prop.datatype,
                                            'unit': prop.unit,
                                            'id_ref': prop.id_ref})
                                        for prop in ann.properties],
                        }) for ann in self.annotations],
                })
        return seqrec
    def get_alphabet(self):
        """Return the Biopython alphabet matching this sequence's type."""
        alph = self.alphabets.get(self.type, Alphabet.generic_alphabet)
        if self.mol_seq and self.mol_seq.is_aligned:
            # Aligned sequences may contain gap characters.
            return Alphabet.Gapped(alph)
        return alph
class SequenceRelation(PhyloElement):
    """Express a typed relationship between two sequences.

    For example, this could be used to describe an orthology (in which
    case attribute 'type' is 'orthology').

    :Parameters:
        id_ref_0 : Id
            first sequence reference identifier
        id_ref_1 : Id
            second sequence reference identifier
        distance : float
            distance between the two sequences
        type : restricted string
            describe the type of relationship
        confidence : Confidence
            confidence value for this relation
    """
    # Relation types permitted by the phyloXML schema.
    ok_type = set(('orthology', 'one_to_one_orthology', 'super_orthology',
                   'paralogy', 'ultra_paralogy', 'xenology', 'unknown',
                   'other'))

    def __init__(self, type, id_ref_0, id_ref_1,
                 distance=None, confidence=None):
        # Validate the relationship type before assigning anything.
        _check_str(type, self.ok_type.__contains__)
        self.type = type
        self.id_ref_0 = id_ref_0
        self.id_ref_1 = id_ref_1
        self.distance = distance
        self.confidence = confidence
class Taxonomy(PhyloElement):
    """Describe taxonomic information for a clade.

    :Parameters:
        id_source : Id
            link other elements to a taxonomy (on the XML level)
        id : Id
            unique identifier of a taxon, e.g. Id('6500',
            provider='ncbi_taxonomy') for the California sea hare
        code : restricted string
            store UniProt/Swiss-Prot style organism codes, e.g. 'APLCA'
            for the California sea hare 'Aplysia californica'
        scientific_name : string
            the standard scientific name for this organism, e.g. 'Aplysia
            californica' for the California sea hare
        authority : string
            keep the authority, such as 'J. G. Cooper, 1863', associated
            with the 'scientific_name'
        common_names : list of strings
            common names for this organism
        synonyms : list of strings
            synonyms for this taxon?
        rank : restricted string
            taxonomic rank
        uri : Uri
            link
        other : list of Other objects
            non-phyloXML elements
    """
    # Organism codes: 2-10 alphanumeric/underscore characters.
    re_code = re.compile(r'[a-zA-Z0-9_]{2,10}')
    # Taxonomic ranks permitted by the phyloXML schema.
    ok_rank = set(('domain', 'kingdom', 'subkingdom', 'branch', 'infrakingdom',
        'superphylum', 'phylum', 'subphylum', 'infraphylum', 'microphylum',
        'superdivision', 'division', 'subdivision', 'infradivision',
        'superclass', 'class', 'subclass', 'infraclass', 'superlegion',
        'legion', 'sublegion', 'infralegion', 'supercohort', 'cohort',
        'subcohort', 'infracohort', 'superorder', 'order', 'suborder',
        'superfamily', 'family', 'subfamily', 'supertribe', 'tribe', 'subtribe',
        'infratribe', 'genus', 'subgenus', 'superspecies', 'species',
        'subspecies', 'variety', 'subvariety', 'form', 'subform', 'cultivar',
        'unknown', 'other'))

    def __init__(self, id_source=None,
                 id=None, code=None, scientific_name=None, authority=None,
                 rank=None, uri=None,
                 common_names=None, synonyms=None, other=None):
        # Validate the restricted-string arguments up front.
        _check_str(code, self.re_code.match)
        _check_str(rank, self.ok_rank.__contains__)
        self.id_source = id_source
        self.id = id
        self.code = code
        self.scientific_name = scientific_name
        self.authority = authority
        self.rank = rank
        self.uri = uri
        self.common_names = common_names or []
        self.synonyms = synonyms or []
        self.other = other or []

    def __str__(self):
        """Show the most informative identifying attribute available."""
        for value in (self.code, self.scientific_name, self.rank):
            if value is not None:
                return value
        if self.id is not None:
            return str(self.id)
        return self.__class__.__name__
class Uri(PhyloElement):
    """A uniform resource identifier.

    In general, this is expected to be an URL (for example, to link to an
    image on a website, in which case the ``type`` attribute might be
    'image' and ``desc`` might be 'image of a California sea hare').
    """
    def __init__(self, value, desc=None, type=None):
        self.value = value
        self.desc = desc
        self.type = type

    def __str__(self):
        """Return the URI value, falling back to repr() when it is empty."""
        return self.value if self.value else repr(self)
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Phylo/PhyloXML.py | Python | gpl-2.0 | 45,819 | [
"Biopython"
] | ef723b26c783f9e37f0b43f49f20224bf4eba261ec0fba1aced372c4a48f4ab1 |
"""
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients.The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title('Canny filter, $\sigma=3$', fontsize=20)
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| michaelpacer/scikit-image | doc/examples/plot_canny.py | Python | bsd-3-clause | 1,633 | [
"Gaussian"
] | e04ae8833be2f50d2b8acb28424a569f8128942323f47273acad5257a0640a4f |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.