| text (stringlengths 12 – 1.05M) | repo_name (stringlengths 5 – 86) | path (stringlengths 4 – 191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12 – 1.05M) | keyword (listlengths 1 – 23) | text_hash (stringlengths 64 – 64) |
|---|---|---|---|---|---|---|---|
if ('PFVFLOW' in features) and ('TWOPHASEFLOW' in features):
from yade import pack
from yade import export
from yade import timing
from yade import plot
import time
from math import *
num_spheres=1000  # number of spheres
young=1e6
compFricDegree = 3 # initial contact friction during the confining phase
finalFricDegree = 30 # contact friction during the deviatoric loading
mn,mx=Vector3(0,0,0),Vector3(1,1,0.4) # corners of the initial packing
graindensity=2600
toleranceWarning = 1.e-11
toleranceCritical = 1.e-6
O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=radians(compFricDegree),density=graindensity,label='spheres'))
O.materials.append(FrictMat(young=young,poisson=0.5,frictionAngle=0,density=0,label='walls'))
walls=aabbWalls([mn,mx],thickness=0,material='walls')
wallIds=O.bodies.append(walls)
sp=pack.SpherePack()
sp.makeCloud(mn,mx,-1,0.3333,num_spheres,False, 0.95,seed=1) #"seed" make the "random" generation always the same
sp.toSimulation(material='spheres')
triax=TriaxialStressController(
maxMultiplier=1.+2e4/young, # spheres growing factor (fast growth)
finalMaxMultiplier=1.+2e3/young, # spheres growing factor (slow growth)
thickness = 0,
stressMask = 7,
max_vel = 0.005,
internalCompaction=True, # If true the confining pressure is generated by growing particles
)
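# Note (editor's reading of the YADE API): stressMask=7 sets bits 1+2+4, i.e. it
# requests stress (rather than strain) control on all three axes; the stress
# goals themselves are assigned below via goal1..goal3.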
newton=NewtonIntegrator(damping=0.2)
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()],label="iloop"
),
TwoPhaseFlowEngine(dead=1,label="flow"),#introduced as a dead engine for the moment, see 2nd section
GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=100,timestepSafetyCoefficient=0.8,label='timestepper'),
triax,
newton
]
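# Engine ordering matters: forces are reset first, then collisions are detected,
# contact forces computed, the (for now dead) flow engine skipped, the adaptive
# timestepper run, the triaxial stress controller applied, and finally the
# Newton integrator advances the bodies.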
triax.goal1=triax.goal2=triax.goal3=-10000
while 1:
O.run(1000, True)
unb=unbalancedForce()
if unb<0.001 and abs(-10000-triax.meanStress)/10000<0.001:
break
setContactFriction(radians(finalFricDegree))
radius=0
for b in O.bodies:
if b.state.mass==0:
b.state.blockedDOFs='xyzXYZ'
b.state.vel=(0,0,0)
b.state.angVel=(0,0,0)
if b.state.mass>0:
radius+=b.shape.radius
#b.state.blockedDOFs='xyz'
#b.state.vel=(0,0,0)
radius=radius/num_spheres
triax.dead=True
while 1:
O.run(1000, True)
unb=unbalancedForce()
if unb<0.001:
break
press=1000.
O.run(10,1)
flow.dead=0
flow.meshUpdateInterval=-1
flow.useSolver=3
flow.permeabilityFactor=1
flow.viscosity=0.1
flow.bndCondIsWaterReservoir=[0,0,1,0,0,0]
flow.bndCondIsPressure=[0,0,1,0,0,0]
flow.bndCondValue=[0,0,press,0,0,0]
flow.boundaryUseMaxMin=[0,0,0,0,0,0]
flow.iniVoidVolumes=True
newton.damping=0.1
timestepper.dead=True # disable the adaptive timestepper via its label (assigning to the GlobalStiffnessTimeStepper class would not affect the engine instance)
O.dt=min(0.8*PWaveTimeStep(),0.8*1./1200.*pi/flow.viscosity*graindensity*radius**2)
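# The fixed timestep is the minimum of the usual DEM stability limit (a 0.8
# fraction of the P-wave timestep) and what appears to be a viscous-coupling
# stability estimate scaling as density*radius^2/viscosity; the pi/1200 factor
# reads as an empirical safety coefficient of this script.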
O.dynDt=False
flow.surfaceTension = 0.0
flow.drainageFirst=False
flow.isDrainageActivated=False
flow.isImbibitionActivated=True
flow.isCellLabelActivated=True
flow.initialization()
cs=flow.getClusters()
c0=cs[1]
voidvol=0.0
voidvoltot=0.0
nvoids=flow.nCells()
initialvol=[0] * (nvoids)
bar=[0] * (nvoids)
initiallevel=O.bodies[flow.wallIds[flow.ymin]].state.pos[1]+(O.bodies[flow.wallIds[flow.ymax]].state.pos[1]-O.bodies[flow.wallIds[flow.ymin]].state.pos[1])/3
for ii in range(nvoids):
initialvol[ii]=1./flow.getCellInvVoidVolume(ii)
voidvoltot+=initialvol[ii]
bar[ii]=flow.getCellBarycenter(ii)[1]
iniok=0
while (iniok==0):
celleini1=[nvoids+1] * (nvoids)
celleini0=[0] * (nvoids)
for ii in range(len(c0.getInterfaces())):
if bar[c0.getInterfaces()[ii][1]]<initiallevel:
if celleini1[c0.getInterfaces()[ii][1]]==nvoids+1:
celleini1[c0.getInterfaces()[ii][1]]=ii
celleini0[c0.getInterfaces()[ii][1]]=c0.getInterfaces()[ii][0]
for ii in range(nvoids):
if celleini1[ii]!=nvoids+1:
flow.clusterOutvadePore(celleini0[ii],ii)
no=0
for ii in range(nvoids):
if bar[ii]<initiallevel:
if flow.getCellLabel(ii)==0:
no=1
if no==0:
iniok=1
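# once no pore with barycenter below 'initiallevel' carries label 0 (air)
# anymore, the initial wetting front is fully seeded and the while loop above
# terminates; otherwise another outvasion pass is run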
for ii in range(len(c0.getInterfaces())):
c0.setCapVol(ii,0.0)
c0.solvePressure()
flow.computeCapillaryForce()
for b in O.bodies:
O.forces.setPermF(b.id, flow.fluidForce(b.id))
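# setPermF applies the fluid force on each body as a 'permanent' force, i.e. it
# persists across iterations until it is overwritten here again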
O.run(1,1)
flow.savePhaseVtk("./vtk",True)
timeini=O.time
ini=O.iter
Qin=0.0
#Qout=0.0
totalflux=[0] * (nvoids)
#totalCellSat=0.0
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
voidvol+=initialvol[ii]
bubble=0
dd=0.0
celleok=[0] * (nvoids)
deltabubble=0
col0=[0] * (nvoids)
neighK=[0.0] * (nvoids) #FIXME: after remeshing the size will be invalid since nvoids can change, initializations will have to go in the function itself
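# pressureImbibition(): explicit mass-balance imbibition step, as read from the
# code below. For every air-filled pore (label 0) the incoming flux is
# integrated over O.dt; once the cumulative volume reaches the pore volume the
# pore is invaded (clusterOutvadePore) and any excess volume is pushed to the
# neighbouring interfaces in proportion to their conductivity. Excess with no
# conducting neighbour is booked as a trapped air bubble (deltabubble). The
# cascade is repeated up to three times per step before the cluster pressure is
# re-solved.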
def pressureImbibition():
global Qin,total2,dd,deltabubble,bubble
c0.updateCapVolList(O.dt)
Qin+=-1.*(flow.getBoundaryFlux(flow.wallIds[flow.ymin]))*O.dt
#Qout+=(flow.getBoundaryFlux(flow.wallIds[flow.ymax]))*O.dt
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
totalflux[ii]+=-1.*flow.getCellFluxFromId(ii)*O.dt
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
#dd+=delta[ii]
# advice:
# never call 'getInterfaces()' inside a loop, it's expensive; get the list once outside the loop
# and fetch it again only when you know it could have changed (the cluster got/lost pores).
# Only the first loop below is being fixed this way (the old version is left commented).
#
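# A minimal sketch of that cached pattern (hypothetical variable name), valid
# only as long as no clusterOutvadePore call happens between the snapshot and
# its last use:
#   interfaces = c0.getInterfaces()
#   for ii in range(len(interfaces)):
#       ll = interfaces[ii][1]
#       ...
#   # after any outvasion the topology changed: re-fetch with getInterfaces()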
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
neighK[ll]+=c0.getConductivity(ii)
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
c0.setCapVol(ii,delta[ll]/neighK[ll]*c0.getConductivity(ii))
totalflux[c0.getInterfaces()[ii][1]]+=delta[ll]/neighK[ll]*c0.getConductivity(ii)
for ii in range(nvoids):
if delta[ii]!=0:
if neighK[ii]==0:
deltabubble+=delta[ii]
bubble+=1
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
#dd+=delta[ii]
if col1!=[0] * (nvoids):
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
neighK[ll]+=c0.getConductivity(ii)
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][0]
if delta[ll]!=0:
c0.setCapVol(ii,delta[ll]/neighK[ll]*c0.getConductivity(ii))
totalflux[c0.getInterfaces()[ii][1]]+=delta[ll]/neighK[ll]*c0.getConductivity(ii)
for ii in range(nvoids):
if delta[ii]!=0:
if neighK[ii]==0:
deltabubble+=delta[ii]
bubble+=1
col1=[0] * (nvoids)
delta=[0.0] * (nvoids)
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
if (totalflux[ii])>=initialvol[ii]:
col1[ii]=1
if (totalflux[ii])>initialvol[ii]:
delta[ii]=totalflux[ii]-initialvol[ii]
totalflux[ii]+=-1*delta[ii]
dd+=delta[ii]
print(O.iter,'waterloss',ii,delta[ii])
if col1!=[0] * (nvoids):
for ii in range(len(c0.getInterfaces())):
ll=c0.getInterfaces()[ii][1]
if col1[ll]==1:
if celleok[ll]==0:
celleok[ll]=1
col0[ll]=c0.getInterfaces()[ii][0]
for jj in range(nvoids):
if col1[jj]==1:
flow.clusterOutvadePore(col0[jj],jj)
#totalCellSat+=initialvol[jj]
total2=0.0
for ii in range(nvoids):
total2+=totalflux[ii]
c0.solvePressure()
flow.computeCapillaryForce()
for b in O.bodies:
O.forces.setPermF(b.id, flow.fluidForce(b.id))
file=open('Test.txt',"w")
checkdifference=0
def equilibriumtest():
global F33,F22,checkdifference
#unbalanced=utils.unbalancedForce()
F33=abs(O.forces.f(flow.wallIds[flow.ymax])[1])
F22=abs(O.forces.f(flow.wallIds[flow.ymin])[1])
#F11 =abs(O.forces.f(flow.wallIds[flow.xmax])[0]),
#F00=abs(O.forces.f(flow.wallIds[flow.xmin])[0]),
#F44=abs(O.forces.f(flow.wallIds[flow.zmin])[2]),
#F55=abs(O.forces.f(flow.wallIds[flow.zmax])[2]),
deltaF=abs(F33-F22)
file.write(str(O.iter)+" "+str(F33)+" "+str(F22)+" "+str(deltaF)+"\n")
if O.time>=timeini+1.5:
if checkdifference==0:
print('check F done')
if deltaF>0.01*press:
raise YadeCheckError('Error: the difference between the forces acting on the bottom and top walls is too large')
#O.pause()
checkdifference=1
once=0
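# fluxtest(): conservation check. The water injected through the bottom wall
# (Qin), corrected for bubble losses, must match the volume accumulated in the
# invaded pores (total2); once no label-0 pore remains, the filled volume is
# also compared against the initial dry void volume (voidvol).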
def fluxtest():
global once,QinOk
no=0
QinOk=Qin-deltabubble
error=QinOk-total2
if error>toleranceWarning:
print("Warning: difference between total water volume flowing through bottom wall and water loss due to air bubble generations",QinOk," vs. total water volume flowing inside dry or partially saturated cells",total2)
if error>toleranceCritical:
raise YadeCheckError("The difference is more, than the critical tolerance!")
file.write(str(O.time-timeini)+" "+str(total2)+" "+str(QinOk)+" "+str(error)+"\n")
for ii in range(nvoids):
if flow.getCellLabel(ii)==0:
no=1
if once==0:
if no==0:
imbtime=O.time-timeini
print(imbtime,voidvol,total2,QinOk)
if voidvol-total2>toleranceWarning:
print("Warning: initial volume of dry voids",voidvol," vs. total water volume flowing inside dry or partially saturated cells",total2)
if voidvol-total2>toleranceCritical:
raise YadeCheckError("The difference is more, than the critical tolerance!")
file.write(str(imbtime)+" "+str(voidvol)+" "+str(total2)+" "+str(QinOk)+"\n")
once=1
timing.stats()
def addPlotData():
plot.addData(i1=O.iter,
t=O.time,
Fupper=F33,
Fbottom=F22,
Q=QinOk,
T=total2
)
plot.live=True
plot.plots={' t ':('Fupper','Fbottom'),'t':('Q','T')}
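# the padded key ' t ' keeps the two subplots distinct: plot.plots keys must be
# unique, so the same time variable is registered under two different strings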
plot.plot()
def pl():
flow.savePhaseVtk("./vtk",True)
O.engines=O.engines+[PyRunner(iterPeriod=100,command='pl()')]
#O.engines=O.engines+[VTKRecorder(iterPeriod=100,recorders=['spheres'],fileName='./exp')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='equilibriumtest()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='pressureImbibition()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='fluxtest()')]
O.engines=O.engines+[PyRunner(iterPeriod=1,command='addPlotData()')]
O.timingEnabled=True
#file.close()
#plot.saveDataTxt('plots.txt',vars=('i1','t','Fupper','Fbottom','Q','T'))
#O.run(1,1)
import tempfile, shutil, os
dirpath = tempfile.mkdtemp()
for fileName in ['./vtk', './Test.txt' ]:
if (os.path.exists(fileName)): shutil.move(fileName,dirpath)
print("File %s moved into %s/ directory"%(fileName,dirpath))
else:
print("This check_TwoPhaseFlowEngine_PressureInjection.py cannot be executed because TWOPHASEFLOW or PFVFLOW are disabled")
| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
| cosurgi/trunk | scripts/checks-and-tests/checks/check_TwoPhaseFlowEngine_PressureInjection.py | Python | gpl-2.0 | 13,370 | ["VTK"] | f2f32c6cd4d996a79c08508abefd6be36acdcbed8f9a40e84e6c241cba4ad55d |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
You can specify a particular test class or method on the command line:
tools/server_tests ConfigTests
tools/server_tests PersonNoteTests.test_delete_and_restore
"""
import datetime
import difflib
import inspect
import logging
import optparse
import os
import re
import signal
import smtpd
import subprocess
import sys
import threading
import time
import traceback
import unittest
import calendar
import config
from model import *
import remote_api
import reveal
import scrape
import setup
from test_pfif import text_diff
import utils
from utils import PERSON_STATUS_TEXT, NOTE_STATUS_TEXT
NOTE_STATUS_OPTIONS = [
'',
'information_sought',
'is_note_author',
'believed_alive',
'believed_missing',
'believed_dead'
]
last_star = time.time() # timestamp of the last message that started with '*'.
def log(message, *args):
"""Prints a timestamped message to stderr (handy for debugging or profiling
tests). If the message starts with '*', the clock will be reset to zero."""
global last_star
now = time.time()
if isinstance(message, unicode):
message = message.encode('utf-8')
else:
message = str(message)
print >>sys.stderr, '%6.2f:' % (now - last_star), message, args or ''
if message[:1] == '*':
last_star = now
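# Example (sketch): log('*starting suite') resets the elapsed-time clock, and a
# later log('step done') prints the seconds elapsed since that starred message.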
def timed(function):
def timed_function(*args, **kwargs):
start = time.time()
try:
function(*args, **kwargs)
finally:
print '%s: %.2f s' % (function.__name__, time.time() - start)
return timed_function
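# Example (sketch, hypothetical function name):
#
#   @timed
#   def run_one_test():
#       ...   # prints 'run_one_test: 1.23 s' when it returns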
class ProcessRunner(threading.Thread):
"""A thread that starts a subprocess, collects its output, and stops it."""
READY_RE = re.compile('') # this output means the process is ready
OMIT_RE = re.compile('INFO ') # omit these lines from the displayed output
ERROR_RE = re.compile('ERROR|CRITICAL') # this output indicates failure
def __init__(self, name, args):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.process = None # subprocess.Popen instance
self.ready = False # process is running and ready
self.failed = False # process emitted an error message in its output
self.output = []
def run(self):
"""Starts the subprocess and collects its output while it runs."""
self.process = subprocess.Popen(
self.args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
# Each subprocess needs a thread to be watching it and absorbing its
# output; otherwise it will block when its stdout pipe buffer fills.
while self.process.poll() is None:
line = self.process.stdout.readline()
if not line: # process finished
return
if self.READY_RE.search(line):
self.ready = True
if self.OMIT_RE.search(line): # filter out these lines
continue
if self.ERROR_RE.search(line): # something went wrong
self.failed = True
if line.strip():
self.output.append(line.strip())
def stop(self):
"""Terminates the subprocess and returns its status code."""
if self.process: # started
if self.isAlive(): # still running
os.kill(self.process.pid, signal.SIGKILL)
else:
self.failed = self.process.returncode != 0
self.clean_up()
if self.failed:
self.flush_output()
print >>sys.stderr, '%s failed (status %s).\n' % (
self.name, self.process.returncode)
else:
print >>sys.stderr, '%s stopped.' % self.name
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, lines_to_print = [], self.output
if lines_to_print:
print >>sys.stderr
for line in lines_to_print:
print >>sys.stderr, self.name + ': ' + line
def wait_until_ready(self, timeout=10):
"""Waits until the subprocess has logged that it is ready."""
fail_time = time.time() + timeout
while self.isAlive() and not self.ready and time.time() < fail_time:
for jiffy in range(10): # wait one second, aborting early if ready
if not self.ready:
time.sleep(0.1)
if not self.ready:
self.flush_output() # after each second, show output
if self.ready:
print >>sys.stderr, '%s started.' % self.name
else:
raise RuntimeError('%s failed to start.' % self.name)
def clean_up(self):
pass
class AppServerRunner(ProcessRunner):
"""Manages a dev_appserver subprocess."""
READY_RE = re.compile('Running application ' + remote_api.get_app_id())
def __init__(self, port, smtp_port):
self.datastore_path = '/tmp/dev_appserver.datastore.%d' % os.getpid()
ProcessRunner.__init__(self, 'appserver', [
os.environ['PYTHON'],
os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
os.environ['APP_DIR'],
'--port=%s' % port,
'--clear_datastore',
'--datastore_path=%s' % self.datastore_path,
'--require_indexes',
'--smtp_host=localhost',
'--smtp_port=%d' % smtp_port
])
def clean_up(self):
if os.path.exists(self.datastore_path):
os.unlink(self.datastore_path)
class MailThread(threading.Thread):
"""Runs an SMTP server and stores the incoming messages."""
messages = []
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
self.stop_requested = False
def run(self):
class MailServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
MailThread.messages.append(
{'from': mailfrom, 'to': rcpttos, 'data': data})
server = MailServer(('localhost', self.port), None)
print >>sys.stderr, 'SMTP server started.'
while not self.stop_requested:
smtpd.asyncore.loop(timeout=0.5, count=1)
print >>sys.stderr, 'SMTP server stopped.'
def stop(self):
self.stop_requested = True
def wait_until_ready(self, timeout=10):
pass
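# Tests inspect the captured mail directly, e.g. (sketch; the address is purely
# illustrative, see verify_email_sent below for the real usage):
#   assert len(MailThread.messages) == 1
#   assert MailThread.messages[0]['to'] == ['someone@example.com']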
def get_test_data(filename):
return open(os.path.join(remote_api.TESTS_DIR, filename)).read()
def reset_data():
"""Reset the datastore to a known state, populated with test data."""
setup.reset_datastore()
db.put([
Authorization.create(
'haiti', 'test_key', domain_write_permission='test.google.com'),
Authorization.create(
'haiti', 'other_key', domain_write_permission='other.google.com'),
Authorization.create(
'haiti', 'read_key', read_permission=True),
Authorization.create(
'haiti', 'full_read_key', full_read_permission=True),
Authorization.create(
'haiti', 'search_key', search_permission=True)
])
def assert_params_conform(url, required_params=None, forbidden_params=None):
"""Enforces the presence and non-presence of URL parameters.
If required_params or forbidden_params is set, this function asserts that
the given URL contains or does not contain those parameters, respectively.
"""
required_params = required_params or {}
forbidden_params = forbidden_params or {}
# TODO(kpy): Decode the URL, don't match against it directly like this.
for key, value in required_params.iteritems():
param_regex = r'\b%s=%s\b' % (re.escape(key), re.escape(value))
assert re.search(param_regex, url), \
'URL %s must contain %s=%s' % (url, key, value)
for key, value in forbidden_params.iteritems():
param_regex = r'\b%s=%s\b' % (re.escape(key), re.escape(value))
assert not re.search(param_regex, url), \
'URL %s must not contain %s=%s' % (url, key, value)
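# Example (taken from the tests below): require role=seek and forbid small=yes:
#   assert_params_conform(url, {'role': 'seek'}, {'small': 'yes'})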
class TestsBase(unittest.TestCase):
"""Base class for test cases."""
verbose = 0
hostport = None
kinds_written_by_tests = []
default_test_time = datetime.datetime(2010, 1, 2, 3, 4, 5)
debug = False
def get_debug(self):
return self.debug
def set_debug(self, dbg):
self.debug = dbg
def debug_print(self, msg):
"""Echo useful stuff to stderr, encoding to preserve sanity."""
if self.get_debug():
print >>sys.stderr, msg.encode('ascii', 'ignore')
def setUp(self):
"""Sets up a scrape Session for each test."""
# See http://zesty.ca/scrape for documentation on scrape.
self.s = scrape.Session(verbose=self.verbose)
self.logged_in_as_admin = False
MailThread.messages = []
def path_to_url(self, path):
return 'http://%s%s' % (self.hostport, path)
def go(self, path, **kwargs):
"""Navigates the scrape Session to the given path on the test server."""
return self.s.go(self.path_to_url(path), **kwargs)
def tearDown(self):
"""Resets the datastore by deleting anything written during a test."""
# make sure we reset current time as well.
self.set_utcnow_for_test(date_time=None)
self.set_debug(TestsBase.debug)
if self.kinds_written_by_tests:
setup.wipe_datastore(*self.kinds_written_by_tests)
def set_utcnow_for_test(self, date_time=None):
"""Set utc timestamp locally and on the server.
Args:
date_time: a datetime object, or None to reset to wall time.
"""
utils.set_utcnow_for_test(date_time)
new_utcnow = '' # If date_time is None, the parameter should be empty.
if date_time:
new_utcnow = calendar.timegm(date_time.utctimetuple())
self.get_url_as_admin(
'/admin/set_utcnow_for_test?test_mode=yes&utcnow=%s' % new_utcnow)
self.debug_print('set utcnow to %s: %s' %
(date_time, self.s.doc.content))
def get_url_as_admin(self, path):
'''Authenticate as admin and continue to the provided path.
# TODO(lschumacher): update other logins to use this.
Args:
path - path to continue, including leading /.
Returns:
true if status == 200.'''
if not self.logged_in_as_admin:
self.go('/_ah/login?continue=%s' % self.path_to_url(path))
self.debug_print(
'get_url_as_admin %s: %s' % (path, self.s.doc.content))
login_form = self.s.doc.first('form')
self.s.submit(login_form, admin='True', action='Login')
self.logged_in_as_admin = self.s.status == 200
# already logged in, so fetch the path directly. We do this unconditionally
# since sometimes 'continue' doesn't seem to work quite right.
self.go(path)
self.debug_print(
u'got_url_as_admin %s: %s' % (path, self.s.doc.content))
return self.s.status == 200
class ReadOnlyTests(TestsBase):
"""Tests that don't modify data go here."""
def test_main(self):
"""Check the main page with no language specified."""
doc = self.go('/?subdomain=haiti')
assert 'I\'m looking for someone' in doc.text
def test_main_english(self):
"""Check the main page with English language specified."""
doc = self.go('/?subdomain=haiti&lang=en')
assert 'I\'m looking for someone' in doc.text
def test_main_french(self):
"""Check the French main page."""
doc = self.go('/?subdomain=haiti&lang=fr')
assert 'Je recherche quelqu\'un' in doc.text
def test_main_creole(self):
"""Check the Creole main page."""
doc = self.go('/?subdomain=haiti&lang=ht')
assert u'Mwen ap ch\u00e8che yon moun' in doc.text
def test_language_links(self):
"""Check that the language links go to the translated main page."""
doc = self.go('/?subdomain=haiti')
doc = self.s.follow(u'espa\u00f1ol')
assert 'Busco a alguien' in doc.text
doc = self.s.follow(u'Fran\u00e7ais')
assert 'Je recherche quelqu\'un' in doc.text
doc = self.go('/?subdomain=pakistan')
doc = self.s.follow(u'\u0627\u0631\u062f\u0648')
assert (u'\u0645\u06CC\u06BA \u06A9\u0633\u06CC \u06A9\u0648 ' +
u'\u062A\u0644\u0627\u0634 \u06A9\u0631 ' +
u'\u0631\u06C1\u0627 \u06C1\u0648') in doc.text
doc = self.s.follow(u'English')
assert 'I\'m looking for someone' in doc.text
def test_language_xss(self):
"""Regression test for an XSS vulnerability in the 'lang' parameter."""
doc = self.go('/?subdomain=haiti&lang="<script>alert(1)</script>')
assert '<script>' not in doc.content
def test_query(self):
"""Check the query page."""
doc = self.go('/query?subdomain=haiti')
button = doc.firsttag('input', type='submit')
assert button['value'] == 'Search for this person'
doc = self.go('/query?subdomain=haiti&role=provide')
button = doc.firsttag('input', type='submit')
assert button['value'] == 'Provide information about this person'
def test_results(self):
"""Check the results page."""
doc = self.go('/results?subdomain=haiti&query=xy')
assert 'We have nothing' in doc.text
def test_create(self):
"""Check the create page."""
doc = self.go('/create?subdomain=haiti')
assert 'Identify who you are looking for' in doc.text
doc = self.go('/create?subdomain=haiti&role=provide')
assert 'Identify who you have information about' in doc.text
def test_view(self):
"""Check the view page."""
doc = self.go('/view?subdomain=haiti')
assert 'No person id was specified' in doc.text
def test_multiview(self):
"""Check the multiview page."""
doc = self.go('/multiview?subdomain=haiti')
assert 'Compare these records' in doc.text
def test_photo(self):
"""Check the photo page."""
doc = self.go('/photo?subdomain=haiti')
assert 'No photo id was specified' in doc.text
def test_static(self):
"""Check that the static files are accessible."""
doc = self.go('/static/no-photo.gif?subdomain=haiti')
assert doc.content.startswith('GIF89a')
doc = self.go('/static/style.css?subdomain=haiti')
assert 'body {' in doc.content
def test_embed(self):
"""Check the embed page."""
doc = self.go('/embed?subdomain=haiti')
assert 'Embedding' in doc.text
def test_gadget(self):
"""Check the gadget page."""
doc = self.go('/gadget?subdomain=haiti')
assert '<Module>' in doc.content
assert 'application/xml' in self.s.headers['content-type']
def test_sitemap(self):
"""Check the sitemap generator."""
doc = self.go('/sitemap?subdomain=haiti')
assert '</sitemapindex>' in doc.content
doc = self.go('/sitemap?subdomain=haiti&shard_index=1')
assert '</urlset>' in doc.content
def test_config_subdomain_titles(self):
doc = self.go('/?subdomain=haiti')
assert 'Haiti Earthquake' in doc.first('h1').text
doc = self.go('/?subdomain=pakistan')
assert 'Pakistan Floods' in doc.first('h1').text
def test_config_language_menu_options(self):
doc = self.go('/?subdomain=haiti')
assert doc.first('a', u'Fran\xe7ais')
assert doc.first('a', u'Krey\xf2l')
assert not doc.all('a',u'\u0627\u0631\u062F\u0648') # Urdu
doc = self.go('/?subdomain=pakistan')
assert doc.first('a',u'\u0627\u0631\u062F\u0648') # Urdu
assert not doc.all('a', u'Fran\xe7ais')
def test_config_keywords(self):
doc = self.go('/?subdomain=haiti')
meta = doc.firsttag('meta', name='keywords')
assert 'tremblement' in meta['content']
doc = self.go('/?subdomain=pakistan')
meta = doc.firsttag('meta', name='keywords')
assert 'pakistan flood' in meta['content']
class PersonNoteTests(TestsBase):
"""Tests that modify Person and Note entities in the datastore go here.
The contents of the datastore will be reset for each test."""
kinds_written_by_tests = [Person, Note, UserActionLog]
def assert_error_deadend(self, page, *fragments):
"""Assert that the given page is a dead-end.
Checks to make sure there's an error message that contains the given
fragments. On failure, fail assertion. On success, step back.
"""
error_message = page.first(class_=re.compile(r'.*\berror\b.*'))
for fragment in fragments:
assert fragment in error_message.text, (
'%s missing from error message' % fragment)
self.s.back()
# The verify_ functions below implement common fragments of the testing
# workflow that are assembled below in the test_ methods.
def verify_results_page(self, num_results, all_have=(), some_have=(), status=()):
"""Verifies conditions on the results page common to seeking and
providing. Verifies that all of the results contain all of the
strings in all_have and that at least one of the results has each
of some_have.
Precondition: the current session must be on the results page
Postcondition: the current session is still on the results page
"""
# Check that the results are as expected
result_titles = self.s.doc.all(class_='resultDataTitle')
assert len(result_titles) == num_results
for title in result_titles:
for text in all_have:
assert text in title.content, \
'%s must have %s' % (title.content, text)
for text in some_have:
assert any(text in title.content for title in result_titles), \
'One of %s must have %s' % (result_titles, text)
if status:
result_statuses = self.s.doc.all(class_='resultDataPersonFound')
assert len(result_statuses) == len(status)
for expected_status, result_status in zip(status, result_statuses):
assert expected_status in result_status.content, \
'"%s" missing expected status: "%s"' % (
result_status, expected_status)
def verify_unsatisfactory_results(self):
"""Verifies the clicking the button at the bottom of the results page.
Precondition: the current session must be on the results page
Postcondition: the current session is on the create new record page
"""
# Click the button to create a new record
found = False
for results_form in self.s.doc.all('form'):
if 'Create a new record' in results_form.content:
self.s.submit(results_form)
found = True
assert found, "didn't find Create a new record in any form"
def verify_create_form(self, prefilled_params=None, unfilled_params=None):
"""Verifies the behavior of the create form.
Verifies that the form must contain prefilled_params (a dictionary)
and may not have any defaults for unfilled_params.
Precondition: the current session is on the create new record page
Postcondition: the current session is still on the create page
"""
create_form = self.s.doc.first('form')
for key, value in (prefilled_params or {}).iteritems():
assert create_form.params[key] == value
for key in unfilled_params or ():
assert not create_form.params[key]
# Try to submit without filling in required fields
self.assert_error_deadend(
self.s.submit(create_form), 'required', 'try again')
def verify_note_form(self):
"""Verifies the behavior of the add note form.
Precondition: the current session is on a page with a note form.
Postcondition: the current session is still on a page with a note form.
"""
note_form = self.s.doc.first('form')
assert 'Tell us the status of this person' in note_form.content
self.assert_error_deadend(
self.s.submit(note_form), 'required', 'try again')
def verify_details_page(self, num_notes, details=None):
"""Verifies the content of the details page.
Verifies that the details contain the given number of notes and the
given details.
Precondition: the current session is on the details page
Postcondition: the current session is still on the details page
"""
# Do not assert params. Upon reaching the details page, you've lost
# the difference between seekers and providers and the param is gone.
details = details or {}
details_page = self.s.doc
# Person info is stored in matching 'label' and 'field' cells.
fields = dict(zip(
[label.text.strip() for label in details_page.all(class_='label')],
details_page.all(class_='field')))
for label, value in details.iteritems():
assert fields[label].text.strip() == value
assert len(details_page.all(class_='view note')) == num_notes
def verify_click_search_result(self, n, url_test=lambda u: None):
"""Simulates clicking the nth search result (where n is zero-based).
Also passes the URL followed to the given url_test function for checking.
This function should raise an AssertionError on failure.
Precondition: the current session must be on the results page
Postcondition: the current session is on the person details page
"""
# Get the list of links.
results = self.s.doc.first('ul', class_='searchResults')
result_link = results.all('a', class_='result-link')[n]
# Verify and then follow the link.
url_test(result_link['href'])
self.s.go(result_link['href'])
def verify_update_notes(self, found, note_body, author, status, **kwargs):
"""Verifies the process of adding a new note.
Posts a new note with the given parameters.
Precondition: the current session must be on the details page
Postcondition: the current session is still on the details page
"""
# Do not assert params. Upon reaching the details page, you've lost
# the difference between seekers and providers and the param is gone.
details_page = self.s.doc
num_initial_notes = len(details_page.all(class_='view note'))
note_form = details_page.first('form')
params = dict(kwargs)
params['found'] = (found and 'yes') or 'no'
params['text'] = note_body
params['author_name'] = author
extra_values = [note_body, author]
if status:
params['status'] = status
extra_values.append(str(NOTE_STATUS_TEXT.get(status)))
details_page = self.s.submit(note_form, **params)
notes = details_page.all(class_='view note')
assert len(notes) == num_initial_notes + 1
new_note_text = notes[-1].text
extra_values.extend(kwargs.values())
for text in extra_values:
assert text in new_note_text, \
'Note text %r missing %r' % (new_note_text, text)
# Show this text if and only if the person has been found
assert ('This person has been in contact with someone'
in new_note_text) == found
def verify_email_sent(self, message_count=1):
"""Verifies email was sent, firing manually from the taskqueue
if necessary. """
# Explicitly fire the send-mail task if necessary
doc = self.go('/_ah/admin/tasks?queue=send-mail')
try:
button = doc.firsttag('button',
**{'class': 'ae-taskqueues-run-now'})
doc = self.s.submit(doc.first('form', name='queue_run_now'),
run_now=button.id)
except scrape.ScrapeError, e:
# button not found, assume task completed
pass
assert len(MailThread.messages) == message_count
def test_seeking_someone_regular(self):
"""Follow the seeking someone flow on the regular-sized embed."""
# Set utcnow to match source date
self.set_utcnow_for_test(datetime.datetime(2001, 1, 1, 0, 0, 0))
test_source_date = utils.get_utcnow().strftime('%Y-%m-%d')
# Shorthand to assert the correctness of our URL
def assert_params(url=None):
assert_params_conform(
url or self.s.url, {'role': 'seek'}, {'small': 'yes'})
# Start on the home page and click the "I'm looking for someone" button
self.go('/?subdomain=haiti')
search_page = self.s.follow('I\'m looking for someone')
search_form = search_page.first('form')
assert 'Search for this person' in search_form.content
# Try a search, which should yield no results.
self.s.submit(search_form, query='_test_first_name')
assert_params()
self.verify_results_page(0)
assert_params()
self.verify_unsatisfactory_results()
assert_params()
# Submit the create form with minimal information.
create_form = self.s.doc.first('form')
self.s.submit(create_form,
first_name='_test_first_name',
last_name='_test_last_name',
author_name='_test_author_name')
# For now, the date of birth should be hidden.
assert 'birth' not in self.s.content.lower()
self.verify_details_page(0, details={
'Given name:': '_test_first_name',
'Family name:': '_test_last_name',
'Author\'s name:': '_test_author_name'})
# Now the search should yield a result.
self.s.submit(search_form, query='_test_first_name')
assert_params()
self.verify_results_page(1, all_have=(['_test_first_name']),
some_have=(['_test_first_name']),
status=(['Unspecified']))
self.verify_click_search_result(0, assert_params)
# Set the person's entry_date to a fixed value to make sure that adding
# a note doesn't update it.
person = Person.all().filter('first_name =', '_test_first_name').get()
person.entry_date = datetime.datetime(2006, 6, 6, 6, 6, 6)
db.put(person)
self.verify_details_page(0)
self.verify_note_form()
self.verify_update_notes(
False, '_test A note body', '_test A note author', None)
self.verify_update_notes(
True, '_test Another note body', '_test Another note author',
'believed_alive',
last_known_location='Port-au-Prince')
person = Person.all().filter('first_name =', '_test_first_name').get()
assert person.entry_date == datetime.datetime(2006, 6, 6, 6, 6, 6)
self.s.submit(search_form, query='_test_first_name')
assert_params()
self.verify_results_page(1, all_have=(['_test_first_name']),
some_have=(['_test_first_name']),
status=(['Someone has received information that this person is alive']))
# Submit the create form with complete information
self.s.submit(create_form,
author_name='_test_author_name',
author_email='_test_author_email',
author_phone='_test_author_phone',
clone='yes',
source_name='_test_source_name',
source_date=test_source_date,
source_url='_test_source_url',
first_name='_test_first_name',
last_name='_test_last_name',
sex='female',
date_of_birth='1955',
age='52',
home_street='_test_home_street',
home_neighborhood='_test_home_neighborhood',
home_city='_test_home_city',
home_state='_test_home_state',
home_postal_code='_test_home_postal_code',
home_country='_test_home_country',
photo_url='_test_photo_url',
expiry_option='10',
description='_test_description')
self.verify_details_page(0, details={
'Given name:': '_test_first_name',
'Family name:': '_test_last_name',
'Sex:': 'female',
# 'Date of birth:': '1955', # currently hidden
'Age:': '52',
'Street name:': '_test_home_street',
'Neighborhood:': '_test_home_neighborhood',
'City:': '_test_home_city',
'Province or state:': '_test_home_state',
'Postal or zip code:': '_test_home_postal_code',
'Home country:': '_test_home_country',
'Author\'s name:': '_test_author_name',
'Author\'s phone number:': '(click to reveal)',
'Author\'s e-mail address:': '(click to reveal)',
'Original URL:': 'Link',
'Original posting date:': '2001-01-01 00:00 UTC',
'Original site name:': '_test_source_name',
'Expiry date of this record:': '2001-01-11 00:00 UTC'})
def test_new_indexing(self):
"""First create new entry with new_search param then search for it"""
# Shorthand to assert the correctness of our URL
def assert_params(url=None):
assert_params_conform(
url or self.s.url, {'role': 'seek'}, {'small': 'yes'})
# Start on the home page and click the "I'm looking for someone" button
self.go('/?subdomain=haiti')
search_page = self.s.follow('I\'m looking for someone')
search_form = search_page.first('form')
assert 'Search for this person' in search_form.content
# Try a search, which should yield no results.
self.s.submit(search_form, query='ABCD EFGH IJKL MNOP')
assert_params()
self.verify_results_page(0)
assert_params()
self.verify_unsatisfactory_results()
assert_params()
# Submit the create form with a valid first and last name
self.s.submit(self.s.doc.first('form'),
first_name='ABCD EFGH',
last_name='IJKL MNOP',
author_name='author_name')
# Try a middle-name match.
self.s.submit(search_form, query='EFGH')
self.verify_results_page(1, all_have=(['ABCD EFGH']))
# Try a middle-name non-match.
self.s.submit(search_form, query='ABCDEF')
self.verify_results_page(0)
# Try a middle-name prefix match.
self.s.submit(search_form, query='MNO')
self.verify_results_page(1, all_have=(['ABCD EFGH']))
# Try a multiword match.
self.s.submit(search_form, query='MNOP IJK ABCD EFG')
self.verify_results_page(1, all_have=(['ABCD EFGH']))
def test_have_information_regular(self):
"""Follow the "I have information" flow on the regular-sized embed."""
# Set utcnow to match source date
self.set_utcnow_for_test(datetime.datetime(2001, 1, 1, 0, 0, 0))
test_source_date = utils.get_utcnow().strftime('%Y-%m-%d')
# Shorthand to assert the correctness of our URL
def assert_params(url=None):
assert_params_conform(
url or self.s.url, {'role': 'provide'}, {'small': 'yes'})
self.go('/?subdomain=haiti')
search_page = self.s.follow('I have information about someone')
search_form = search_page.first('form')
assert 'I have information about someone' in search_form.content
self.assert_error_deadend(
self.s.submit(search_form),
'Enter the person\'s given and family names.')
self.assert_error_deadend(
self.s.submit(search_form, first_name='_test_first_name'),
'Enter the person\'s given and family names.')
self.s.submit(search_form,
first_name='_test_first_name',
last_name='_test_last_name')
assert_params()
# Because the datastore is empty, should go straight to the create page
self.verify_create_form(prefilled_params={
'first_name': '_test_first_name',
'last_name': '_test_last_name'})
self.verify_note_form()
# Submit the create form with minimal information
create_form = self.s.doc.first('form')
self.s.submit(create_form,
first_name='_test_first_name',
last_name='_test_last_name',
author_name='_test_author_name',
text='_test A note body')
self.verify_details_page(1, details={
'Given name:': '_test_first_name',
'Family name:': '_test_last_name',
'Author\'s name:': '_test_author_name'})
# Try the search again, and should get some results
self.s.submit(search_form,
first_name='_test_first_name',
last_name='_test_last_name')
assert_params()
self.verify_results_page(
1, all_have=('_test_first_name', '_test_last_name'))
self.verify_click_search_result(0, assert_params)
# For now, the date of birth should be hidden.
assert 'birth' not in self.s.content.lower()
self.verify_details_page(1)
self.verify_note_form()
self.verify_update_notes(
False, '_test A note body', '_test A note author', None)
self.verify_update_notes(
True, '_test Another note body', '_test Another note author',
None, last_known_location='Port-au-Prince')
# Submit the create form with complete information
self.s.submit(create_form,
author_name='_test_author_name',
author_email='_test_author_email',
author_phone='_test_author_phone',
clone='yes',
source_name='_test_source_name',
source_date=test_source_date,
source_url='_test_source_url',
first_name='_test_first_name',
last_name='_test_last_name',
sex='male',
date_of_birth='1970-01',
age='30-40',
home_street='_test_home_street',
home_neighborhood='_test_home_neighborhood',
home_city='_test_home_city',
home_state='_test_home_state',
home_postal_code='_test_home_postal_code',
home_country='_test_home_country',
photo_url='_test_photo_url',
expiry_option='20',
description='_test_description',
add_note='yes',
found='yes',
status='believed_alive',
email_of_found_person='_test_email_of_found_person',
phone_of_found_person='_test_phone_of_found_person',
last_known_location='_test_last_known_location',
text='_test A note body')
self.verify_details_page(1, details={
'Given name:': '_test_first_name',
'Family name:': '_test_last_name',
'Sex:': 'male',
# 'Date of birth:': '1970-01', # currently hidden
'Age:': '30-40',
'Street name:': '_test_home_street',
'Neighborhood:': '_test_home_neighborhood',
'City:': '_test_home_city',
'Province or state:': '_test_home_state',
'Postal or zip code:': '_test_home_postal_code',
'Home country:': '_test_home_country',
'Author\'s name:': '_test_author_name',
'Author\'s phone number:': '(click to reveal)',
'Author\'s e-mail address:': '(click to reveal)',
'Original URL:': 'Link',
'Original posting date:': '2001-01-01 00:00 UTC',
'Original site name:': '_test_source_name',
'Expiry date of this record:': '2001-01-21 00:00 UTC'})
def test_multiview(self):
"""Test the page for marking duplicate records."""
db.put(Person(
key_name='haiti:test.google.com/person.111',
subdomain='haiti',
author_name='_author_name_1',
author_email='_author_email_1',
author_phone='_author_phone_1',
entry_date=utils.get_utcnow(),
first_name='_first_name_1',
last_name='_last_name_1',
sex='male',
date_of_birth='1970-01-01',
age='31-41',
))
db.put(Person(
key_name='haiti:test.google.com/person.222',
subdomain='haiti',
author_name='_author_name_2',
author_email='_author_email_2',
author_phone='_author_phone_2',
entry_date=utils.get_utcnow(),
first_name='_first_name_2',
last_name='_last_name_2',
sex='male',
date_of_birth='1970-02-02',
age='32-42',
))
db.put(Person(
key_name='haiti:test.google.com/person.333',
subdomain='haiti',
author_name='_author_name_3',
author_email='_author_email_3',
author_phone='_author_phone_3',
entry_date=utils.get_utcnow(),
first_name='_first_name_3',
last_name='_last_name_3',
sex='male',
date_of_birth='1970-03-03',
age='33-43',
))
# All three records should appear on the multiview page.
doc = self.go('/multiview?subdomain=haiti' +
'&id1=test.google.com/person.111' +
'&id2=test.google.com/person.222' +
'&id3=test.google.com/person.333')
assert '_first_name_1' in doc.content
assert '_first_name_2' in doc.content
assert '_first_name_3' in doc.content
assert '31-41' in doc.content
assert '32-42' in doc.content
assert '33-43' in doc.content
# Mark all three as duplicates.
button = doc.firsttag('input', value='Yes, these are the same person')
doc = self.s.submit(button, text='duplicate test', author_name='foo')
# We should arrive back at the first record, with two duplicate notes.
assert self.s.status == 200
assert 'id=test.google.com%2Fperson.111' in self.s.url
assert 'Possible duplicates' in doc.content
assert '_first_name_2 _last_name_2' in doc.content
assert '_first_name_3 _last_name_3' in doc.content
p = Person.get('haiti', 'test.google.com/person.111')
assert len(p.get_linked_persons()) == 2
# Ask for detailed information on the duplicate markings.
doc = self.s.follow('Show who marked these duplicates')
assert '_first_name_1' in doc.content
notes = doc.all('div', class_='view note')
assert len(notes) == 2, str(doc.content.encode('ascii', 'ignore'))
assert 'Posted by foo' in notes[0].text
assert 'duplicate test' in notes[0].text
assert ('This record is a duplicate of test.google.com/person.222' in
notes[0].text)
assert 'Posted by foo' in notes[1].text
assert 'duplicate test' in notes[1].text
assert ('This record is a duplicate of test.google.com/person.333' in
notes[1].text)
def test_reveal(self):
"""Test the hiding and revealing of contact information in the UI."""
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
author_name='_reveal_author_name',
author_email='_reveal_author_email',
author_phone='_reveal_author_phone',
entry_date=utils.get_utcnow(),
first_name='_reveal_first_name',
last_name='_reveal_last_name',
sex='male',
date_of_birth='1970-01-01',
age='30-40',
))
db.put(Person(
key_name='haiti:test.google.com/person.456',
subdomain='haiti',
author_name='_reveal_author_name',
author_email='_reveal_author_email',
author_phone='_reveal_author_phone',
entry_date=datetime.datetime.now(),
first_name='_reveal_first_name',
last_name='_reveal_last_name',
sex='male',
date_of_birth='1970-01-01',
age='30-40',
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
author_name='_reveal_note_author_name',
author_email='_reveal_note_author_email',
author_phone='_reveal_note_author_phone',
entry_date=utils.get_utcnow(),
email_of_found_person='_reveal_email_of_found_person',
phone_of_found_person='_reveal_phone_of_found_person',
person_record_id='test.google.com/person.123',
))
# All contact information should be hidden by default.
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
assert '_reveal_author_email' not in doc.content
assert '_reveal_author_phone' not in doc.content
assert '_reveal_note_author_email' not in doc.content
assert '_reveal_note_author_phone' not in doc.content
assert '_reveal_email_of_found_person' not in doc.content
assert '_reveal_phone_of_found_person' not in doc.content
# Clicking the '(click to reveal)' link should bring the user
# to a captcha turing test page.
reveal_region = doc.first('a', u'(click to reveal)')
url = reveal_region.get('href', '')
doc = self.go(url[url.find('/reveal'):])
assert 'iframe' in doc.content
assert 'recaptcha_response_field' in doc.content
# Try to continue with an invalid captcha response. Get redirected
# back to the same page.
button = doc.firsttag('input', value='Proceed')
doc = self.s.submit(button)
assert 'iframe' in doc.content
assert 'recaptcha_response_field' in doc.content
# Continue as if captcha is valid. All information should be viewable.
url = '/reveal?subdomain=haiti&id=test.google.com/person.123&' + \
'test_mode=yes'
doc = self.s.submit(button, url=url)
assert '_reveal_author_email' in doc.content
assert '_reveal_author_phone' in doc.content
assert '_reveal_note_author_email' in doc.content
assert '_reveal_note_author_phone' in doc.content
assert '_reveal_email_of_found_person' in doc.content
assert '_reveal_phone_of_found_person' in doc.content
# Start over. Information should no longer be viewable.
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
assert '_reveal_author_email' not in doc.content
assert '_reveal_author_phone' not in doc.content
assert '_reveal_note_author_email' not in doc.content
assert '_reveal_note_author_phone' not in doc.content
assert '_reveal_email_of_found_person' not in doc.content
assert '_reveal_phone_of_found_person' not in doc.content
# Other person's records should also be invisible.
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.456')
assert '_reveal_author_email' not in doc.content
assert '_reveal_author_phone' not in doc.content
assert '_reveal_note_author_email' not in doc.content
assert '_reveal_note_author_phone' not in doc.content
assert '_reveal_email_of_found_person' not in doc.content
assert '_reveal_phone_of_found_person' not in doc.content
# All contact information should be hidden on the multiview page, too.
doc = self.go('/multiview?subdomain=haiti' +
'&id1=test.google.com/person.123' +
'&id2=test.google.com/person.456')
assert '_reveal_author_email' not in doc.content
assert '_reveal_author_phone' not in doc.content
assert '_reveal_note_author_email' not in doc.content
assert '_reveal_note_author_phone' not in doc.content
assert '_reveal_email_of_found_person' not in doc.content
assert '_reveal_phone_of_found_person' not in doc.content
# Now supply a valid revelation signature.
signature = reveal.sign(u'multiview:test.google.com/person.123', 10)
doc = self.go('/multiview?subdomain=haiti' +
'&id1=test.google.com/person.123' +
'&signature=' + signature)
assert '_reveal_author_email' in doc.content
assert '_reveal_author_phone' in doc.content
# Notes are not shown on the multiview page.
def test_note_status(self):
"""Test the posting and viewing of the note status field in the UI."""
status_class = re.compile(r'\bstatus\b')
# Check that the right status options appear on the create page.
doc = self.go('/create?subdomain=haiti&role=provide')
note = doc.first(**{'class': 'note input'})
options = note.first('select', name='status').all('option')
assert len(options) == len(NOTE_STATUS_OPTIONS)
for option, text in zip(options, NOTE_STATUS_OPTIONS):
assert text in option.attrs['value']
# Create a record with no status and get the new record's ID.
form = doc.first('form')
doc = self.s.submit(form,
first_name='_test_first',
last_name='_test_last',
author_name='_test_author',
text='_test_text')
view_url = self.s.url
# Check that the right status options appear on the view page.
doc = self.s.go(view_url)
note = doc.first(**{'class': 'note input'})
options = note.first('select', name='status').all('option')
assert len(options) == len(NOTE_STATUS_OPTIONS)
for option, text in zip(options, NOTE_STATUS_OPTIONS):
assert text in option.attrs['value']
# Set the status in a note and check that it appears on the view page.
form = doc.first('form')
self.s.submit(form, author_name='_test_author2', text='_test_text',
status='believed_alive')
doc = self.s.go(view_url)
note = doc.last(**{'class': 'view note'})
assert 'believed_alive' in note.content
assert 'believed_dead' not in note.content
# Set status to is_note_author, but don't check found.
self.s.submit(form,
author_name='_test_author',
text='_test_text',
status='is_note_author')
self.assert_error_deadend(
self.s.submit(form,
author_name='_test_author',
text='_test_text',
status='is_note_author'),
'in contact', 'Status of this person')
def test_api_write_pfif_1_2(self):
"""Post a single entry as PFIF 1.2 using the upload API."""
data = get_test_data('test.pfif-1.2.xml')
self.set_utcnow_for_test(self.default_test_time)
self.go('/api/write?subdomain=haiti&key=test_key',
data=data, type='application/xml')
person = Person.get('haiti', 'test.google.com/person.21009')
assert person.first_name == u'_test_first_name'
assert person.last_name == u'_test_last_name'
assert person.sex == u'female'
assert person.date_of_birth == u'1970-01'
assert person.age == u'35-45'
assert person.author_name == u'_test_author_name'
assert person.author_email == u'_test_author_email'
assert person.author_phone == u'_test_author_phone'
assert person.home_street == u'_test_home_street'
assert person.home_neighborhood == u'_test_home_neighborhood'
assert person.home_city == u'_test_home_city'
assert person.home_state == u'_test_home_state'
assert person.home_postal_code == u'_test_home_postal_code'
assert person.home_country == u'US'
assert person.record_id == u'test.google.com/person.21009'
assert person.photo_url == u'_test_photo_url'
assert person.source_name == u'_test_source_name'
assert person.source_url == u'_test_source_url'
assert person.source_date == datetime.datetime(2000, 1, 1, 0, 0, 0)
# Current date should replace the provided entry_date.
self.assertEqual(utils.get_utcnow(), person.entry_date)
# The latest_status property should come from the third Note.
assert person.latest_status == u'is_note_author'
assert person.latest_status_source_date == \
datetime.datetime(2000, 1, 18, 20, 21, 22)
# The latest_found property should come from the fourth Note.
assert person.latest_found == False
assert person.latest_found_source_date == \
datetime.datetime(2000, 1, 18, 20, 0, 0)
notes = person.get_notes()
assert len(notes) == 4
notes.sort(key=lambda note: note.record_id)
note = notes[0]
assert note.author_name == u'_test_author_name'
assert note.author_email == u'_test_author_email'
assert note.author_phone == u'_test_author_phone'
assert note.email_of_found_person == u'_test_email_of_found_person'
assert note.phone_of_found_person == u'_test_phone_of_found_person'
assert note.last_known_location == u'_test_last_known_location'
assert note.record_id == u'test.google.com/note.27009'
assert note.person_record_id == u'test.google.com/person.21009'
assert note.text == u'_test_text'
assert note.source_date == datetime.datetime(2000, 1, 16, 4, 5, 6)
# Current date should replace the provided entry_date.
assert note.entry_date == utils.get_utcnow()
assert note.found == False
assert note.status == u'believed_missing'
assert note.linked_person_record_id == u'test.google.com/person.999'
note = notes[1]
assert note.author_name == u'inna-testing'
assert note.author_email == u'inna-testing@gmail.com'
assert note.author_phone == u'inna-testing-number'
assert note.email_of_found_person == u''
assert note.phone_of_found_person == u''
assert note.last_known_location == u'19.16592425362802 -71.9384765625'
assert note.record_id == u'test.google.com/note.31095'
assert note.person_record_id == u'test.google.com/person.21009'
assert note.text == u'new comment - testing'
assert note.source_date == datetime.datetime(2000, 1, 17, 14, 15, 16)
# Current date should replace the provided entry_date.
assert note.entry_date.year == utils.get_utcnow().year
assert note.found == True
assert note.status == ''
assert not note.linked_person_record_id
# Just confirm that a missing <found> tag is parsed as None.
# We already checked all the other fields above.
note = notes[2]
assert note.found == None
assert note.status == u'is_note_author'
note = notes[3]
assert note.found == False
assert note.status == u'believed_missing'
def test_api_write_pfif_1_2_note(self):
"""Post a single note-only entry as PFIF 1.2 using the upload API."""
self.set_utcnow_for_test(self.default_test_time)
# Create person records that the notes will attach to.
Person(key_name='haiti:test.google.com/person.21009',
subdomain='haiti',
first_name='_test_first_name_1',
last_name='_test_last_name_1',
entry_date=datetime.datetime(2001, 1, 1, 1, 1, 1)).put()
Person(key_name='haiti:test.google.com/person.21010',
subdomain='haiti',
first_name='_test_first_name_2',
last_name='_test_last_name_2',
entry_date=datetime.datetime(2002, 2, 2, 2, 2, 2)).put()
data = get_test_data('test.pfif-1.2-note.xml')
self.go('/api/write?subdomain=haiti&key=test_key',
data=data, type='application/xml')
person = Person.get('haiti', 'test.google.com/person.21009')
assert person
notes = person.get_notes()
assert len(notes) == 1
note = notes[0]
assert note.author_name == u'_test_author_name'
assert note.author_email == u'_test_author_email'
assert note.author_phone == u'_test_author_phone'
assert note.email_of_found_person == u'_test_email_of_found_person'
assert note.phone_of_found_person == u'_test_phone_of_found_person'
assert note.last_known_location == u'_test_last_known_location'
assert note.record_id == u'test.google.com/note.27009'
assert note.person_record_id == u'test.google.com/person.21009'
assert note.text == u'_test_text'
assert note.source_date == datetime.datetime(2000, 1, 16, 7, 8, 9)
# Current date should replace the provided entry_date.
self.assertEqual(note.entry_date, utils.get_utcnow())
assert note.found == False
assert note.status == u'believed_missing'
assert note.linked_person_record_id == u'test.google.com/person.999'
# Found flag and status should have propagated to the Person.
assert person.latest_found == False
assert person.latest_found_source_date == note.source_date
assert person.latest_status == u'believed_missing'
assert person.latest_status_source_date == note.source_date
person = Person.get('haiti', 'test.google.com/person.21010')
assert person
notes = person.get_notes()
assert len(notes) == 1
note = notes[0]
assert note.author_name == u'inna-testing'
assert note.author_email == u'inna-testing@gmail.com'
assert note.author_phone == u'inna-testing-number'
assert note.email_of_found_person == u''
assert note.phone_of_found_person == u''
assert note.last_known_location == u'19.16592425362802 -71.9384765625'
assert note.record_id == u'test.google.com/note.31095'
assert note.person_record_id == u'test.google.com/person.21010'
assert note.text == u'new comment - testing'
assert note.source_date == datetime.datetime(2000, 1, 17, 17, 18, 19)
# Current date should replace the provided entry_date.
assert note.entry_date == utils.get_utcnow()
assert note.found is None
assert note.status == u'is_note_author'
assert not note.linked_person_record_id
# Status should have propagated to the Person, but not found.
assert person.latest_found is None
assert person.latest_found_source_date is None
assert person.latest_status == u'is_note_author'
assert person.latest_status_source_date == note.source_date
def test_api_write_pfif_1_1(self):
"""Post a single entry as PFIF 1.1 using the upload API."""
data = get_test_data('test.pfif-1.1.xml')
self.set_utcnow_for_test(self.default_test_time)
self.go('/api/write?subdomain=haiti&key=test_key',
data=data, type='application/xml')
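        # The uploaded person record should be retrievable with all fields.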
person = Person.get('haiti', 'test.google.com/person.21009')
assert person.first_name == u'_test_first_name'
assert person.last_name == u'_test_last_name'
assert person.author_name == u'_test_author_name'
assert person.author_email == u'_test_author_email'
assert person.author_phone == u'_test_author_phone'
assert person.home_city == u'_test_home_city'
assert person.home_street == u'_test_home_street'
assert person.home_neighborhood == u'_test_home_neighborhood'
assert person.home_state == u'_test_home_state'
assert person.home_postal_code == u'_test_home_zip'
assert person.record_id == u'test.google.com/person.21009'
assert person.photo_url == u'_test_photo_url'
assert person.source_name == u'_test_source_name'
assert person.source_url == u'_test_source_url'
assert person.source_date == datetime.datetime(2000, 1, 1, 0, 0, 0)
# Current date should replace the provided entry_date.
self.assertEqual(utils.get_utcnow(), person.entry_date)
# The latest_found property should come from the first Note.
self.assertTrue(person.latest_found)
assert person.latest_found_source_date == \
datetime.datetime(2000, 1, 16, 1, 2, 3)
# There's no status field in PFIF 1.1.
assert person.latest_status == ''
assert person.latest_status_source_date is None
notes = person.get_notes()
assert len(notes) == 2
notes.sort(key=lambda note: note.record_id)
note = notes[0]
assert note.author_name == u'_test_author_name'
assert note.author_email == u'_test_author_email'
assert note.author_phone == u'_test_author_phone'
assert note.email_of_found_person == u'_test_email_of_found_person'
assert note.phone_of_found_person == u'_test_phone_of_found_person'
assert note.last_known_location == u'_test_last_known_location'
assert note.record_id == u'test.google.com/note.27009'
assert note.text == u'_test_text'
assert note.source_date == datetime.datetime(2000, 1, 16, 1, 2, 3)
# Current date should replace the provided entry_date.
assert note.entry_date == utils.get_utcnow()
assert note.found == True
note = notes[1]
assert note.author_name == u'inna-testing'
assert note.author_email == u'inna-testing@gmail.com'
assert note.author_phone == u'inna-testing-number'
assert note.email_of_found_person == u''
assert note.phone_of_found_person == u''
assert note.last_known_location == u'19.16592425362802 -71.9384765625'
assert note.record_id == u'test.google.com/note.31095'
assert note.text == u'new comment - testing'
assert note.source_date == datetime.datetime(2000, 1, 17, 11, 12, 13)
# Current date should replace the provided entry_date.
        assert note.entry_date == utils.get_utcnow()
assert note.found is None
def test_api_write_bad_key(self):
"""Attempt to post an entry with an invalid API key."""
data = get_test_data('test.pfif-1.2.xml')
self.go('/api/write?subdomain=haiti&key=bad_key',
data=data, type='application/xml')
assert self.s.status == 403
def test_api_write_empty_record(self):
"""Verify that empty entries are accepted."""
doc = self.go('/api/write?subdomain=haiti&key=test_key',
data='''
<pfif xmlns="http://zesty.ca/pfif/1.2">
<person>
<person_record_id>test.google.com/person.empty</person_record_id>
</person>
</pfif>''', type='application/xml')
# The Person record should have been accepted.
person_status = doc.first('status:write')
assert person_status.first('status:written').text == '1'
# An empty Person entity should be in the datastore.
        person = Person.get('haiti', 'test.google.com/person.empty')
        assert person
def test_api_write_wrong_domain(self):
"""Attempt to post an entry with a domain that doesn't match the key."""
data = get_test_data('test.pfif-1.2.xml')
doc = self.go('/api/write?subdomain=haiti&key=other_key',
data=data, type='application/xml')
# The Person record should have been rejected.
person_status = doc.first('status:write')
assert person_status.first('status:written').text == '0'
assert ('Not in authorized domain' in
person_status.first('status:error').text)
# Both of the Note records should have been rejected.
note_status = person_status.next('status:write')
assert note_status.first('status:written').text == '0'
first_error = note_status.first('status:error')
second_error = first_error.next('status:error')
assert 'Not in authorized domain' in first_error.text
assert 'Not in authorized domain' in second_error.text
def test_api_read(self):
"""Fetch a single record as PFIF (1.1, 1.2 and 1.3) via the read API."""
self.set_utcnow_for_test(self.default_test_time)
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
author_email='_read_author_email',
author_name='_read_author_name',
author_phone='_read_author_phone',
first_name='_read_first_name',
last_name='_read_last_name',
            full_name='_first_dot_last',
sex='female',
date_of_birth='1970-01-01',
age='40-50',
home_city='_read_home_city',
home_neighborhood='_read_home_neighborhood',
home_state='_read_home_state',
home_street='_read_home_street',
home_postal_code='_read_home_postal_code',
home_country='_read_home_country',
other='_read_other & < > "',
photo_url='_read_photo_url',
source_name='_read_source_name',
source_url='_read_source_url',
source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
))
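        # Attach a note to the person so it appears in the read API output.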
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
author_email='_read_author_email',
author_name='_read_author_name',
author_phone='_read_author_phone',
email_of_found_person='_read_email_of_found_person',
last_known_location='_read_last_known_location',
person_record_id='test.google.com/person.123',
linked_person_record_id='test.google.com/person.888',
phone_of_found_person='_read_phone_of_found_person',
text='_read_text',
source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
            entry_date=utils.get_utcnow(),
found=True,
status='believed_missing'
))
# Fetch a PFIF 1.1 document.
# Note that author_email, author_phone, email_of_found_person, and
# phone_of_found_person are omitted intentionally (see
# utils.filter_sensitive_fields).
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.1')
expected_content = \
'''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.1">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_name>_read_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_read_source_url</pfif:source_url>
<pfif:first_name>_read_first_name</pfif:first_name>
<pfif:last_name>_read_last_name</pfif:last_name>
<pfif:home_city>_read_home_city</pfif:home_city>
<pfif:home_state>_read_home_state</pfif:home_state>
<pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
<pfif:home_street>_read_home_street</pfif:home_street>
<pfif:home_zip>_read_home_postal_code</pfif:home_zip>
<pfif:photo_url>_read_photo_url</pfif:photo_url>
<pfif:other>_read_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
<pfif:text>_read_text</pfif:text>
</pfif:note>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Fetch a PFIF 1.2 document.
# Note that date_of_birth, author_email, author_phone,
# email_of_found_person, and phone_of_found_person are omitted
# intentionally (see utils.filter_sensitive_fields).
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.2')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_name>_read_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_read_source_url</pfif:source_url>
<pfif:first_name>_read_first_name</pfif:first_name>
<pfif:last_name>_read_last_name</pfif:last_name>
<pfif:sex>female</pfif:sex>
<pfif:age>40-50</pfif:age>
<pfif:home_street>_read_home_street</pfif:home_street>
<pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_read_home_city</pfif:home_city>
<pfif:home_state>_read_home_state</pfif:home_state>
<pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_read_home_country</pfif:home_country>
<pfif:photo_url>_read_photo_url</pfif:photo_url>
<pfif:other>_read_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>believed_missing</pfif:status>
<pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
<pfif:text>_read_text</pfif:text>
</pfif:note>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Verify that PFIF 1.2 is the default version.
default_doc = self.go(
'/api/read?subdomain=haiti&id=test.google.com/person.123')
        assert default_doc.content == doc.content, \
            text_diff(default_doc.content, doc.content)
# Fetch a PFIF 1.3 document.
# Note that date_of_birth, author_email, author_phone,
# email_of_found_person, and phone_of_found_person are omitted
# intentionally (see utils.filter_sensitive_fields).
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.3')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_name>_read_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_read_source_url</pfif:source_url>
<pfif:full_name>_first_dot_last</pfif:full_name>
<pfif:first_name>_read_first_name</pfif:first_name>
<pfif:last_name>_read_last_name</pfif:last_name>
<pfif:sex>female</pfif:sex>
<pfif:age>40-50</pfif:age>
<pfif:home_street>_read_home_street</pfif:home_street>
<pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_read_home_city</pfif:home_city>
<pfif:home_state>_read_home_state</pfif:home_state>
<pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_read_home_country</pfif:home_country>
<pfif:photo_url>_read_photo_url</pfif:photo_url>
<pfif:other>_read_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>believed_missing</pfif:status>
<pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
<pfif:text>_read_text</pfif:text>
</pfif:note>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Fetch a PFIF 1.2 document, with full read authorization.
doc = self.go('/api/read?subdomain=haiti&key=full_read_key' +
'&id=test.google.com/person.123&version=1.2')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:author_email>_read_author_email</pfif:author_email>
<pfif:author_phone>_read_author_phone</pfif:author_phone>
<pfif:source_name>_read_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_read_source_url</pfif:source_url>
<pfif:first_name>_read_first_name</pfif:first_name>
<pfif:last_name>_read_last_name</pfif:last_name>
<pfif:sex>female</pfif:sex>
<pfif:date_of_birth>1970-01-01</pfif:date_of_birth>
<pfif:age>40-50</pfif:age>
<pfif:home_street>_read_home_street</pfif:home_street>
<pfif:home_neighborhood>_read_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_read_home_city</pfif:home_city>
<pfif:home_state>_read_home_state</pfif:home_state>
<pfif:home_postal_code>_read_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_read_home_country</pfif:home_country>
<pfif:photo_url>_read_photo_url</pfif:photo_url>
<pfif:other>_read_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_read_author_name</pfif:author_name>
<pfif:author_email>_read_author_email</pfif:author_email>
<pfif:author_phone>_read_author_phone</pfif:author_phone>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>believed_missing</pfif:status>
<pfif:email_of_found_person>_read_email_of_found_person</pfif:email_of_found_person>
<pfif:phone_of_found_person>_read_phone_of_found_person</pfif:phone_of_found_person>
<pfif:last_known_location>_read_last_known_location</pfif:last_known_location>
<pfif:text>_read_text</pfif:text>
</pfif:note>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_read_key(self):
"""Verifies that when read_auth_key_required is set, an authorization
key is required to read data from the API or feeds."""
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
author_email='_read_author_email',
author_name='_read_author_name',
author_phone='_read_author_phone',
first_name='_read_first_name',
last_name='_read_last_name',
sex='female',
date_of_birth='1970-01-01',
age='40-50',
home_city='_read_home_city',
home_neighborhood='_read_home_neighborhood',
home_state='_read_home_state',
home_street='_read_home_street',
home_postal_code='_read_home_postal_code',
home_country='_read_home_country',
other='_read_other & < > "',
photo_url='_read_photo_url',
source_name='_read_source_name',
source_url='_read_source_url',
source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
author_email='_read_author_email',
author_name='_read_author_name',
author_phone='_read_author_phone',
email_of_found_person='_read_email_of_found_person',
last_known_location='_read_last_known_location',
person_record_id='test.google.com/person.123',
linked_person_record_id='test.google.com/person.888',
phone_of_found_person='_read_phone_of_found_person',
text='_read_text',
source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
entry_date=datetime.datetime(2006, 6, 6, 6, 6, 6),
found=True,
status='believed_missing'
))
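        # Require a read key for this subdomain; the finally clause below
        # restores the open default.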
config.set_for_subdomain('haiti', read_auth_key_required=True)
try:
# Fetch a PFIF 1.2 document from a domain that requires a read key.
# Without an authorization key, the request should fail.
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.1')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a non-read authorization key, the request should fail.
doc = self.go('/api/read?subdomain=haiti&key=test_key' +
'&id=test.google.com/person.123&version=1.1')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a valid read authorization key, the request should succeed.
doc = self.go('/api/read?subdomain=haiti&key=read_key' +
'&id=test.google.com/person.123&version=1.2')
assert '_read_first_name' in doc.content
# Fetch the person feed from a domain that requires a read key.
# Without an authorization key, the request should fail.
doc = self.go('/feeds/person?subdomain=haiti')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a non-read authorization key, the request should fail.
doc = self.go('/feeds/person?subdomain=haiti&key=test_key')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a valid read authorization key, the request should succeed.
doc = self.go('/feeds/person?subdomain=haiti&key=read_key')
assert '_read_author_name' in doc.content
# Fetch the note feed from a domain that requires a read key.
# Without an authorization key, the request should fail.
doc = self.go('/feeds/note?subdomain=haiti')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a non-read authorization key, the request should fail.
doc = self.go('/feeds/note?subdomain=haiti&key=test_key')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a valid read authorization key, the request should succeed.
doc = self.go('/feeds/note?subdomain=haiti&key=read_key')
assert '_read_text' in doc.content
finally:
config.set_for_subdomain('haiti', read_auth_key_required=False)
def test_api_read_with_non_ascii(self):
"""Fetch a record containing non-ASCII characters using the read API.
        This tests PFIF 1.1, 1.2, and 1.3."""
self.set_utcnow_for_test(self.default_test_time)
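        # Expire the record one day after the frozen test time, so that the
        # PFIF 1.3 output below includes an expiry_date element.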
        expiry_date = self.default_test_time + datetime.timedelta(1, 0, 0)
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
expiry_date=expiry_date,
author_name=u'a with acute = \u00e1',
source_name=u'c with cedilla = \u00e7',
source_url=u'e with acute = \u00e9',
full_name=u'arabic alif = \u0627',
first_name=u'greek alpha = \u03b1',
last_name=u'hebrew alef = \u05d0'
))
# Fetch a PFIF 1.1 document.
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.1')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.1">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
<pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
<pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
<pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
<pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Fetch a PFIF 1.2 document.
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.2')
assert re.match(r'''<\?xml version="1.0" encoding="UTF-8"\?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
<pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
<pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
<pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
<pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
</pfif:person>
</pfif:pfif>
''', doc.content)
# Verify that PFIF 1.2 is the default version.
default_doc = self.go(
'/api/read?subdomain=haiti&id=test.google.com/person.123')
assert default_doc.content == doc.content, \
text_diff(default_doc.content, doc.content)
# Fetch a PFIF 1.3 document.
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.123&version=1.3')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:expiry_date>2010-01-03T03:04:05Z</pfif:expiry_date>
<pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
<pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
<pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
<pfif:full_name>arabic alif = \xd8\xa7</pfif:full_name>
<pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
<pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Verify that PFIF 1.3 is not the default version.
default_doc = self.go(
'/api/read?subdomain=haiti&id=test.google.com/person.123')
assert default_doc.content != doc.content
def test_search_api(self):
"""Verifies that search API works and returns person and notes correctly.
Also check that it optionally requires search_auth_key_."""
# Add a first person to datastore.
self.go('/create?subdomain=haiti')
self.s.submit(self.s.doc.first('form'),
first_name='_search_first_name',
last_name='_search_lastname',
author_name='_search_author_name')
# Add a note for this person.
self.s.submit(self.s.doc.first('form'),
found='yes',
text='this is text for first person',
author_name='_search_note_author_name')
# Add a 2nd person with same firstname but different lastname.
self.go('/create?subdomain=haiti')
self.s.submit(self.s.doc.first('form'),
first_name='_search_first_name',
last_name='_search_2ndlastname',
author_name='_search_2nd_author_name')
# Add a note for this 2nd person.
self.s.submit(self.s.doc.first('form'),
found='yes',
text='this is text for second person',
author_name='_search_note_2nd_author_name')
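        # Require a search key for this subdomain; the finally clause below
        # restores the open default.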
config.set_for_subdomain('haiti', search_auth_key_required=True)
try:
            # Make a search without a key; it should fail because the config
            # requires a search key.
doc = self.go('/api/search?subdomain=haiti' +
'&q=_search_lastname')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a non-search authorization key, the request should fail.
doc = self.go('/api/search?subdomain=haiti&key=test_key' +
'&q=_search_lastname')
assert self.s.status == 403
assert 'Missing or invalid authorization key' in doc.content
# With a valid search authorization key, the request should succeed.
doc = self.go('/api/search?subdomain=haiti&key=search_key' +
'&q=_search_lastname')
            assert self.s.status not in [403, 404]
# Make sure we return the first record and not the 2nd one.
assert '_search_first_name' in doc.content
assert '_search_2ndlastname' not in doc.content
# Check we also retrieved the first note and not the second one.
assert '_search_note_author_name' in doc.content
assert '_search_note_2nd_author_name' not in doc.content
# Check that we can retrieve several persons matching a query
# and check their notes are also retrieved.
doc = self.go('/api/search?subdomain=haiti&key=search_key' +
'&q=_search_first_name')
            assert self.s.status not in [403, 404]
# Check we found the 2 records.
assert '_search_lastname' in doc.content
assert '_search_2ndlastname' in doc.content
# Check we also retrieved the notes.
assert '_search_note_author_name' in doc.content
assert '_search_note_2nd_author_name' in doc.content
            # If no results are found, the API returns an empty PFIF document.
doc = self.go('/api/search?subdomain=haiti&key=search_key' +
'&q=_wrong_last_name')
            assert self.s.status not in [403, 404]
empty_pfif = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.2">
</pfif:pfif>
'''
            assert empty_pfif == doc.content, \
                text_diff(empty_pfif, doc.content)
# Check that we can get results without a key if no key is required.
config.set_for_subdomain('haiti', search_auth_key_required=False)
doc = self.go('/api/search?subdomain=haiti' +
'&q=_search_first_name')
            assert self.s.status not in [403, 404]
# Check we found 2 records.
assert '_search_lastname' in doc.content
assert '_search_2ndlastname' in doc.content
# Check we also retrieved the notes.
assert '_search_note_author_name' in doc.content
assert '_search_note_2nd_author_name' in doc.content
            # Check that max_results works as expected.
config.set_for_subdomain('haiti', search_auth_key_required=False)
doc = self.go('/api/search?subdomain=haiti' +
'&q=_search_first_name&max_results=1')
            assert self.s.status not in [403, 404]
# Check we found only 1 record. Note that we can't rely on
# which record it found.
assert len(re.findall('_search_first_name', doc.content)) == 1
assert len(re.findall('<pfif:person>', doc.content)) == 1
# Check we also retrieved exactly one note.
assert len(re.findall('<pfif:note>', doc.content)) == 1
finally:
config.set_for_subdomain('haiti', search_auth_key_required=False)
def test_person_feed(self):
"""Fetch a single person using the PFIF Atom feed."""
self.set_utcnow_for_test(self.default_test_time)
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
author_email='_feed_author_email',
author_name='_feed_author_name',
author_phone='_feed_author_phone',
first_name='_feed_first_name',
last_name='_feed_last_name',
sex='male',
date_of_birth='1975',
age='30-40',
home_street='_feed_home_street',
home_neighborhood='_feed_home_neighborhood',
home_city='_feed_home_city',
home_state='_feed_home_state',
home_postal_code='_feed_home_postal_code',
home_country='_feed_home_country',
other='_feed_other & < > "',
photo_url='_feed_photo_url',
source_name='_feed_source_name',
source_url='_feed_source_url',
source_date=datetime.datetime(2001, 2, 3, 4, 5, 6),
))
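        # Add a note so the person feed entry includes a nested pfif:note.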
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
author_email='_feed_author_email',
author_name='_feed_author_name',
author_phone='_feed_author_phone',
email_of_found_person='_feed_email_of_found_person',
last_known_location='_feed_last_known_location',
person_record_id='test.google.com/person.123',
linked_person_record_id='test.google.com/person.888',
phone_of_found_person='_feed_phone_of_found_person',
text='_feed_text',
source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
entry_date=utils.get_utcnow(),
found=True,
status='is_note_author'
))
        # Sanity check: the note's entry_date should be the frozen test time.
note = Note.get('haiti', 'test.google.com/note.456')
self.debug_print('Note entry_date: %s' % note.entry_date)
self.assertEqual(note.entry_date, utils.get_utcnow())
note = None
# Feeds use PFIF 1.2.
# Note that date_of_birth, author_email, author_phone,
# email_of_found_person, and phone_of_found_person are omitted
# intentionally (see utils.filter_sensitive_fields).
doc = self.go('/feeds/person?subdomain=haiti')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>is_note_author</pfif:status>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Test the omit_notes parameter.
doc = self.go('/feeds/person?subdomain=haiti&omit_notes=yes')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti&omit_notes=yes</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&omit_notes=yes</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Fetch the entry, with full read authorization.
doc = self.go('/feeds/person?subdomain=haiti&key=full_read_key')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti&key=full_read_key</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&key=full_read_key</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:author_email>_feed_author_email</pfif:author_email>
<pfif:author_phone>_feed_author_phone</pfif:author_phone>
<pfif:source_name>_feed_source_name</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>_feed_source_url</pfif:source_url>
<pfif:first_name>_feed_first_name</pfif:first_name>
<pfif:last_name>_feed_last_name</pfif:last_name>
<pfif:sex>male</pfif:sex>
<pfif:date_of_birth>1975</pfif:date_of_birth>
<pfif:age>30-40</pfif:age>
<pfif:home_street>_feed_home_street</pfif:home_street>
<pfif:home_neighborhood>_feed_home_neighborhood</pfif:home_neighborhood>
<pfif:home_city>_feed_home_city</pfif:home_city>
<pfif:home_state>_feed_home_state</pfif:home_state>
<pfif:home_postal_code>_feed_home_postal_code</pfif:home_postal_code>
<pfif:home_country>_feed_home_country</pfif:home_country>
<pfif:photo_url>_feed_photo_url</pfif:photo_url>
<pfif:other>_feed_other & < > "</pfif:other>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:author_email>_feed_author_email</pfif:author_email>
<pfif:author_phone>_feed_author_phone</pfif:author_phone>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>is_note_author</pfif:status>
<pfif:email_of_found_person>_feed_email_of_found_person</pfif:email_of_found_person>
<pfif:phone_of_found_person>_feed_phone_of_found_person</pfif:phone_of_found_person>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>_feed_first_name _feed_last_name</title>
<author>
<name>_feed_author_name</name>
<email>_feed_author_email</email>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>_feed_first_name _feed_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_note_feed(self):
"""Fetch a single note using the PFIF Atom feed."""
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
first_name='_feed_first_name',
last_name='_feed_last_name',
))
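        # Add the note that the note feed should return.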
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
person_record_id='test.google.com/person.123',
linked_person_record_id='test.google.com/person.888',
author_email='_feed_author_email',
author_name='_feed_author_name',
author_phone='_feed_author_phone',
email_of_found_person='_feed_email_of_found_person',
last_known_location='_feed_last_known_location',
phone_of_found_person='_feed_phone_of_found_person',
text='_feed_text',
source_date=datetime.datetime(2005, 5, 5, 5, 5, 5),
entry_date=datetime.datetime(2006, 6, 6, 6, 6, 6),
found=True,
status='believed_dead'
))
# Feeds use PFIF 1.2.
# Note that author_email, author_phone, email_of_found_person, and
# phone_of_found_person are omitted intentionally (see
# utils.filter_sensitive_fields).
doc = self.go('/feeds/note?subdomain=haiti')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/note?subdomain=haiti</id>
<title>%s</title>
<updated>2006-06-06T06:06:06Z</updated>
<link rel="self">http://%s/feeds/note?subdomain=haiti</link>
<entry>
<pfif:note>
<pfif:note_record_id>test.google.com/note.456</pfif:note_record_id>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:linked_person_record_id>test.google.com/person.888</pfif:linked_person_record_id>
<pfif:entry_date>2006-06-06T06:06:06Z</pfif:entry_date>
<pfif:author_name>_feed_author_name</pfif:author_name>
<pfif:source_date>2005-05-05T05:05:05Z</pfif:source_date>
<pfif:found>true</pfif:found>
<pfif:status>believed_dead</pfif:status>
<pfif:last_known_location>_feed_last_known_location</pfif:last_known_location>
<pfif:text>_feed_text</pfif:text>
</pfif:note>
<id>pfif:test.google.com/note.456</id>
<title>_feed_text</title>
<author>
<name>_feed_author_name</name>
</author>
<updated>2006-06-06T06:06:06Z</updated>
<content>_feed_text</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_person_feed_with_bad_chars(self):
"""Fetch a person whose fields contain characters that are not
legally representable in XML, using the PFIF Atom feed."""
# See: http://www.w3.org/TR/REC-xml/#charsets
self.set_utcnow_for_test(self.default_test_time)
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
author_name=u'illegal character (\x01)',
first_name=u'illegal character (\x1a)',
last_name=u'illegal character (\ud800)',
source_date=datetime.datetime(2001, 2, 3, 4, 5, 6)
))
# Note that author_email, author_phone, email_of_found_person, and
# phone_of_found_person are omitted intentionally (see
# utils.filter_sensitive_fields).
doc = self.go('/feeds/person?subdomain=haiti')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>illegal character ()</pfif:author_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:first_name>illegal character ()</pfif:first_name>
<pfif:last_name>illegal character ()</pfif:last_name>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>illegal character () illegal character ()</title>
<author>
<name>illegal character ()</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>illegal character () illegal character ()</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_person_feed_with_non_ascii(self):
"""Fetch a person whose fields contain non-ASCII characters,
using the PFIF Atom feed."""
self.set_utcnow_for_test(self.default_test_time)
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
entry_date=utils.get_utcnow(),
author_name=u'a with acute = \u00e1',
source_name=u'c with cedilla = \u00e7',
source_url=u'e with acute = \u00e9',
first_name=u'greek alpha = \u03b1',
last_name=u'hebrew alef = \u05d0',
source_date=datetime.datetime(2001, 2, 3, 4, 5, 6)
))
# Note that author_email, author_phone, email_of_found_person, and
# phone_of_found_person are omitted intentionally (see
# utils.filter_sensitive_fields).
doc = self.go('/feeds/person?subdomain=haiti')
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T03:04:05Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T03:04:05Z</pfif:entry_date>
<pfif:author_name>a with acute = \xc3\xa1</pfif:author_name>
<pfif:source_name>c with cedilla = \xc3\xa7</pfif:source_name>
<pfif:source_date>2001-02-03T04:05:06Z</pfif:source_date>
<pfif:source_url>e with acute = \xc3\xa9</pfif:source_url>
<pfif:first_name>greek alpha = \xce\xb1</pfif:first_name>
<pfif:last_name>hebrew alef = \xd7\x90</pfif:last_name>
</pfif:person>
<id>pfif:test.google.com/person.123</id>
<title>greek alpha = \xce\xb1 hebrew alef = \xd7\x90</title>
<author>
<name>a with acute = \xc3\xa1</name>
</author>
<updated>2001-02-03T04:05:06Z</updated>
<source>
<title>%s</title>
</source>
<content>greek alpha = \xce\xb1 hebrew alef = \xd7\x90</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_person_feed_parameters(self):
"""Test the max_results, skip, and min_entry_date parameters."""
db.put([Person(
key_name='haiti:test.google.com/person.%d' % i,
subdomain='haiti',
entry_date=datetime.datetime(2000, 1, 1, i, i, i),
first_name='first.%d' % i,
last_name='last.%d' % i
) for i in range(1, 21)]) # Create 20 persons.
def assert_ids(*ids):
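            # Collect the numeric person record ids in the response, in
            # document order, and compare them to the expected sequence.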
person_ids = re.findall(r'record_id>test.google.com/person.(\d+)',
self.s.doc.content)
assert map(int, person_ids) == list(ids)
# Should get records in reverse chronological order by default.
doc = self.go('/feeds/person?subdomain=haiti')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11)
# Fewer results.
doc = self.go('/feeds/person?subdomain=haiti&max_results=1')
assert_ids(20)
doc = self.go('/feeds/person?subdomain=haiti&max_results=9')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12)
# More results.
doc = self.go('/feeds/person?subdomain=haiti&max_results=12')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9)
# Skip some results.
doc = self.go('/feeds/person?subdomain=haiti&skip=12&max_results=5')
assert_ids(8, 7, 6, 5, 4)
# Should get records in forward chronological order with min_entry_date.
doc = self.go('/feeds/person?subdomain=haiti' +
'&min_entry_date=2000-01-01T18:18:18Z')
assert_ids(18, 19, 20)
doc = self.go('/feeds/person?subdomain=haiti' +
'&min_entry_date=2000-01-01T03:03:03Z')
assert_ids(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
doc = self.go('/feeds/person?subdomain=haiti' +
'&min_entry_date=2000-01-01T03:03:04Z')
assert_ids(4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
def test_note_feed_parameters(self):
"""Test the max_results, skip, min_entry_date, and person_record_id
parameters."""
entities = []
for i in range(1, 3): # Create person.1 and person.2.
entities.append(Person(
key_name='haiti:test.google.com/person.%d' % i,
subdomain='haiti',
entry_date=datetime.datetime(2000, 1, 1, i, i, i),
first_name='first',
last_name='last'
))
for i in range(1, 6): # Create notes 1-5 on person.1.
entities.append(Note(
key_name='haiti:test.google.com/note.%d' % i,
subdomain='haiti',
person_record_id='test.google.com/person.1',
entry_date=datetime.datetime(2000, 1, 1, i, i, i)
))
for i in range(6, 18): # Create notes 6-17 on person.2.
entities.append(Note(
key_name='haiti:test.google.com/note.%d' % i,
subdomain='haiti',
person_record_id='test.google.com/person.2',
entry_date=datetime.datetime(2000, 1, 1, i, i, i)
))
for i in range(18, 21): # Create notes 18-20 on person.1.
entities.append(Note(
key_name='haiti:test.google.com/note.%d' % i,
subdomain='haiti',
person_record_id='test.google.com/person.1',
entry_date=datetime.datetime(2000, 1, 1, i, i, i)
))
db.put(entities)
def assert_ids(*ids):
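            # Collect the numeric note record ids in the response, in
            # document order, and compare them to the expected sequence.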
note_ids = re.findall(r'record_id>test.google.com/note.(\d+)',
self.s.doc.content)
assert map(int, note_ids) == list(ids)
# Should get records in reverse chronological order by default.
doc = self.go('/feeds/note?subdomain=haiti')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11)
# Fewer results.
doc = self.go('/feeds/note?subdomain=haiti&max_results=1')
assert_ids(20)
doc = self.go('/feeds/note?subdomain=haiti&max_results=9')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12)
# More results.
doc = self.go('/feeds/note?subdomain=haiti&max_results=12')
assert_ids(20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9)
# Skip some results.
doc = self.go('/feeds/note?subdomain=haiti&skip=12&max_results=5')
assert_ids(8, 7, 6, 5, 4)
        # Should get records in forward chronological order with min_entry_date.
doc = self.go('/feeds/note?subdomain=haiti' +
'&min_entry_date=2000-01-01T18:18:18Z')
assert_ids(18, 19, 20)
doc = self.go('/feeds/note?subdomain=haiti' +
'&min_entry_date=2000-01-01T03:03:03Z')
assert_ids(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
doc = self.go('/feeds/note?subdomain=haiti' +
'&min_entry_date=2000-01-01T03:03:04Z')
assert_ids(4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
# Filter by person_record_id.
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.1')
assert_ids(20, 19, 18, 5, 4, 3, 2, 1)
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.2')
assert_ids(17, 16, 15, 14, 13, 12, 11, 10, 9, 8)
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.2' +
'&max_results=11')
assert_ids(17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7)
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.1' +
'&min_entry_date=2000-01-01T03:03:03Z')
assert_ids(3, 4, 5, 18, 19, 20)
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.1' +
'&min_entry_date=2000-01-01T03:03:04Z')
assert_ids(4, 5, 18, 19, 20)
doc = self.go('/feeds/note?subdomain=haiti' +
'&person_record_id=test.google.com/person.2' +
'&min_entry_date=2000-01-01T06:06:06Z')
assert_ids(6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
def test_api_read_status(self):
"""Test the reading of the note status field at /api/read and /feeds."""
# A missing status should not appear as a tag.
db.put(Person(
key_name='haiti:test.google.com/person.1001',
subdomain='haiti',
entry_date=utils.get_utcnow(),
first_name='_status_first_name',
last_name='_status_last_name',
author_name='_status_author_name'
))
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.1001')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/person?subdomain=haiti')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/note?subdomain=haiti')
assert '<pfif:status>' not in doc.content
# An unspecified status should not appear as a tag.
db.put(Note(
key_name='haiti:test.google.com/note.2002',
subdomain='haiti',
person_record_id='test.google.com/person.1001',
entry_date=utils.get_utcnow()
))
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.1001')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/person?subdomain=haiti')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/note?subdomain=haiti')
assert '<pfif:status>' not in doc.content
# An empty status should not appear as a tag.
db.put(Note(
key_name='haiti:test.google.com/note.2002',
subdomain='haiti',
person_record_id='test.google.com/person.1001',
status='',
entry_date=utils.get_utcnow()
))
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.1001')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/person?subdomain=haiti')
assert '<pfif:status>' not in doc.content
doc = self.go('/feeds/note?subdomain=haiti')
assert '<pfif:status>' not in doc.content
# When the status is specified, it should appear in the feed.
db.put(Note(
key_name='haiti:test.google.com/note.2002',
subdomain='haiti',
person_record_id='test.google.com/person.1001',
entry_date=utils.get_utcnow(),
status='believed_alive'
))
doc = self.go('/api/read?subdomain=haiti' +
'&id=test.google.com/person.1001')
assert '<pfif:status>believed_alive</pfif:status>' in doc.content
doc = self.go('/feeds/person?subdomain=haiti')
assert '<pfif:status>believed_alive</pfif:status>' in doc.content
doc = self.go('/feeds/note?subdomain=haiti')
assert '<pfif:status>believed_alive</pfif:status>' in doc.content
def test_delete_clone(self):
"""Confirms that attempting to delete clone records produces the
appropriate UI message."""
now, person, note = self.setup_person_and_note('test.google.com')
# Check that there is a Delete button on the view page.
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
button = doc.firsttag('input', value='Delete this record')
# Check that the deletion confirmation page shows the right message.
doc = self.s.submit(button)
assert 'we might later receive another copy' in doc.text
# Click the button to delete a record.
button = doc.firsttag('input', value='Yes, delete the record')
doc = self.s.submit(button)
# Check to make sure that the user was redirected to the same page due
# to an invalid captcha.
assert 'delete the record for "_test_first_name ' + \
'_test_last_name"' in doc.text
assert 'incorrect-captcha-sol' in doc.content
# Continue with a valid captcha (faked, for purpose of test). Check the
# sent messages for proper notification of related e-mail accounts.
doc = self.s.go(
'/delete',
data='subdomain=haiti&id=test.google.com/person.123&' +
'reason_for_deletion=spam_received&test_mode=yes')
# Both entities should be gone.
assert not db.get(person.key())
assert not db.get(note.key())
# Clone deletion cannot be undone, so no e-mail should have been sent.
assert len(MailThread.messages) == 0
def setup_person_and_note(self, domain='haiti.person-finder.appspot.com'):
"""Puts a Person with associated Note into the datastore, returning
(now, person, note) for testing. This creates an original record
by default; to make a clone record, pass in a domain name."""
now = datetime.datetime(2010, 1, 1, 0, 0, 0)
self.set_utcnow_for_test(now)
person = Person(
key_name='haiti:%s/person.123' % domain,
subdomain='haiti',
author_name='_test_author_name',
author_email='test@example.com',
first_name='_test_first_name',
last_name='_test_last_name',
source_date=now,
entry_date=now
)
person.update_index(['old', 'new'])
note = Note(
key_name='haiti:%s/note.456' % domain,
subdomain='haiti',
author_email='test2@example.com',
person_record_id='%s/person.123' % domain,
source_date=now,
entry_date=now,
text='Testing'
)
db.put([person, note])
return now, person, note
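    # Note: setup_person_and_note freezes the clock at 2010-01-01 via
    # set_utcnow_for_test; tests that need to advance time afterwards must
    # call set_utcnow_for_test again themselves.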
def setup_photo(self, person):
"""Stores a Photo for the given person, for testing."""
photo = Photo(bin_data='xyz')
photo.put()
person.photo = photo
person.photo_url = '_test_photo_url'
person.put()
return photo
def test_photo(self):
"""Checks that a stored photo can be retrieved."""
now, person, note = self.setup_person_and_note()
photo = self.setup_photo(person)
doc = self.go('/photo?id=%s&subdomain=haiti' % photo.key().id())
assert doc.content == 'xyz'
def test_delete_and_restore(self):
"""Checks that deleting a record through the UI, then undeleting
it using the link in the deletion notification, causes the record to
disappear and reappear correctly, produces e-mail notifications,
and has the correct effect on the outgoing API and feeds."""
now, person, note = self.setup_person_and_note()
photo = self.setup_photo(person)
# Advance time by one day.
now = datetime.datetime(2010, 1, 2, 0, 0, 0)
self.set_utcnow_for_test(now)
# Visit the page and click the button to delete a record.
doc = self.go('/view?subdomain=haiti&' +
'id=haiti.person-finder.appspot.com/person.123')
button = doc.firsttag('input', value='Delete this record')
doc = self.s.submit(button)
assert 'delete the record for "_test_first_name ' + \
'_test_last_name"' in doc.text
button = doc.firsttag('input', value='Yes, delete the record')
doc = self.s.submit(button)
# Check to make sure that the user was redirected to the same page due
# to an invalid captcha.
assert 'delete the record for "_test_first_name ' + \
'_test_last_name"' in doc.text
assert 'incorrect-captcha-sol' in doc.content
# Continue with a valid captcha (faked, for purpose of test). Check the
# sent messages for proper notification of related e-mail accounts.
doc = self.s.go(
'/delete',
data='subdomain=haiti&' +
'id=haiti.person-finder.appspot.com/person.123&' +
'reason_for_deletion=spam_received&test_mode=yes')
assert len(MailThread.messages) == 2
messages = sorted(MailThread.messages, key=lambda m: m['to'][0])
# After sorting by recipient, the second message should be to the
# person author, test@example.com (sorts after test2@example.com).
assert messages[1]['to'] == ['test@example.com']
words = ' '.join(messages[1]['data'].split())
assert ('Subject: [Person Finder] Deletion notice for ' +
'"_test_first_name _test_last_name"' in words)
assert 'the author of this record' in words
assert 'restore it by following this link' in words
restore_url = re.search('(/restore.*)', messages[1]['data']).group(1)
# The first message should be to the note author, test2@example.com.
assert messages[0]['to'] == ['test2@example.com']
words = ' '.join(messages[0]['data'].split())
assert ('Subject: [Person Finder] Deletion notice for ' +
'"_test_first_name _test_last_name"' in words)
assert 'the author of a note on this record' in words
assert 'restore it by following this link' not in words
# The Person and Note records should now be marked expired.
person = db.get(person.key())
assert person.is_expired
assert person.source_date == now
assert person.entry_date == now
assert person.expiry_date == now
note = db.get(note.key())
assert note.is_expired
# The Person and Note records should be inaccessible.
assert not Person.get('haiti', person.record_id)
assert not Note.get('haiti', note.record_id)
# Make sure that a UserActionLog row was created.
last_log_entry = UserActionLog.all().order('-time').get()
assert last_log_entry
assert last_log_entry.action == 'delete'
assert last_log_entry.entity_kind == 'Person'
assert (last_log_entry.entity_key_name ==
'haiti:haiti.person-finder.appspot.com/person.123')
assert last_log_entry.reason == 'spam_received'
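        # The Photo entity itself should survive the deletion.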
assert Photo.get_by_id(photo.key().id())
# Search for the record. Make sure it does not show up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' in doc.text
        # The read API should still expose the expired record, but with all
        # personal fields removed.
        doc = self.go('/api/read?subdomain=haiti' +
                      '&id=haiti.person-finder.appspot.com/person.123' +
                      '&version=1.3')  # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# The outgoing person feed should contain an expired record.
doc = self.go('/feeds/person?subdomain=haiti&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.3">
<id>http://%s/feeds/person?subdomain=haiti&version=1.3</id>
<title>%s</title>
<updated>2010-01-02T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&version=1.3</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<author>
</author>
<updated>2010-01-02T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
doc = self.go('/feeds/person?subdomain=haiti') # PFIF 1.2
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.2">
<id>http://%s/feeds/person?subdomain=haiti</id>
<title>%s</title>
<updated>2010-01-02T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:first_name></pfif:first_name>
<pfif:last_name></pfif:last_name>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<author>
</author>
<updated>2010-01-02T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Advance time by one day.
now = datetime.datetime(2010, 1, 3, 0, 0, 0)
self.set_utcnow_for_test(now)
# Restore the record using the URL in the e-mail. Clicking the link
# should take you to a CAPTCHA page to confirm.
doc = self.go(restore_url)
assert 'captcha' in doc.content
# Fake a valid captcha and actually reverse the deletion
url = restore_url + '&test_mode=yes'
doc = self.s.submit(button, url=url)
assert 'Identifying information' in doc.text
assert '_test_first_name _test_last_name' in doc.text
assert Person.get('haiti', 'haiti.person-finder.appspot.com/person.123')
note = Note.get('haiti', 'haiti.person-finder.appspot.com/note.456')
assert note
self.assertEquals([note.record_id],
[n.record_id for n in person.get_notes()])
assert 'Testing' in doc.text, \
'Testing not in: %s' % str(doc.text.encode('ascii', 'ignore'))
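# Pull the restored record's id out of the current view URL ('%2F' is
# just a URL-escaped '/'); restoration keeps the original record id.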
new_id = self.s.url[
self.s.url.find('haiti'):self.s.url.find('&subdomain')]
new_id = new_id.replace('%2F', '/')
# Make sure that Person/Note records are now visible, with all
# of their original attributes from prior to deletion.
person = Person.get_by_key_name('haiti:' + new_id)
notes = Note.get_by_person_record_id('haiti', person.record_id)
assert person
assert len(notes) == 1
assert person.author_name == '_test_author_name'
assert person.author_email == 'test@example.com'
assert person.first_name == '_test_first_name'
assert person.last_name == '_test_last_name'
assert person.photo_url == '_test_photo_url'
assert person.subdomain == 'haiti'
assert person.source_date == now
assert person.entry_date == now
assert person.expiry_date == now + datetime.timedelta(60, 0, 0)
assert not person.is_expired
assert notes[0].author_email == 'test2@example.com'
assert notes[0].text == 'Testing'
assert notes[0].person_record_id == new_id
assert not notes[0].is_expired
# Search for the record. Make sure it shows up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' not in doc.text
# The read API should show a record with all the fields present,
# as if the record was just written with new field values.
doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-03-04T00:00:00Z</pfif:expiry_date>
<pfif:author_name>_test_author_name</pfif:author_name>
<pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
<pfif:first_name>_test_first_name</pfif:first_name>
<pfif:last_name>_test_last_name</pfif:last_name>
<pfif:photo_url>_test_photo_url</pfif:photo_url>
<pfif:note>
<pfif:note_record_id>haiti.person-finder.appspot.com/note.456</pfif:note_record_id>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-01T00:00:00Z</pfif:entry_date>
<pfif:author_name></pfif:author_name>
<pfif:source_date>2010-01-01T00:00:00Z</pfif:source_date>
<pfif:text>Testing</pfif:text>
</pfif:note>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# The outgoing feed should contain a complete record also.
doc = self.go('/feeds/person?subdomain=haiti&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:pfif="http://zesty.ca/pfif/1.3">
<id>http://%s/feeds/person?subdomain=haiti&amp;version=1.3</id>
<title>%s</title>
<updated>2010-01-03T00:00:00Z</updated>
<link rel="self">http://%s/feeds/person?subdomain=haiti&version=1.3</link>
<entry>
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-03-04T00:00:00Z</pfif:expiry_date>
<pfif:author_name>_test_author_name</pfif:author_name>
<pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
<pfif:first_name>_test_first_name</pfif:first_name>
<pfif:last_name>_test_last_name</pfif:last_name>
<pfif:photo_url>_test_photo_url</pfif:photo_url>
<pfif:note>
<pfif:note_record_id>haiti.person-finder.appspot.com/note.456</pfif:note_record_id>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-01T00:00:00Z</pfif:entry_date>
<pfif:author_name></pfif:author_name>
<pfif:source_date>2010-01-01T00:00:00Z</pfif:source_date>
<pfif:text>Testing</pfif:text>
</pfif:note>
</pfif:person>
<id>pfif:haiti.person-finder.appspot.com/person.123</id>
<title>_test_first_name _test_last_name</title>
<author>
<name>_test_author_name</name>
</author>
<updated>2010-01-03T00:00:00Z</updated>
<source>
<title>%s</title>
</source>
<content>_test_first_name _test_last_name</content>
</entry>
</feed>
''' % (self.hostport, self.hostport, self.hostport, self.hostport)
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Confirm that restoration notifications were sent.
assert len(MailThread.messages) == 4
messages = sorted(MailThread.messages[2:], key=lambda m: m['to'][0])
# After sorting by recipient, the second message should be to the
# person author, test@example.com (sorts after test2@example.com).
assert messages[1]['to'] == ['test@example.com']
words = ' '.join(messages[1]['data'].split())
assert ('Subject: [Person Finder] Record restoration notice for ' +
'"_test_first_name _test_last_name"' in words)
# The first message should be to the note author, test2@example.com.
assert messages[0]['to'] == ['test2@example.com']
words = ' '.join(messages[0]['data'].split())
assert ('Subject: [Person Finder] Record restoration notice for ' +
'"_test_first_name _test_last_name"' in words)
def test_delete_and_wipe(self):
"""Checks that deleting a record through the UI, then waiting until
after the expiration grace period ends, causes the record to
disappear and be deleted permanently from the datastore, leaving
behind the appropriate placeholder in the outgoing API and feeds."""
now, person, note = self.setup_person_and_note()
photo = self.setup_photo(person)
# Advance time by one day.
now = datetime.datetime(2010, 1, 2, 0, 0, 0)
self.set_utcnow_for_test(now)
# Simulate a deletion request with a valid Turing test response.
# (test_delete_and_restore already tests this flow in more detail.)
doc = self.s.go('/delete',
data='subdomain=haiti&' +
'id=haiti.person-finder.appspot.com/person.123&' +
'reason_for_deletion=spam_received&test_mode=yes')
# Run the DeleteExpired task.
doc = self.s.go('/tasks/delete_expired')
# The Person and Note records should be marked expired but retain data.
person = db.get(person.key())
assert person.is_expired
assert person.first_name == '_test_first_name'
assert person.source_date == now
assert person.entry_date == now
assert person.expiry_date == now
note = db.get(note.key())
assert note.is_expired
assert note.text == 'Testing'
# The Photo should still be there.
assert db.get(photo.key())
# The Person and Note records should be inaccessible.
assert not Person.get('haiti', person.record_id)
assert not Note.get('haiti', note.record_id)
# Search for the record. Make sure it does not show up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' in doc.text
# The read API should expose an expired record.
doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>haiti.person-finder.appspot.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Advance time past the end of the expiration grace period.
now = datetime.datetime(2010, 1, 6, 0, 0, 0)
self.set_utcnow_for_test(now)
# Run the DeleteExpired task.
doc = self.s.go('/tasks/delete_expired')
# The Person record should still exist but now be empty.
# The timestamps should be unchanged.
person = db.get(person.key())
assert person.is_expired
assert person.first_name is None
assert person.source_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
assert person.entry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
# The Note and Photo should be gone.
assert not db.get(note.key())
assert not db.get(photo.key())
# The placeholder exposed by the read API should be unchanged.
doc = self.go('/api/read?subdomain=haiti&id=haiti.person-finder.appspot.com/person.123&version=1.3') # PFIF 1.3
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# The Person and Note records should be inaccessible.
assert not Person.get('haiti', person.record_id)
assert not Note.get('haiti', note.record_id)
# Search for the record. Make sure it does not show up.
doc = self.go('/results?subdomain=haiti&role=seek&' +
'query=_test_first_name+_test_last_name')
assert 'No results found' in doc.text
def test_incoming_expired_record(self):
"""Tests that an incoming expired record can cause an existing record
to expire and be deleted."""
now, person, note = self.setup_person_and_note('test.google.com')
assert person.first_name == '_test_first_name'
# Advance time by one day.
now = datetime.datetime(2010, 1, 2, 0, 0, 0)
self.set_utcnow_for_test(now)
# Simulate the arrival of an update that expires this record.
data = '''\
<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-02T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2001-01-02T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
</pfif:pfif>
'''
self.go('/api/write?subdomain=haiti&key=test_key',
data=data, type='application/xml')
# Advance time by one day.
now = datetime.datetime(2010, 1, 3, 0, 0, 0)
self.set_utcnow_for_test(now)
# Run the DeleteExpired task.
self.s.go('/tasks/delete_expired').content
# The Person record should be hidden but not yet gone.
# The timestamps should reflect the time that the record was hidden.
assert not Person.get('haiti', person.record_id)
person = db.get(person.key())
assert person.is_expired
assert person.first_name == ''
assert person.source_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
assert person.entry_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
# The Note record should be hidden but not yet gone.
assert not Note.get('haiti', note.record_id)
assert db.get(note.key())
# The read API should expose an expired record.
doc = self.go('/api/read?subdomain=haiti&id=test.google.com/person.123&version=1.3') # PFIF 1.3
expected_content = '''<?xml version="1.0" encoding="UTF-8"?>
<pfif:pfif xmlns:pfif="http://zesty.ca/pfif/1.3">
<pfif:person>
<pfif:person_record_id>test.google.com/person.123</pfif:person_record_id>
<pfif:entry_date>2010-01-03T00:00:00Z</pfif:entry_date>
<pfif:expiry_date>2010-01-02T00:00:00Z</pfif:expiry_date>
<pfif:source_date>2010-01-03T00:00:00Z</pfif:source_date>
<pfif:full_name></pfif:full_name>
</pfif:person>
</pfif:pfif>
'''
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
# Advance time by three more days (past the expiration grace period).
now = datetime.datetime(2010, 1, 6, 0, 0, 0)
self.set_utcnow_for_test(now)
# Run the DeleteExpired task.
self.s.go('/tasks/delete_expired').content
# The Person record should still exist but now be empty.
# The timestamps should be unchanged.
person = db.get(person.key())
assert person.is_expired
assert person.first_name is None
assert person.source_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
assert person.entry_date == datetime.datetime(2010, 1, 3, 0, 0, 0)
assert person.expiry_date == datetime.datetime(2010, 1, 2, 0, 0, 0)
# The Note record should be gone.
assert not db.get(note.key())
# The read API should show the same expired record as before.
doc = self.go('/api/read?subdomain=haiti&id=test.google.com/person.123&version=1.3') # PFIF 1.3
assert expected_content == doc.content, \
text_diff(expected_content, doc.content)
def test_mark_notes_as_spam(self):
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
author_name='_test_author_name',
author_email='test@example.com',
first_name='_test_first_name',
last_name='_test_last_name',
entry_date=datetime.datetime.now()
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
author_email='test2@example.com',
person_record_id='test.google.com/person.123',
entry_date=utils.get_utcnow(),
text='Testing'
))
person = Person.get('haiti', 'test.google.com/person.123')
assert len(person.get_notes()) == 1
assert Note.get('haiti', 'test.google.com/note.456')
# Visit the page and click the button to mark a note as spam.
# Bring up confirmation page.
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
doc = self.s.follow('Report spam')
assert 'Are you sure' in doc.text
assert 'Testing' in doc.text
assert 'captcha' not in doc.content
button = doc.firsttag('input', value='Yes, update the note')
doc = self.s.submit(button)
assert 'Status updates for this person' in doc.text
assert 'This note has been marked as spam.' in doc.text
assert 'Not spam' in doc.text
assert 'Reveal note' in doc.text
# When a note is flagged, these new links appear.
assert doc.first('a', id='reveal-note')
assert doc.first('a', id='hide-note')
# When a note is flagged, the contents of the note are hidden.
assert doc.first('div', class_='contents')['style'] == 'display: none;'
# Make sure that a UserActionLog entry was created
assert len(UserActionLog.all().fetch(10)) == 1
# Unmark the note as spam.
doc = self.s.follow('Not spam')
assert 'Are you sure' in doc.text
assert 'Testing' in doc.text
assert 'captcha' in doc.content
# Make sure it redirects to the same page with error
doc = self.s.submit(button)
assert 'incorrect-captcha-sol' in doc.content
assert 'Are you sure' in doc.text
assert 'Testing' in doc.text
url = '/flag_note?subdomain=haiti&id=test.google.com/note.456&' + \
'test_mode=yes'
doc = self.s.submit(button, url=url)
assert 'This note has been marked as spam.' not in doc.text
assert 'Status updates for this person' in doc.text
assert 'Report spam' in doc.text
# Make sure that a second UserActionLog entry was created
assert len(UserActionLog.all().fetch(10)) == 2
def test_subscriber_notifications(self):
"Tests that a notification is sent when a record is updated"
SUBSCRIBER = 'example1@example.com'
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
author_name='_test_author_name',
author_email='test@example.com',
first_name='_test_first_name',
last_name='_test_last_name',
entry_date=datetime.datetime.utcnow(),
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
person_record_id='test.google.com/person.123',
text='Testing',
entry_date=datetime.datetime.utcnow(),
))
db.put(Subscription(
key_name='haiti:test.google.com/person.123:example1@example.com',
subdomain='haiti',
person_record_id='test.google.com/person.123',
email=SUBSCRIBER,
language='fr'
))
# Reset the MailThread queue _before_ making any requests
# to the server, else risk errantly deleting messages
MailThread.messages = []
# Visit the details page and add a note, triggering notification
# to the subscriber
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.123')
self.verify_details_page(1)
self.verify_note_form()
self.verify_update_notes(False, '_test A note body',
'_test A note author',
status='information_sought')
self.verify_email_sent()
message = MailThread.messages[0]
assert message['to'] == [SUBSCRIBER]
assert 'do-not-reply@' in message['from']
assert '_test_first_name _test_last_name' in message['data']
# Subscription is French, email should be, too
assert 'recherche des informations' in message['data']
assert '_test A note body' in message['data']
assert 'view?id=test.google.com%2Fperson.123' in message['data']
def test_subscribe_and_unsubscribe(self):
"""Tests subscribing to notifications on status updating"""
SUBSCRIBE_EMAIL = 'testsubscribe@example.com'
db.put(Person(
key_name='haiti:test.google.com/person.111',
subdomain='haiti',
author_name='_test_author_name',
author_email='test@example.com',
first_name='_test_first_name',
last_name='_test_last_name',
entry_date=datetime.datetime.utcnow()
))
person = Person.get('haiti', 'test.google.com/person.111')
# Reset the MailThread queue _before_ making any requests
# to the server, else risk errantly deleting messages
MailThread.messages = []
d = self.go('/create?subdomain=haiti')
doc = self.s.submit(d.first('form'),
first_name='_test_first',
last_name='_test_last',
author_name='_test_author',
subscribe='on')
assert 'Subscribe to updates about _test_first _test_last' in doc.text
# Empty email is an error.
button = doc.firsttag('input', value='Subscribe')
doc = self.s.submit(button)
assert 'Invalid e-mail address. Please try again.' in doc.text
assert len(person.get_subscriptions()) == 0
# Invalid captcha response is an error
button = doc.firsttag('input', value='Subscribe')
doc = self.s.submit(button, subscribe_email=SUBSCRIBE_EMAIL)
assert 'iframe' in doc.content
assert 'recaptcha_response_field' in doc.content
assert len(person.get_subscriptions()) == 0
# Invalid email is an error (even with valid captcha)
INVALID_EMAIL = 'test@example'
url = ('/subscribe?subdomain=haiti&id=test.google.com/person.111&'
'test_mode=yes')
doc = self.s.submit(button, url=url,
paramdict={'subscribe_email': INVALID_EMAIL})
assert 'Invalid e-mail address. Please try again.' in doc.text
assert len(person.get_subscriptions()) == 0
# Valid email and captcha is success
url = ('/subscribe?subdomain=haiti&id=test.google.com/person.111&'
'test_mode=yes')
doc = self.s.submit(button, url=url,
paramdict={'subscribe_email': SUBSCRIBE_EMAIL})
assert 'successfully subscribed. ' in doc.text
assert '_test_first_name _test_last_name' in doc.text
subscriptions = person.get_subscriptions()
assert len(subscriptions) == 1
assert subscriptions[0].email == SUBSCRIBE_EMAIL
assert subscriptions[0].language == 'en'
# Already subscribed person is shown info page
doc = self.s.submit(button, url=url,
paramdict={'subscribe_email': SUBSCRIBE_EMAIL})
assert 'already subscribed. ' in doc.text
assert 'for _test_first_name _test_last_name' in doc.text
assert len(person.get_subscriptions()) == 1
self.verify_email_sent()
message = MailThread.messages[0]
assert message['to'] == [SUBSCRIBE_EMAIL]
assert 'do-not-reply@' in message['from']
assert '_test_first_name _test_last_name' in message['data']
assert 'view?id=test.google.com%2Fperson.111' in message['data']
# Already subscribed person with new language is success
url = url + '&lang=fr'
doc = self.s.submit(button, url=url,
paramdict={'subscribe_email': SUBSCRIBE_EMAIL})
assert 'successfully subscribed. ' in doc.text
assert '_test_first_name _test_last_name' in doc.text
subscriptions = person.get_subscriptions()
assert len(subscriptions) == 1
assert subscriptions[0].email == SUBSCRIBE_EMAIL
assert subscriptions[0].language == 'fr'
# Test the unsubscribe link in the email
unsub_url = re.search('(/unsubscribe.*)', message['data']).group(1)
doc = self.go(unsub_url)
assert 'successfully unsubscribed' in doc.content
assert len(person.get_subscriptions()) == 0
def test_config_use_family_name(self):
# use_family_name=True
d = self.go('/create?subdomain=haiti')
assert d.first('label', for_='first_name').text.strip() == 'Given name:'
assert d.first('label', for_='last_name').text.strip() == 'Family name:'
assert d.firsttag('input', name='first_name')
assert d.firsttag('input', name='last_name')
self.s.submit(d.first('form'),
first_name='_test_first',
last_name='_test_last',
author_name='_test_author')
person = Person.all().get()
d = self.go('/view?id=%s&subdomain=haiti' % person.record_id)
f = d.first('table', class_='fields').all('tr')
assert f[0].first('td', class_='label').text.strip() == 'Given name:'
assert f[0].first('td', class_='field').text.strip() == '_test_first'
assert f[1].first('td', class_='label').text.strip() == 'Family name:'
assert f[1].first('td', class_='field').text.strip() == '_test_last'
person.delete()
# use_family_name=False
d = self.go('/create?subdomain=pakistan')
assert d.first('label', for_='first_name').text.strip() == 'Name:'
assert not d.all('label', for_='last_name')
assert d.firsttag('input', name='first_name')
assert not d.alltags('input', name='last_name')
assert 'Given name' not in d.text
assert 'Family name' not in d.text
self.s.submit(d.first('form'),
first_name='_test_first',
last_name='_test_last',
author_name='_test_author')
person = Person.all().get()
d = self.go(
'/view?id=%s&subdomain=pakistan' % person.record_id)
f = d.first('table', class_='fields').all('tr')
assert f[0].first('td', class_='label').text.strip() == 'Name:'
assert f[0].first('td', class_='field').text.strip() == '_test_first'
assert 'Given name' not in d.text
assert 'Family name' not in d.text
assert '_test_last' not in d.first('body').text
person.delete()
def test_config_family_name_first(self):
# family_name_first=True
doc = self.go('/create?subdomain=china')
given_label = doc.first('label', for_='first_name')
family_label = doc.first('label', for_='last_name')
assert given_label.text.strip() == 'Given name:'
assert family_label.text.strip() == 'Family name:'
assert family_label.start < given_label.start
given_input = doc.firsttag('input', name='first_name')
family_input = doc.firsttag('input', name='last_name')
assert family_input.start < given_input.start
self.s.submit(doc.first('form'),
first_name='_test_first',
last_name='_test_last',
author_name='_test_author')
person = Person.all().get()
doc = self.go('/view?id=%s&subdomain=china' % person.record_id)
f = doc.first('table', class_='fields').all('tr')
assert f[0].first('td', class_='label').text.strip() == 'Family name:'
assert f[0].first('td', class_='field').text.strip() == '_test_last'
assert f[1].first('td', class_='label').text.strip() == 'Given name:'
assert f[1].first('td', class_='field').text.strip() == '_test_first'
person.delete()
# family_name_first=False
doc = self.go('/create?subdomain=haiti')
given_label = doc.first('label', for_='first_name')
family_label = doc.first('label', for_='last_name')
assert given_label.text.strip() == 'Given name:'
assert family_label.text.strip() == 'Family name:'
assert family_label.start > given_label.start
given_input = doc.firsttag('input', name='first_name')
family_input = doc.firsttag('input', name='last_name')
assert family_input.start > given_input.start
self.s.submit(doc.first('form'),
first_name='_test_first',
last_name='_test_last',
author_name='_test_author')
person = Person.all().get()
doc = self.go('/view?id=%s&subdomain=haiti' % person.record_id)
f = doc.first('table', class_='fields').all('tr')
assert f[0].first('td', class_='label').text.strip() == 'Given name:'
assert f[0].first('td', class_='field').text.strip() == '_test_first'
assert f[1].first('td', class_='label').text.strip() == 'Family name:'
assert f[1].first('td', class_='field').text.strip() == '_test_last'
person.delete()
def test_config_use_postal_code(self):
# use_postal_code=True
doc = self.go('/create?subdomain=haiti')
assert doc.first('label', for_='home_postal_code')
assert doc.firsttag('input', name='home_postal_code')
self.s.submit(doc.first('form'),
first_name='_test_first',
last_name='_test_last',
home_postal_code='_test_12345',
author_name='_test_author')
person = Person.all().get()
doc = self.go('/view?id=%s&subdomain=haiti' % person.record_id)
assert 'Postal or zip code' in doc.text
assert '_test_12345' in doc.text
person.delete()
# use_postal_code=False
doc = self.go('/create?subdomain=pakistan')
assert not doc.all('label', for_='home_postal_code')
assert not doc.alltags('input', name='home_postal_code')
self.s.submit(doc.first('form'),
first_name='_test_first',
last_name='_test_last',
home_postal_code='_test_12345',
author_name='_test_author')
person = Person.all().get()
doc = self.go('/view?id=%s&subdomain=pakistan' % person.record_id)
assert 'Postal or zip code' not in doc.text
assert '_test_12345' not in doc.text
person.delete()
class PersonNoteCounterTests(TestsBase):
"""Tests that modify Person, Note, and Counter entities in the datastore
go here. The contents of the datastore will be reset for each test."""
kinds_written_by_tests = [Person, Note, Counter]
def test_tasks_count(self):
"""Tests the counting task."""
# Add two Persons and two Notes in the 'haiti' subdomain.
db.put(Person(
key_name='haiti:test.google.com/person.123',
subdomain='haiti',
author_name='_test1_author_name',
entry_date=utils.get_utcnow(),
first_name='_test1_first_name',
last_name='_test1_last_name',
sex='male',
date_of_birth='1970-01-01',
age='50-60',
latest_status='believed_missing'
))
db.put(Note(
key_name='haiti:test.google.com/note.123',
subdomain='haiti',
person_record_id='haiti:test.google.com/person.123',
entry_date=utils.get_utcnow(),
status='believed_missing'
))
db.put(Person(
key_name='haiti:test.google.com/person.456',
subdomain='haiti',
author_name='_test2_author_name',
entry_date=utils.get_utcnow(),
first_name='_test2_first_name',
last_name='_test2_last_name',
sex='female',
date_of_birth='1970-02-02',
age='30-40',
latest_found=True
))
db.put(Note(
key_name='haiti:test.google.com/note.456',
subdomain='haiti',
person_record_id='haiti:test.google.com/person.456',
entry_date=utils.get_utcnow(),
found=True
))
# Run the counting task (should finish counting in a single run).
doc = self.go('/tasks/count/person?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
# Check the resulting counters.
assert Counter.get_count('haiti', 'person.all') == 2
assert Counter.get_count('haiti', 'person.sex=male') == 1
assert Counter.get_count('haiti', 'person.sex=female') == 1
assert Counter.get_count('haiti', 'person.sex=other') == 0
assert Counter.get_count('haiti', 'person.found=TRUE') == 1
assert Counter.get_count('haiti', 'person.found=') == 1
assert Counter.get_count('haiti', 'person.status=believed_missing') == 1
assert Counter.get_count('haiti', 'person.status=') == 1
assert Counter.get_count('pakistan', 'person.all') == 0
# Add a Person in the 'pakistan' subdomain.
db.put(Person(
key_name='pakistan:test.google.com/person.789',
subdomain='pakistan',
author_name='_test3_author_name',
entry_date=utils.get_utcnow(),
first_name='_test3_first_name',
last_name='_test3_last_name',
sex='male',
date_of_birth='1970-03-03',
age='30-40',
))
# Re-run the counting tasks for both subdomains.
doc = self.go('/tasks/count/person?subdomain=haiti')
doc = self.go('/tasks/count/person?subdomain=pakistan')
# Check the resulting counters.
assert Counter.get_count('haiti', 'person.all') == 2
assert Counter.get_count('pakistan', 'person.all') == 1
# Check that the counted value shows up correctly on the main page.
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
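# Small counts are not advertised on the main page; once large enough,
# the count is rounded to the nearest hundred ('about 300' for 278).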
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=5L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=86L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking' not in doc.text
db.put(Counter(scan_name=u'person', subdomain=u'haiti', last_key=u'',
count_all=278L))
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'Currently tracking about 300 records' in doc.text
def test_admin_dashboard(self):
"""Visits the dashboard page and makes sure it doesn't crash."""
db.put(Counter(scan_name='Person', subdomain='haiti', last_key='',
count_all=278))
db.put(Counter(scan_name='Person', subdomain='pakistan', last_key='',
count_all=127))
db.put(Counter(scan_name='Note', subdomain='haiti', last_key='',
count_all=12))
db.put(Counter(scan_name='Note', subdomain='pakistan', last_key='',
count_all=8))
assert self.get_url_as_admin('/admin/dashboard')
assert self.s.status == 200
class ConfigTests(TestsBase):
"""Tests that modify ConfigEntry entities in the datastore go here.
The contents of the datastore will be reset for each test."""
def tearDown(self):
reset_data() # This is very expensive due to all the put()s in setup.
def test_admin_page(self):
# Load the administration page.
doc = self.go('/admin?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
assert self.s.status == 200
# Activate a new subdomain.
assert not Subdomain.get_by_key_name('xyz')
create_form = doc.first('form', id='subdomain_create')
doc = self.s.submit(create_form, subdomain_new='xyz')
assert Subdomain.get_by_key_name('xyz')
# Change some settings for the new subdomain.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["no"]',
subdomain_titles='{"no": "Jordskjelv"}',
keywords='foo, bar',
use_family_name='false',
family_name_first='false',
use_postal_code='false',
min_query_word_length='1',
map_default_zoom='6',
map_default_center='[4, 5]',
map_size_pixels='[300, 300]',
read_auth_key_required='false'
)
cfg = config.Configuration('xyz')
assert cfg.language_menu_options == ['no']
assert cfg.subdomain_titles == {'no': 'Jordskjelv'}
assert cfg.keywords == 'foo, bar'
assert not cfg.use_family_name
assert not cfg.family_name_first
assert not cfg.use_postal_code
assert cfg.min_query_word_length == 1
assert cfg.map_default_zoom == 6
assert cfg.map_default_center == [4, 5]
assert cfg.map_size_pixels == [300, 300]
assert not cfg.read_auth_key_required
# Change settings again and make sure they took effect.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["nl"]',
subdomain_titles='{"nl": "Aardbeving"}',
keywords='spam, ham',
use_family_name='true',
family_name_first='true',
use_postal_code='true',
min_query_word_length='2',
map_default_zoom='7',
map_default_center='[-3, -7]',
map_size_pixels='[123, 456]',
read_auth_key_required='true'
)
cfg = config.Configuration('xyz')
assert cfg.language_menu_options == ['nl']
assert cfg.subdomain_titles == {'nl': 'Aardbeving'}
assert cfg.keywords == 'spam, ham'
assert cfg.use_family_name
assert cfg.family_name_first
assert cfg.use_postal_code
assert cfg.min_query_word_length == 2
assert cfg.map_default_zoom == 7
assert cfg.map_default_center == [-3, -7]
assert cfg.map_size_pixels == [123, 456]
assert cfg.read_auth_key_required
def test_deactivation(self):
# Load the administration page.
doc = self.go('/admin?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
assert self.s.status == 200
# Deactivate an existing subdomain.
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["en"]',
subdomain_titles='{"en": "Foo"}',
keywords='foo, bar',
deactivated='true',
deactivation_message_html='de<i>acti</i>vated',
)
cfg = config.Configuration('haiti')
assert cfg.deactivated
assert cfg.deactivation_message_html == 'de<i>acti</i>vated'
# Ensure all paths listed in app.yaml are inaccessible, except /admin.
for path in ['/', '/query', '/results', '/create', '/view',
'/multiview', '/reveal', '/photo', '/embed',
'/gadget', '/delete', '/sitemap', '/api/read',
'/api/write', '/feeds/note', '/feeds/person']:
doc = self.go(path + '?subdomain=haiti')
assert 'de<i>acti</i>vated' in doc.content
assert doc.alltags('form') == []
assert doc.alltags('input') == []
assert doc.alltags('table') == []
assert doc.alltags('td') == []
def test_custom_messages(self):
# Load the administration page.
doc = self.go('/admin?subdomain=haiti')
button = doc.firsttag('input', value='Login')
doc = self.s.submit(button, admin='True')
assert self.s.status == 200
# Edit the custom text fields
settings_form = doc.first('form', id='subdomain_save')
doc = self.s.submit(settings_form,
language_menu_options='["en"]',
subdomain_titles='{"en": "Foo"}',
keywords='foo, bar',
main_page_custom_html='<b>main page</b> message',
results_page_custom_html='<u>results page</u> message',
view_page_custom_html='<a href="http://test">view page</a> message'
)
cfg = config.Configuration('haiti')
assert cfg.main_page_custom_html == '<b>main page</b> message'
assert cfg.results_page_custom_html == '<u>results page</u> message'
assert cfg.view_page_custom_html == \
'<a href="http://test">view page</a> message'
# Add a person record
db.put(Person(
key_name='haiti:test.google.com/person.1001',
subdomain='haiti',
entry_date=utils.get_utcnow(),
first_name='_status_first_name',
last_name='_status_last_name',
author_name='_status_author_name'
))
# Check for custom message on main page
doc = self.go('/?subdomain=haiti&flush_cache=yes')
assert 'main page message' in doc.text
# Check for custom message on results page
doc = self.go('/results?subdomain=haiti&query=xy')
assert 'results page message' in doc.text
# Check for custom message on view page
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
assert 'view page message' in doc.text
class SecretTests(TestsBase):
"""Tests that modify Secret entities in the datastore go here.
The contents of the datastore will be reset for each test."""
kinds_written_by_tests = [Secret]
def test_analytics_id(self):
"""Checks that the analytics_id Secret is used for analytics."""
doc = self.go('/create?subdomain=haiti')
assert 'getTracker(' not in doc.content
db.put(Secret(key_name='analytics_id', secret='analytics_id_xyz'))
doc = self.go('/create?subdomain=haiti')
assert "getTracker('analytics_id_xyz')" in doc.content
def test_maps_api_key(self):
"""Checks that maps don't appear when there is no maps_api_key."""
db.put(Person(
key_name='haiti:test.google.com/person.1001',
subdomain='haiti',
entry_date=utils.get_utcnow(),
first_name='_status_first_name',
last_name='_status_last_name',
author_name='_status_author_name'
))
doc = self.go('/create?subdomain=haiti&role=provide')
assert 'map_canvas' not in doc.content
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
assert 'map_canvas' not in doc.content
assert 'id="map_' not in doc.content
db.put(Secret(key_name='maps_api_key', secret='maps_api_key_xyz'))
doc = self.go('/create?subdomain=haiti&role=provide')
assert 'maps_api_key_xyz' in doc.content
assert 'map_canvas' in doc.content
doc = self.go('/view?subdomain=haiti&id=test.google.com/person.1001')
assert 'maps_api_key_xyz' in doc.content
assert 'map_canvas' in doc.content
assert 'id="map_' in doc.content
def main():
parser = optparse.OptionParser()
parser.add_option('-a', '--address', default='localhost',
help='appserver hostname (default: localhost)')
parser.add_option('-p', '--port', type='int', default=8081,
help='appserver port number (default: 8081)')
parser.add_option('-m', '--mail_port', type='int', default=8025,
help='SMTP server port number (default: 8025)')
parser.add_option('-v', '--verbose', action='store_true')
options, args = parser.parse_args()
try:
threads = []
if options.address == 'localhost':
# We need to start up a clean new appserver for testing.
threads.append(AppServerRunner(options.port, options.mail_port))
threads.append(MailThread(options.mail_port))
for thread in threads:
thread.start()
for thread in threads:
thread.wait_until_ready()
# Connect to the datastore.
hostport = '%s:%d' % (options.address, options.port)
remote_api.connect(hostport, remote_api.get_app_id(), 'test', 'test')
TestsBase.hostport = hostport
TestsBase.verbose = options.verbose
reset_data() # Reset the datastore for the first test.
unittest.main() # You can select tests using command-line arguments.
except Exception, e:
# Something went wrong during testing.
for thread in threads:
if hasattr(thread, 'flush_output'):
thread.flush_output()
traceback.print_exc()
raise SystemExit
finally:
for thread in threads:
thread.stop()
thread.join()
if __name__ == '__main__':
main()
| pet-finder/pet-finder | tests/server_tests.py | Python | apache-2.0 | 170203 | ["VisIt"] | ccee82d1aeea1ab9fc9c7e7f5087ed51e7fc9a0fe1d67e8979d81ebcc0e6216b |
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
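# rfftn/irfftn only became thread-safe in numpy 1.9, so on older numpy
# releases fftconvolve below serializes calls to them with _rfft_lock.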
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
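# Quick sanity sketch (not part of the original module): the boundary
# code is packed above the two mode bits, e.g. _bvalfromboundary('fill')
# returns 0 and _bvalfromboundary('symm') returns 1 << 2 == 4.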
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[..., k, ...] = sum_{i_l} x[..., i_l, ...] * conj(y[..., i_l + k, ...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
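# Note on the swap above: correlating the swapped inputs and then
# reversing and conjugating the result recovers the original correlation,
# so _correlateND always sees the larger array as its first argument.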
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
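# Sanity sketch (assumed example): _centered(np.arange(5), 3) keeps the
# middle three samples, array([1, 2, 3]).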
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
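# Sanity sketch (assumed values): _next_regular(7) == 8 and
# _next_regular(121) == 125 (= 5**3), the nearest 5-smooth sizes; padding
# FFTs up to such sizes is what keeps fftconvolve fast.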
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> lena = misc.lena()
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(lena, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) *
rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
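# Worked sketch (assumed example): convolve([1, 2, 3], [0, 1, 0.5])
# == [0., 1., 2.5, 4., 1.5]; the kernel is flipped and the result
# computed by correlate, matching np.convolve.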
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
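# Sanity sketch (assuming the usual zero padding at the array edges):
# medfilt([2., 80., 6., 3.], kernel_size=3) -> array([ 2., 6., 6., 3.]),
# each output being the median of the 3-sample window around that point.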
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
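# Restated as a formula: out = mu + (var - noise) / var * (im - mu)
# wherever the local variance exceeds the noise power, and plain mu
# elsewhere, with mu and var the windowed local mean and variance above.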
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(lena, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena() - misc.lena().mean()
>>> template = np.copy(lena[235:295, 310:370]) # right eye
>>> template -= template.mean()
>>> lena = lena + np.random.randn(*lena.shape) * 50 # add noise
>>> corr = signal.correlate2d(lena, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
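# A hedged usage sketch for medfilt2d (array name and sizes are illustrative):
# despeckle a small float image with a 5x5 median window.
#
#   img = np.random.rand(64, 64)
#   clean = medfilt2d(img, kernel_size=5)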
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
        if b.ndim != 1 or a.ndim != 1:
            raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
                    if zi.shape[k] == expected_shape[k]:
                        strides[k] = zi.strides[k]
                    elif k != axis and zi.shape[k] == 1:
                        strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype, copy=False)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
ind[axis] = slice(zi.shape[axis])
out[ind] += zi
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
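# A minimal pure-Python sketch of the direct form II transposed recursion
# documented in `lfilter` (illustrative only; the real implementation lives
# in sigtools._linear_filter). It assumes b and a are plain sequences of the
# same length n >= 2 with a[0] == 1; the helper name is ours, not scipy's.
def _df2t_reference(b, a, x):
    n = len(a)
    z = [0.0] * (n - 1)  # internal filter state, initially at rest
    y = []
    for xm in x:
        ym = b[0] * xm + z[0]
        for i in range(n - 2):
            z[i] = b[i + 1] * xm + z[i + 1] - a[i + 1] * ym
        z[n - 2] = b[n - 1] * xm - a[n - 1] * ym
        y.append(ym)
    return y
# e.g. _df2t_reference([0.5, 0.5], [1.0, -0.2], [1.0, 0.0, 0.0]) gives
# [0.5, 0.6, 0.12], matching lfilter([0.5, 0.5], [1.0, -0.2], [1, 0, 0]).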
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
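# A hedged continuation sketch (coefficients are illustrative): filtering a
# signal in two chunks matches a single pass when the second chunk is seeded
# with lfiltic from the first chunk's trailing samples:
#
#   b, a = [1.0, 0.4], [1.0, -0.5]
#   x = np.arange(8.0)
#   y_full = lfilter(b, a, x)
#   y1 = lfilter(b, a, x[:4])
#   zi = lfiltic(b, a, y=y1[::-1], x=x[3::-1])
#   y2, zf = lfilter(b, a, x[4:], zi=zi)   # y2 matches y_full[4:]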
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the instantaneous
phase in respect to time. The instantaneous phase corresponds to the phase
angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing,
Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
    # Build the per-axis step-weighting vectors directly; the original
    # eval/exec round-trip is fragile (exec cannot reliably rebind locals).
    hs = []
    for p in range(2):
        N1 = N[p]
        h = zeros(N1, 'd')
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
        hs.append(h)
    h = hs[0][:, newaxis] * hs[1][newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
             b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
    H(s) = ------ = ----------------------------------------------
             a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
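# A hedged round-trip sketch (coefficients are illustrative): residue and
# invres are inverses up to pole ordering and numerical tolerance:
#
#   b, a = [1.0, 3.0], [1.0, 3.0, 2.0]   # H(s) = (s+3)/((s+1)(s+2))
#   r, p, k = residue(b, a)              # r ~ [-1, 2] for poles [-2, -1]
#   b2, a2 = invres(r, p, k)             # recovers ~[1, 3], [1, 3, 2]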
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
    if len(krev) == 0:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
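# The discrete-time analogue (illustrative coefficients): residuez and
# invresz also invert one another up to ordering and tolerance:
#
#   b, a = [1.0], [1.0, -0.3, 0.02]      # poles at z = 0.1 and 0.2
#   r, p, k = residuez(b, a)             # r ~ [-1, 2] for poles [0.1, 0.2]
#   b2, a2 = invresz(r, p, k)            # recovers b, a up to trailing zeros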
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding to alleviate ringing in
    the resampled values for signals that are not strictly band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input samples is large and prime, see
`scipy.fftpack.fft`.
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
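# A hedged usage sketch: resample one period of a sine onto a denser grid
# (values are illustrative).
#
#   t = np.linspace(0, 1, 20, endpoint=False)
#   x = np.sin(2 * np.pi * t)
#   y = resample(x, 100)   # 100 samples spanning the same interval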
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
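# A quick sanity check (illustrative): events that all land at the same
# phase of the period give strength ~1, the maximum possible.
#
#   strength, phase = vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)
#   # strength ~ 1.0, phase ~ 0.0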
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
    >>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
           filtering. IEEE Transactions on Signal Processing, 44(4):988-992,
           1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
           filtering", IEEE Transactions on Signal Processing, Vol. 44, pp.
           988-992, 1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
    By default, an order 8 Chebyshev type I filter is used. A 30th-order FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
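# A hedged usage sketch: downsample a 1 kHz-sampled tone by a factor of 4
# with the default order-8 Chebyshev I lowpass (cutoff 0.8 * Nyquist / q).
#
#   t = np.arange(0, 1, 1e-3)
#   x = np.sin(2 * np.pi * 5 * t)
#   y = decimate(x, 4)     # len(y) == 250 for the 1000-sample input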
| rmcgibbo/scipy | scipy/signal/signaltools.py | Python | bsd-3-clause | 87008 | ["Gaussian"] | 4980d5552d692ae7d22da081a0449f0301e24a67457e677de8cdf8f8768b6b09 |
"""
ocl_gts v0.01
pyopencl genetic trade simulator
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import pyopencl as cl
import numpy
# connect to the xml server
#
import xmlrpclib
import json
import gene_server_config
import time
import sys
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
print "Connected to",__server__,":",__port__
from bct import *
from genetic import *
from load_config import *
import hashlib
if __name__ == "__main__":
__appversion__ = "0.01a"
print "OpenCL Genetic Bitcoin Trade Simulator v%s"%__appversion__
    deep_logging_enable = False
max_length = 120000
load_throttle = 0 #go easy on cpu usage
calibrate = 1 #set to one to adjust the population size to maintain a one min test cycle
work_group_size = 6
work_item_size = 128
max_open_orders = 512 #MUST MATCH THE OPENCL KERNEL !!!!
order_array_size = 16 #MUST MATCH THE OPENCL KERNEL !!!!
#init pyopencl
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
#read in the OpenCL source file as a string
#f = open("gkernel.cl", 'r')
f = open("gkernel_macd.cl", 'r')
fstr = "".join(f.readlines())
#create the program
ocl_program = cl.Program(ctx, fstr).build('-w') #'-g -O0 -cl-opt-disable -w'
#kernel = ocl_program.fitness
kernel = ocl_program.macd
ocl_mb_wg_macd_pct = None
input_len = 0
def load():
global ocl_mb_wg_macd_pct
global input_len
#open the history file
#print "loading the data set"
f = open("./datafeed/bcfeed_mtgoxUSD_1min.csv",'r')
#f = open("./datafeed/test_data.csv",'r')
d = f.readlines()
f.close()
if len(d) > max_length:
#truncate the dataset
d = d[max_length * -1:]
#load the backtest dataset
input = []
for row in d[1:]:
r = row.split(',')[1] #last price
t = row.split(',')[0] #time
input.append([int(float(t)),float(r)])
#print "done loading:", str(len(input)),"records."
        #allocate uninitialized buffer(s)
input_len = numpy.uint32(len(input))
buf_size = len(input) * work_group_size * work_item_size * 4 #float32 is four bytes
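        #the buffer is sized so each of the work_group_size * work_item_size
        #genes in a batch can write one float32 per input sample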
print "#DEBUG# Buffer size: ",buf_size
        if ocl_mb_wg_macd_pct is not None:
ocl_mb_wg_macd_pct.release()
ocl_mb_wg_macd_pct = cl.Buffer(ctx, mf.WRITE_ONLY, size=buf_size)
print ocl_mb_wg_macd_pct.get_info(cl.mem_info.SIZE)
queue.flush()
return input
#configure the gene pool
g = genepool()
g = load_config_into_object(load_config_from_file("gene_def.json"),g)
#g.set_log("winners.txt")
print "Creating the trade engine"
te = trade_engine()
te.score_only = True
print "preprocessing the input data..."
    #load the initial data
input = load()
te.classify_market(input)
wg_market_classification = [int(i[1] * 4) for i in te.market_class] #use the python based bct trade engine market classification
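    #the *4 scaling buckets the classifier output into small integers
    #(presumably the market-class encoding the OpenCL kernel expects)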
wg_input = [i[1] for i in input]
#process command line args
quartile = ''
bs = ''
verbose = False
print sys.argv
if len(sys.argv) >= 3:
# Convert the two arguments from strings into numbers
quartile = sys.argv[1]
bs = sys.argv[2]
if len(sys.argv) == 4:
if sys.argv[3] == 'v':
verbose = True
#which quartile group to test
while not (quartile in ['1','2','3','4']):
print "Which quartile group to test? (1,2,3,4):"
quartile = raw_input()
quartile = int(quartile)
#bootstrap the population with the winners available from the gene_pool server
while not(bs == 'y' or bs == 'n'):
print "Bootstrap from the gene_server? (y/n)"
bs = raw_input()
if bs == 'y':
bob_simulator = True
g.local_optima_trigger = 10
calibrate = 1
bootstrap_bobs = json.loads(server.get_bobs(quartile))
bootstrap_all = json.loads(server.get_all(quartile))
if (type(bootstrap_bobs) == type([])) and (type(bootstrap_all) == type([])):
g.seed()
#g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
g.reset_scores()
else: #if no BOBS or high scores..seed with a new population
print "no BOBs or high scores available...seeding new pool."
g.seed()
print "%s BOBs loaded"%len(bootstrap_bobs)
print "%s high scores loaded"%len(bootstrap_all)
print "Pool size: %s"%len(g.pool)
else:
bob_simulator = False
g.local_optima_trigger = 5
print "Seeding the initial population"
g.seed()
    cycle_time = 60 * 1 #time in seconds to test the entire population
min_cycle_time = 50
cycle_time_step = 1
test_count = 0
total_count = 0
max_score = -10000
max_score_id = -1
start_time = time.time()
print "Running the simulator"
while 1:
        #periodically reload the data set
test_count += work_group_size * work_item_size
total_count += work_group_size * work_item_size
if load_throttle == 1:
time.sleep(0.35)
if test_count > g.pool_size:
test_count = 0
#benchmark the cycle speed
current_time = time.time()
elapsed_time = current_time - start_time
gps = total_count / (elapsed_time + 0.0001)
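            #gps = genes evaluated per second since start; the small epsilon
            #avoids a divide-by-zero on the first pass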
if calibrate == 1:
#print "Recalibrating pool size..."
suggested_size = int(gps * cycle_time)
cycle_time -= cycle_time_step
if cycle_time < min_cycle_time:
cycle_time = min_cycle_time
if (suggested_size - g.pool_size) > 1000:
g.pool_size += 100
else:
g.pool_size = suggested_size
print "%.2f"%gps,"G/S; ","%.2f"%((gps*len(input))/1000.0),"KS/S;"," Pool Size: ",g.pool_size," Total Processed: ",total_count, " Quartile: ",quartile
#load the latest trade data
print "Loading the lastest trade data..."
te = trade_engine()
te.score_only = True
input = load()
#preprocess input data
te.classify_market(input)
wg_market_classification = [int(i[1] * 4) for i in te.market_class] #use the python based bct trade engine market classification
wg_input = [i[1] for i in input]
if g.local_optima_reached:
print '#'*10, " Local optima reached...sending bob to the gene_server ", '#'*10
max_score = 0
test_count = 0
max_gene = g.get_by_id(max_score_id)
            if max_gene is not None:
print "--\tSubmit BOB for id:%s to server (%.2f)"%(str(max_gene['id']),max_gene['score'])
server.put_bob(json.dumps(max_gene),quartile)
else:
print "--\tNo BOB to submit"
            if bob_simulator:
bootstrap_bobs = json.loads(server.get_bobs(quartile))
bootstrap_all = json.loads(server.get_all(quartile))
                if isinstance(bootstrap_bobs, list) and isinstance(bootstrap_all, list):
g.seed()
g.pool = []
g.insert_genedict_list(bootstrap_bobs)
g.insert_genedict_list(bootstrap_all)
g.reset_scores()
print "BOBs loaded...",len(g.pool)
else: #if no BOBS or high scores..seed with a new population
print "no BOBs or high scores available...seeding new pool."
g.seed()
else:
g.seed()
            #automatically cycle through the four quartiles
quartile += 1
if quartile > 4:
quartile = 1
if test_count > (g.pool_size * 10):
            print "Reset scores to force retest of winners..."
            test_count = 0
            max_score = 0 #knock the high score down so newer, possibly lower
            #scores (driven by the latest price data) are not blocked
g.next_gen()
g.reset_scores()
#build the opencl workgroup
wg_id = []
wg_gene = []
wg_shares = []
wg_wll = []
        wg_wls = []
wg_buy_wait = []
wg_markup = []
wg_stop_loss = []
wg_stop_age = []
wg_macd_buy_trip = []
wg_buy_wait_after_stop_loss = []
wg_quartile = []
#the following lists are only populated (elsewhere) when new data is loaded:
#wg_market_classification = [int(i[1] * 4) for i in te.market_class] #use the python based bct trade engine market classification
#wg_input = [i[1] for i in input]
print "Batch processing",work_group_size * work_item_size,"genes from a pool of",len(g.pool), " and an input len of ",len(wg_input)
for i in range(work_group_size * work_item_size):
ag = g.get_next()
wg_id.append(ag['id'])
wg_gene.append(ag['gene'])
#wg_shares.append(ag['shares'])
wg_wll.append(ag['wll'] + ag['wls'] + 2) #add the two together to make sure
            #the macd moving windows don't get inverted
wg_wls.append(ag['wls'] + 1)
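            #e.g. wls=5, wll=10 gives a short window of 6 and a long window of
            #17, so the long window always exceeds the short one for
            #non-negative gene values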
#wg_buy_wait.append(ag['buy_wait'])
#wg_markup.append(ag['markup'] + (te.commision * 3.0)) #+ 0.025
#wg_stop_loss.append(ag['stop_loss'])
#wg_stop_age.append(float(ag['stop_age']))
#wg_macd_buy_trip.append(ag['macd_buy_trip'] * -1.0)
#wg_buy_wait_after_stop_loss.append(ag['buy_wait_after_stop_loss'])
#wg_quartile.append(quartile)
print "Global Work Items: ",work_group_size * work_item_size
#build the memory buffers
#mb_wg_shares = numpy.array(wg_shares, dtype=numpy.float32)
mb_wg_wll = numpy.array(wg_wll, dtype=numpy.uint32)
mb_wg_wls = numpy.array(wg_wls, dtype=numpy.uint32)
#mb_wg_buy_wait = numpy.array(wg_shares, dtype=numpy.uint32)
#mb_wg_markup = numpy.array(wg_markup, dtype=numpy.float32)
#mb_wg_stop_loss = numpy.array(wg_stop_loss, dtype=numpy.float32)
#mb_wg_stop_age = numpy.array(wg_stop_age, dtype=numpy.float32)
#mb_wg_macd_buy_trip = numpy.array(wg_macd_buy_trip, dtype=numpy.float32)
#mb_wg_buy_wait_after_stop_loss = numpy.array(wg_buy_wait_after_stop_loss, dtype=numpy.uint32)
#mb_wg_quartile = numpy.array(wg_quartile, dtype=numpy.uint32)
#mb_wg_market_classification = numpy.array(wg_market_classification, dtype=numpy.uint32)
mb_wg_input = numpy.array(wg_input, dtype=numpy.float32)
#mb_wg_score = numpy.array(range(work_group_size), dtype=numpy.float32)
#mb_wg_orders = numpy.array(range(work_group_size * max_open_orders * order_array_size), dtype=numpy.float32)
#create OpenCL buffers
        #mapped - makes sure the data is completely loaded before processing begins
#ocl_mb_wg_market_classification = cl.Buffer(ctx, mf.READ_ONLY | mf.ALLOC_HOST_PTR | mf.COPY_HOST_PTR, hostbuf=mb_wg_market_classification)
ocl_mb_wg_input = cl.Buffer(ctx, mf.READ_ONLY | mf.ALLOC_HOST_PTR | mf.COPY_HOST_PTR, hostbuf=mb_wg_input)
#ocl_mb_wg_orders = cl.Buffer(ctx, mf.READ_WRITE | mf.ALLOC_HOST_PTR | mf.COPY_HOST_PTR, hostbuf=mb_wg_orders)#mb_wg_orders.nbytes
#unmapped - can be transferred on demand
#ocl_mb_wg_quartile = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_quartile)
#ocl_mb_wg_score = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=mb_wg_score)
#ocl_mb_wg_shares = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_shares)
ocl_mb_wg_wll = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_wll)
ocl_mb_wg_wls = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_wls)
#ocl_mb_wg_buy_wait = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_buy_wait)
#ocl_mb_wg_markup = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_markup)
#ocl_mb_wg_stop_loss = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_stop_loss)
#ocl_mb_wg_stop_age = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_stop_age)
#ocl_mb_wg_macd_buy_trip = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_macd_buy_trip)
#ocl_mb_wg_buy_wait_after_stop_loss = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mb_wg_buy_wait_after_stop_loss)
        #allocate uninitialized buffer(s)
#input_len = numpy.uint32(len(input))
#buf_size = len(input) * work_group_size * work_item_size * 4 #float32 is four bytes
#print "#DEBUG# Buffer size: ",buf_size
#ocl_mb_wg_macd_pct = cl.Buffer(ctx, mf.WRITE_ONLY, size=buf_size)
#print ocl_mb_wg_macd_pct.get_info(cl.mem_info.SIZE)
#queue.flush()
#debug - used to make sure the datasets are constant (when input reloading is disabled)
#m = hashlib.md5()
#m.update(str(mb_wg_input))
#m.update(str(mb_wg_market_classification))
#print m.hexdigest()
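        #the triple-quoted block below is dead code kept as a reference for
        #the full fitness kernel's argument wiring; only the five macd kernel
        #arguments are actually set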
gkernel_args = """
kernel.set_arg(0,ocl_mb_wg_shares)
kernel.set_arg(1,ocl_mb_wg_wll)
kernel.set_arg(2,ocl_mb_wg_wls)
kernel.set_arg(3,ocl_mb_wg_buy_wait)
kernel.set_arg(4,ocl_mb_wg_markup)
kernel.set_arg(5,ocl_mb_wg_stop_loss)
kernel.set_arg(6,ocl_mb_wg_stop_age)
kernel.set_arg(7,ocl_mb_wg_macd_buy_trip)
kernel.set_arg(8,ocl_mb_wg_buy_wait_after_stop_loss)
kernel.set_arg(9,ocl_mb_wg_quartile)
kernel.set_arg(10,ocl_mb_wg_market_classification)
kernel.set_arg(11,ocl_mb_wg_input)
kernel.set_arg(12,ocl_mb_wg_score)
kernel.set_arg(13,ocl_mb_wg_orders)
kernel.set_arg(14,input_len)
"""
kernel.set_arg(0,ocl_mb_wg_macd_pct)
kernel.set_arg(1,ocl_mb_wg_wll)
kernel.set_arg(2,ocl_mb_wg_wls)
kernel.set_arg(3,ocl_mb_wg_input)
kernel.set_arg(4,input_len)
#execute the workgroup
print "executing the workgroup"
event = cl.enqueue_nd_range_kernel(queue,kernel,mb_wg_wll.shape,(work_item_size,))
event.wait()
print "execution complete"
#copy the result buffer (scores) back to the host
#scores = numpy.empty_like(mb_wg_score)
#cl.enqueue_read_buffer(queue, ocl_mb_wg_score, scores).wait()
#time.sleep(0.01)
#dumps the orders array - used for debug
        if deep_logging_enable: #note: relies on mb_wg_orders/ocl_mb_wg_orders, currently commented out above
            #write out the orders array
orders = numpy.empty_like(mb_wg_orders)
cl.enqueue_read_buffer(queue, ocl_mb_wg_orders, orders).wait()
f = open('/tmp/orders/' + str(total_count),'w' )
for i in range(0,len(orders),order_array_size):
                if int(abs(orders[i])) != i/(max_open_orders * order_array_size): #don't save untouched memory
f.write(wg_id[i/(max_open_orders * order_array_size)] + ':\t\t' + str(i/(max_open_orders * order_array_size))+': '+ "\t".join(map(str,(orders[i],orders[i+1],orders[i+2],orders[i+3],orders[i+4],orders[i+5],orders[i+6],orders[i+7],orders[i+8],orders[i+9],orders[i+10],orders[i+11],orders[i+12],orders[i+13],orders[i+14],orders[i+15]))))
f.write('\n')
f.close()
#release all the buffers
#ocl_mb_wg_shares.release()
ocl_mb_wg_wll.release()
ocl_mb_wg_wls.release()
#ocl_mb_wg_buy_wait.release()
#ocl_mb_wg_markup.release()
#ocl_mb_wg_stop_loss.release()
#ocl_mb_wg_stop_age.release()
#ocl_mb_wg_macd_buy_trip.release()
#ocl_mb_wg_buy_wait_after_stop_loss.release()
#ocl_mb_wg_quartile.release()
#ocl_mb_wg_market_classification.release()
ocl_mb_wg_input.release()
#ocl_mb_wg_score.release()
#process the results
for i in range(work_group_size):
#score = float(scores[i])
score = -10000
#dump the scores buffer to a file - used for debugging
            if deep_logging_enable: #note: relies on wg_shares and the other commented-out lists above
                #write out the scores
                if score > 0.1 or 1: #the 'or 1' forces logging of every score while debugging
f = open('/tmp/scores/' + str(wg_id[i]),'a' )
f.write(",".join(map(str,(time.ctime(),total_count,score, wg_gene[i], \
wg_shares[i], \
wg_wll[i], \
wg_wls[i], \
wg_buy_wait[i], \
wg_markup[i], \
wg_stop_loss[i], \
wg_stop_age[i], \
wg_macd_buy_trip[i], \
wg_buy_wait_after_stop_loss[i]
))))
f.write('\n')
f.close()
if verbose:
indicator = ""
if max_score <= score:
indicator = "<------------------"
print wg_id[i],wg_gene[i],"\t".join(["%.5f"%max_score,"%.5f"%score]),indicator
#submit the score to the gene pool
g.set_score(wg_id[i],score)
#if a new high score is found (or revisited) submit the gene to
#the server
if score > max_score and score > -1000.00:
print "--\tSubmit high score for id:%s to server (%.2f)"%(str(wg_id[i]),score)
max_score = score
max_score_id = wg_id[i]
max_gene = g.get_by_id(max_score_id)
                if max_gene is not None:
server.put(json.dumps(max_gene),quartile)
else:
print "MAX_GENE is None!!"
#print "MAX_SCORE:",max_score,"MAX_SCORE_ID:",max_score_id,"OBJECT_TYPE:",type(g.get_by_id(max_score_id))
| bshyong/ga-bitbot | ocl_gts.py | Python | gpl-3.0 | 19,100 | ["Brian"] | d54942b5e9970f33d2f9f5ccc173df6d2347c9105667c0687176d566e8fbb8a2 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Running SageNB Tests
Functions for running SageNB tests. This can also be used as a script.
NOTE:
The SageNB tests assume a Selenium server or Grid hub is running
with the options given in :mod:`sagenb.testing.notebook_test_case` or
set by :func:`setup_tests`.
Selenium server can be downloaded from the Selenium `download page
<http://seleniumhq.org/download/>`_ as part of the Selenium RC package
and can be run with `java -jar selenium-server.jar`. To set up
Selenium Grid, please visit its `home page
<http://selenium-grid.seleniumhq.org/>`_ for instructions.
TODO:
- Add extra functionality to this script
- Include a proxy to this script in the Sage scripts repo.
"""
# Developer note:
# The Selenium server cannot be included in the package because
# of the possibility of incompatible libraries and binaries with
# those of the user's browser (e.g., Python, etc.)
import unittest
import notebook_test_case
from sagenb.misc.misc import browser
from tests import test_accounts, test_worksheet, test_worksheet_list
CASES = {
'TestAccounts': test_accounts,
'TestWorksheet': test_worksheet,
'TestWorksheetList': test_worksheet_list
}
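# CASES maps TestCase class names to their defining modules; run_any() uses
# it to resolve bare test names via unittest's loadTestsFromName.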
all_tests = unittest.TestSuite((test_accounts.suite,
test_worksheet.suite,
test_worksheet_list.suite))
def setup_tests(address='localhost', secure=False,
environment='*firefox3 /usr/bin/firefox'):
"""
Sets selected options for SageNB Selenium tests.
INPUT:
- ``address`` - a string (default: 'localhost'); address of the
network interface at which the notebook server listens. Do not
leave this empty; see :mod:`sagenb.testing.notebook_test_case`
for details.
- ``secure`` - a boolean (default: False); whether to launch a
secure notebook server. Note: Browser security warnings will
yield failed tests. To work around these in Firefox, close all
windows, create a new profile (e.g., `firefox -P selenium`),
browse to a secure notebook server, accept the certificate, and
quit. Then launch the Selenium server with, e.g.,
java -jar selenium-server -firefoxProfileTemplate $HOME/selenium/firefox
and run the tests. A minimal profile template directory can
contain just the files `cert8.db` and `cert_override.txt`.
- ``environment`` - a string (default: '*firefox3
/usr/bin/firefox'); the browser environment in which to run the
tests. The path is optional. However, for the Selenium server
to have complete control over the launched browser, it's best to
give the full path to the browser *executable* (i.e., not a
shell script).
Possible environments include '*chrome', '*firefox',
'*firefox3', '*googlechrome', '*iexplore', '*opera', '*safari'.
EXAMPLES::
sage: import sagenb.testing.run_tests as rt # not tested
sage: env = '*firefox3 /usr/lib64/firefox-3.5.6/firefox' # not tested
sage: rt.setup_tests('localhost', True, env) # not tested
sage: rt.run_any() # not tested
sage: rt.setup_tests('localhost', True, '*opera') # not tested
sage: rt.run_and_report() # not tested
"""
# TODO: Add a directory option for parallel testing.
notebook_test_case.NB_OPTIONS['address'] = address
notebook_test_case.NB_OPTIONS['secure'] = secure
notebook_test_case.SEL_OPTIONS['environment'] = environment
def run_any(tests=all_tests, make_report=False, **kwargs):
"""
Creates and runs an ad hoc test suite from a test name, case,
suite, or a mixed list thereof. If no matching tests are found,
no tests are run.
INPUT:
- ``tests`` - a string, :class:`unittest.TestCase`,
:class:`unittest.TestSuite`, or a mixed list thereof. Strings
can be test names, with or without the prefix 'test_'.
- ``make_report`` - a boolean (default: False); whether to
  generate an HTML report of the test results.
- ``kwargs`` - a dictionary; additional keyword options to pass to
:func:`run_suite` or :func:`run_and_report`.
EXAMPLES::
sage: import sagenb.testing.run_tests as rt # not tested
sage: rt.run_any('simple_evaluation', make_report=True) # not tested
sage: rt.run_any(['4088', 'test_3711'], verbosity=1) # not tested
sage: rt.run_any('foo', False) # not tested
sage: rt.run_any(rt.test_accounts.TestAccounts) # not tested
sage: rt.run_any(make_report=True) # not tested
"""
import inspect
from_name = unittest.TestLoader().loadTestsFromName
from_case = unittest.TestLoader().loadTestsFromTestCase
if not isinstance(tests, list):
tests = [tests]
alist = []
for t in tests:
if isinstance(t, str):
if not t.startswith('test_'):
t = 'test_' + t
for c in CASES:
try:
alist.append(from_name(c + '.' + t, module = CASES[c]))
except AttributeError:
pass
elif inspect.isclass(t) and issubclass(t, unittest.TestCase):
alist.append(from_case(t))
elif isinstance(t, unittest.TestSuite):
alist.append(t)
if alist:
suite = unittest.TestSuite(alist)
tot = suite.countTestCases()
environment = notebook_test_case.SEL_OPTIONS['environment']
print 'Running %d test%s in environment %s...' % (tot, '' if tot == 1 else 's', environment)
if make_report:
run_and_report(suite, environment = environment, **kwargs)
else:
run_suite(suite, **kwargs)
def run_suite(suite=all_tests, verbosity=2):
"""
Runs a test suite.
For the SageNB test suite, this assumes a Selenium server or Grid
hub is running with the options given in
:mod:`sagenb.testing.notebook_test_case` or set by
:func:`setup_tests`
INPUT:
- ``suite`` - a TestSuite instance (default: all_tests); the test
suite to run
- ``verbosity`` - an integer (default: 2); how verbosely to report
instantaneous test results
EXAMPLES::
sage: import sagenb.testing.run_tests as rt # not tested
sage: rt.run_suite() # not tested
sage: rt.run_suite(rt.test_worksheet.suite, verbosity=1) # not tested
"""
unittest.TextTestRunner(verbosity=verbosity).run(suite)
def run_and_report(suite=all_tests, verbosity=2, report_filename='report.html',
title='Sage Notebook Tests',
description='Selenium test results',
open_viewer=True, **kwargs):
"""
    Runs a test suite and generates an HTML report with the outcome
(pass, fail, or error) and output, including any tracebacks, for
each test, plus overall statistics.
For the SageNB test suite, this assumes a Selenium server or Grid
hub is running with the options given in
:mod:`sagenb.testing.notebook_test_case` or set by
:func:`setup_tests`.
INPUT:
- ``suite`` - a TestSuite instance (default: all_tests); the test
suite to run
- ``verbosity`` - an integer (default: 2); how verbosely to report
instantaneous test results
- ``report_filename`` - a string (default: 'report.html'); the
report's filename
- ``title`` - a string (default: 'Sage Notebook Tests'); the
report's title
- ``description`` - a string (default: 'Selenium test results'); a
description included near the beginning of the report
- ``open_viewer`` - a boolean (default: True); whether to open
the report in a web browser
- ``kwargs`` - a dictionary; extra keyword arguments passed to the
test runner's constructor
EXAMPLES::
sage: import sagenb.testing.run_tests as rt # not tested
sage: rt.run_and_report() # not tested
sage: rt.run_and_report(report_filename='test1.html') # not tested
sage: rt.run_and_report(rt.test_accounts.suite) # not tested
"""
from HTMLTestRunner import HTMLTestRunner
report_fd = open(report_filename, 'w')
runner = HTMLTestRunner(verbosity = verbosity, stream = report_fd,
title = title, description = description,
**kwargs)
runner.run(suite)
if open_viewer:
import os, subprocess
subprocess.Popen(browser() + ' ' + os.path.abspath(report_filename),
shell=True)
if __name__ == '__main__':
run_suite()
| topisani/sagenb | sagenb/testing/run_tests.py | Python | gpl-3.0 | 8,898 | ["VisIt"] | fe638daf92ab3a43b3132a0c051f30cadb9649bd6a89a22e32b22c8a45d48594 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
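# Two fake NUMA cells (CPUs {1, 2} and {3, 4}, 3072 MB each); the claim
# tests below compare against this via assertEqualNUMAHostTopology.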
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': 'type-PF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': 'type-VF',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
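        # Pool stats the fake driver advertises: two 8086:0443 devices on
        # NUMA node 1 and one 8086:7891 device with no NUMA affinity.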
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.compute_node = None
self.tracker._get_service = mock.Mock(return_value=None)
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than to rely on the initial state being the
        # same as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look
        # for differences rather than to rely on the initial state being the
        # same as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
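        # claim_mem / 2 splits the claimed memory evenly across the two fake
        # NUMA cells (6 / 2 = 3 here; the helpers multiply by 1024 to match
        # the cells' 3072 MB)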
instance = self._fake_instance_obj(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance_obj(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance_obj()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_move_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance_obj(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance_obj(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance_obj(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.unit.compute.monitors.test_monitors.CPUMonitor1',
'nova.tests.unit.compute.monitors.test_monitors.CPUMonitor2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=[])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class1 = test_monitors.CPUMonitor1(self.tracker)
self.tracker.monitors = [class1]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': timeutils.strtime(
test_monitors.CPUMonitor1.NOW_TS),
'name': 'cpu.frequency',
'value': 100,
'source': 'CPUMonitor1'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
    stats as an invalid JSON string.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
CloudServer/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 58,490
|
[
"exciting"
] |
64c026185d4c2ae46ef31eb54ce23b0a54745ecb1880b8d4a45413d1c3563ede
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import sys, argparse, os, numpy as np
from horton import IOData, UniformGrid, log, angstrom, \
compute_esp_grid_cube, __version__
from horton.scripts.common import parse_h5, parse_ewald_args, store_args, \
check_output, write_script_output
from horton.scripts.espfit import load_charges
# Raise on all floating point errors except underflow.
np.seterr(divide='raise', over='raise', invalid='raise')
def parse_args():
parser = argparse.ArgumentParser(prog='horton-esp-gen.py',
description='Generate electrostatic potential grid data from charges '
'for a 3D periodic system.')
parser.add_argument('-V', '--version', action='version',
version="%%(prog)s (HORTON version %s)" % __version__)
parser.add_argument('charges', type=str,
help='The atomic charges to be used in the form '
'"file.h5:group/charges". ')
parser.add_argument('grid', type=str,
help='Any type of file that contains a uniform grid specification and '
'atomic coordinates, e.g. a Gaussian cube file.')
parser.add_argument('output', type=str,
help='The output destination in the form file.h5:group. The colon and '
'the group name are optional. When omitted, the root group of the '
'HDF5 file is used.')
parser.add_argument('--overwrite', default=False, action='store_true',
help='Overwrite existing output in the HDF5 file')
parser.add_argument('--qtot', '-q', default=None, type=float,
help='The total charge of the system. When given, the charges from the '
'HDF5 file are corrected after they are read in.')
parser.add_argument('--rcut', default=10.0, type=float,
help='The real-space cutoff for the electrostatic interactions in '
'angstrom. [default=%(default)s]')
parser.add_argument('--alpha-scale', default=3.0, type=float,
help='The alpha scale (alpha = alpha_scale/rcut) for the separation '
'between short-range and long-range electrostatic interactions. '
'[default=%(default)s]')
parser.add_argument('--gcut-scale', default=1.1, type=float,
help='The gcut scale (gcut = gcut_scale*alpha) for the reciprocal '
             'space contribution to the electrostatic interactions. '
'[default=%(default)s]')
return parser.parse_args()
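# Example invocation (hypothetical file names, shown only to illustrate the
# argument forms documented above):
#   horton-esp-gen.py charges.h5:espfit/charges density.cube output.h5:espgrid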
def load_ugrid_coordinates(arg_grid):
mol = IOData.from_file(arg_grid)
return mol.grid, mol.coordinates
def main():
args = parse_args()
fn_h5, grp_name = parse_h5(args.output, 'output')
# check if the group is already present (and not empty) in the output file
if check_output(fn_h5, grp_name, args.overwrite):
return
# Load the charges from the HDF5 file
charges = load_charges(args.charges)
    # Load the uniform grid and the coordinates
ugrid, coordinates = load_ugrid_coordinates(args.grid)
    ugrid.pbc[:] = 1 # enforce 3D periodicity
# Fix total charge if requested
if args.qtot is not None:
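        # Spread the residual charge evenly over all atoms so the final sum equals qtot.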
charges -= (charges.sum() - args.qtot)/len(charges)
# Store parameters in output
results = {}
results['qtot'] = charges.sum()
# Determine the grid specification
results['ugrid'] = ugrid
# Ewald parameters
rcut, alpha, gcut = parse_ewald_args(args)
# Some screen info
if log.do_medium:
log('Important parameters:')
log.hline()
log('Number of grid points: %12i' % ugrid.size)
log('Grid shape: [%8i, %8i, %8i]' % tuple(ugrid.shape))
log('Ewald real cutoff: %12.5e' % rcut)
log('Ewald alpha: %12.5e' % alpha)
log('Ewald reciprocal cutoff: %12.5e' % gcut)
log.hline()
# TODO: add summation ranges
log('Computing ESP (may take a while)')
# Allocate and compute ESP grid
esp = np.zeros(ugrid.shape, float)
compute_esp_grid_cube(ugrid, esp, coordinates, charges, rcut, alpha, gcut)
results['esp'] = esp
# Store the results in an HDF5 file
write_script_output(fn_h5, grp_name, results, args)
if __name__ == '__main__':
main()
|
crisely09/horton
|
scripts/horton-esp-gen.py
|
Python
|
gpl-3.0
| 4,975
|
[
"Gaussian"
] |
afc2c97e92c4b2125497ed077773c82dd5ea8c12e964e78b32ea0430d4a42f92
|
# -*- coding: utf-8 -*-
u"""
.. module:: fabfile
Be aware that, because fabric doesn't support py3k, you need to execute this
particular script using Python 2.
"""
import contextlib
from fabric.api import cd
from fabric.api import env
from fabric.api import prefix
from fabric.api import run
env.user = 'root'
env.hosts = ['wysadzulice.pl']
env.forward_agent = True
def update():
u"""Function defining all steps required to properly update application."""
with contextlib.nested(
cd('/var/www/wysadzulice_pl'),
prefix('workon wysadzulice_pl')
):
run('git pull')
run('git checkout master')
run('python manage.py migrate --traceback')
run('npm cache clear')
run('rm -rf ./node_modules')
run('npm install')
run('gulp build')
run('service apache2 restart')
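# A minimal sketch (an assumption, not part of the original deployment flow):
# fabric 1.x can reuse the task above against another host list via execute(),
# e.g. for a hypothetical staging box.
from fabric.api import execute
def update_staging():
    u"""Hypothetical task: run update() against a staging host."""
    execute(update, hosts=['staging.wysadzulice.pl'])  # hypothetical hostname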
|
magul/wysadzulice.pl
|
fabfile.py
|
Python
|
mit
| 847
|
[
"GULP"
] |
1dcfbe29fdc4705359457019c383e2de50611746aa12469f74b9548bee28d653
|
../../../../../../../share/pyshared/orca/scripts/apps/soffice/script_utilities.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/soffice/script_utilities.py
|
Python
|
gpl-3.0
| 81
|
[
"ORCA"
] |
96ebfd2458aeddb3f60c9925e83ccd1cb2dfb2187252afa8c041a3078ef5e91c
|
import unittest
from simphony import engine as engine_api
from simphony.api import CUDS
from simphony.engine import EngineInterface, create_wrapper
from simlammps.lammps_wrapper import LammpsWrapper
# TODO: Use an enum instead, defined in a proper place
_LAMMPS = 'LAMMPS'
class TestPluginIntegration(unittest.TestCase):
"""Plugin integration tests."""
def test_plugin_integration(self):
from simphony.engine import lammps
self.assertTrue(hasattr(lammps, 'LammpsWrapper'))
def test_engine_registration(self):
self.assertIn(_LAMMPS, engine_api.get_supported_engine_names())
def test_lammps_internal_creation(self):
cuds = CUDS()
lammps = create_wrapper(cuds, _LAMMPS,
EngineInterface.Internal)
self.assertIsInstance(lammps, LammpsWrapper)
def test_lammps_fileio_creation(self):
cuds = CUDS()
lammps = create_wrapper(cuds, _LAMMPS,
EngineInterface.FileIO)
self.assertIsInstance(lammps, LammpsWrapper)
if __name__ == '__main__':
unittest.main()
|
simphony/simphony-lammps-md
|
simlammps/tests/test_plugin_integration.py
|
Python
|
bsd-2-clause
| 1,116
|
[
"LAMMPS"
] |
30a76e7fa4171d265cb43523d9d36005bb509ba1b52117960fbf0458b45c1b8b
|
#
# co_co_buffer_not_assigned.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.symbols.symbol import SymbolKind
from pynestml.symbols.variable_symbol import BlockType
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_visitor import ASTVisitor
class CoCoBufferNotAssigned(CoCo):
name = 'no assignments to Buffers'
    description = 'This coco ensures that no values are assigned to buffers.\n'\
                  'Allowed:\n'\
                  ' currentSum = current + 10mV # current being a buffer\n'\
                  'Not allowed:\n'\
                  ' current = currentSum + 10mV'
def check_co_co(self, node):
"""
Ensures the coco for the handed over neuron.
:param node: a single neuron instance.
:type node: ASTNeuron
"""
node.accept(NoBufferAssignedVisitor())
class NoBufferAssignedVisitor(ASTVisitor):
def visit_assignment(self, node):
symbol = node.get_scope().resolve_to_symbol(node.get_variable().get_name(), SymbolKind.VARIABLE)
if symbol is not None and (symbol.block_type == BlockType.INPUT_BUFFER_SPIKE or
symbol.block_type == BlockType.INPUT_BUFFER_CURRENT):
code, message = Messages.get_value_assigned_to_buffer(node.get_variable().get_complete_name())
Logger.log_message(code=code, message=message,
error_position=node.get_source_position(),
log_level=LoggingLevel.ERROR)
return
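# Minimal usage sketch (assuming a parsed ASTNeuron instance named `neuron`):
#   CoCoBufferNotAssigned().check_co_co(neuron)
# Any assignment to a spike or current input buffer is then reported through
# Logger.log_message at ERROR level.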
|
kperun/nestml
|
pynestml/cocos/co_co_buffer_not_assigned.py
|
Python
|
gpl-2.0
| 2,249
|
[
"NEURON"
] |
a8d11fc609c03e7d0c908c64e93971a876a8150fe885468da32e9d9f2b71c186
|
# coding: utf-8
# In[2]:
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from numpy import random
from math import floor
from random import randint
import warnings
# In[39]:
# subsampling a genome with exponentially distributed read lengths, mean ~1.5 kbp (median ~1 kbp)
# In[11]:
def make_subsample(genome, label, num_draws):
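    """Draw num_draws reads (exponentially distributed lengths) from randomly
    chosen scaffolds of `genome` and write them to a FASTA file named after `label`."""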
partial_subsample = []
    assembly = list(SeqIO.parse(genome,'fasta')) # cast to a list; we need random access to individual scaffolds, not just one-pass iteration
    samples = num_draws # total number of reads to draw (one randomly chosen scaffold per draw) - nb should be a global increment but... meh
outfile = 'subsample_'+label+'_'+str(samples).zfill(7)+'_draws.fasta'
print('making '+str(samples)+' draws, output to '+outfile)
for i in range(0,samples):
# randomly pick (uniform distribution) one of the scaffolds/contigs
# to draw this sample from
which_scaffold = randint(0,len(assembly)-1)
scaffold = assembly[which_scaffold]
#print(scaffold.id)
some_new_seq = False
nonzero_length_check = False
while not nonzero_length_check:
new_length = int(random.exponential(1500))
start_pos = int(random.uniform(1,(len(scaffold)-new_length)))
some_new_seq = scaffold[start_pos:(start_pos+new_length)]
some_new_seq.id = some_new_seq.id + "_sampled_" + str(start_pos) + "_" + str(new_length)
# WARNINGS if short (verbose)
#if not (len(some_new_seq) >0):
#warnings.warn('short sequence: '+str(len(some_new_seq))+' id '+some_new_seq.id)
nonzero_length_check = (len(some_new_seq) >0)
partial_subsample.append(some_new_seq)
if(SeqIO.write(partial_subsample,outfile,'fasta')):
print('wrote out '+str(len(partial_subsample))+' sequences to '+outfile)
else:
warnings.warn('COULD NOT write '+str(len(partial_subsample))+' sequences to '+outfile)
del partial_subsample[:]
pass
# input dir: /media/joe/BiSlDi/genomes/Arabidopsis_thaliana
# input file: arabidopsis_thaliana_GCF_000001735.3_TAIR10_genomic.fna
genome = "/media/joe/BiSlDi/genomes/Arabidopsis_thaliana/arabidopsis_thaliana_GCF_000001735.3_TAIR10_genomic.fna"
make_subsample(genome,'A.thal',10)
make_subsample(genome,'A.thal',100)
make_subsample(genome,'A.thal',1000)
make_subsample(genome,'A.thal',10000)
make_subsample(genome,'A.thal',100000)
# should now have input files with sub/resampled reads, exponentially distributed read lengths with mean 1500
# (median ~1040 bp), uniform picks from all seven A. thaliana TAIR10 chromosomes (i.e. mitochondrial and
# chloroplast genomes sampled at the same frequency as nuclear DNA)
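# Illustrative sanity check (not part of the original pipeline, safe to delete):
# confirm that numpy's exponential(1500) has mean ~1500 bp and median ~1040 bp,
# matching the comments above.
_length_check = random.exponential(1500, 100000)
print('exponential(1500): mean=%.0f median=%.0f (expected ~1500 / ~1040)'
      % (_length_check.mean(), sorted(_length_check)[len(_length_check)//2]))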
# e.g. (example - random process means exact sizes will vary)
#
#joe-Tower:in-silico-reference-genome-digest (master*) joe$ lll *fasta
#-rw-rw-r-- 1 joe joe 1.1G May 18 12:19 subsample_100000_draws.fasta
#-rw-rw-r-- 1 joe joe 109M May 18 12:18 subsample_10000_draws.fasta
#-rw-rw-r-- 1 joe joe 11M May 18 12:18 subsample_1000_draws.fasta
#-rw-rw-r-- 1 joe joe 1.2M May 18 12:18 subsample_100_draws.fasta
#-rw-rw-r-- 1 joe joe 104K May 18 12:18 subsample_10_draws.fasta
# In[12]:
# repeat for Arabidopsis lyrata
genomic_data = "/media/joe/BiSlDi/genomes/Arabidopsis_lyrata_petraea/Arabidopsis_lytata_ADBK01.1.fsa_nt"
make_subsample(genomic_data,'A.lyra',10)
make_subsample(genomic_data,'A.lyra',100)
make_subsample(genomic_data,'A.lyra',1000)
make_subsample(genomic_data,'A.lyra',10000)
make_subsample(genomic_data,'A.lyra',100000)
# In[30]:
# then make a blast DB for each subsampled file, e.g.
#
# if:
#-rw-rw-r-- 1 joe joe 102K May 18 14:56 subsample_A.thal_0000010_draws.fasta
#
# then run:
# makeblastdb -dbtype nucl -out A.thal.subsample_10 -title 'subsampled A.thaliana genome, 10 draws per chromosome' -in subsample_A.thal_0000010_draws.fasta
#
# to give:
#-rw-rw-r-- 1 joe joe 11K May 18 15:02 A.thal.subsample_10.nhr
#-rw-rw-r-- 1 joe joe 964 May 18 15:02 A.thal.subsample_10.nin
#-rw-rw-r-- 1 joe joe 24K May 18 15:02 A.thal.subsample_10.nsq
#
# (the database will be referenced by 'A.thal.subsample_10' in blastn etc.)
#
# to automate this run:
#for i in *fasta;
#do
#echo $i;
#makeblastdb -dbtype nucl -out $i -title '$i: subsampled A.thaliana genome, 10 draws per chromosome' -in $i;
#done
# to crunch through, etc:
#makeblastdb -dbtype nucl -out A.thal.subsample_10E1 -title 'subsampled A.thaliana genome, 10E1 draws per chromosome' -in subsample_A.thal_0000010_draws.fasta
#makeblastdb -dbtype nucl -out A.thal.subsample_10E2 -title 'subsampled A.thaliana genome, 10E2 draws per chromosome' -in subsample_A.thal_0000100_draws.fasta
#makeblastdb -dbtype nucl -out A.thal.subsample_10E3 -title 'subsampled A.thaliana genome, 10E3 draws per chromosome' -in subsample_A.thal_0001000_draws.fasta
# In[ ]:
# In[ ]:
|
lonelyjoeparker/real-time-phylogenomics
|
wales_analyses/in-silico-reference-genome-digest/In silico genome skimming.py
|
Python
|
gpl-2.0
| 4,842
|
[
"BLAST"
] |
de6902e78e534f1756ea0fd9497bfe3f97d6d0d23e5995eda975bd7798afab07
|
""" The Job Scheduling Executor takes the information gained from all previous
optimizers and makes a scheduling decision for the jobs.
Subsequent to this, jobs are added to a Task Queue and pilot agents can be submitted.
All issues preventing the successful resolution of a site candidate are discovered
here, where all information is available.
This Executor will fail affected jobs meaningfully.
"""
import random
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.Time import fromString, toEpoch
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient, getFilesToStage
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.Client import JobStatus
class JobScheduling(OptimizerExecutor):
"""
The specific Optimizer must provide the following methods:
- optimizeJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
@classmethod
def initializeOptimizer(cls):
"""Initialization of the optimizer."""
cls.siteClient = SiteStatus()
cls.__jobDB = JobDB()
return S_OK()
def optimizeJob(self, jid, jobState):
"""1. Banned sites are removed from the destination list.
2. Get input files
3. Production jobs are sent directly to TQ
4. Check if staging is necessary
"""
# Reschedule delay
result = jobState.getAttributes(["RescheduleCounter", "RescheduleTime", "ApplicationStatus"])
if not result["OK"]:
return result
attDict = result["Value"]
try:
reschedules = int(attDict["RescheduleCounter"])
except (ValueError, KeyError):
return S_ERROR("RescheduleCounter has to be an integer")
if reschedules != 0:
delays = self.ex_getOption("RescheduleDelays", [60, 180, 300, 600])
delay = delays[min(reschedules, len(delays) - 1)]
waited = toEpoch() - toEpoch(fromString(attDict["RescheduleTime"]))
if waited < delay:
return self.__holdJob(jobState, "On Hold: after rescheduling %s" % reschedules, delay)
# Get the job manifest for the later checks
result = jobState.getManifest()
if not result["OK"]:
self.jobLog.error("Could not retrieve job manifest", result["Message"])
return result
jobManifest = result["Value"]
# Get site requirements
result = self.__getSitesRequired(jobManifest)
if not result["OK"]:
return result
userSites, userBannedSites = result["Value"]
# Get job type
result = jobState.getAttribute("JobType")
if not result["OK"]:
self.jobLog.error("Could not retrieve job type", result["Message"])
return result
jobType = result["Value"]
# Get banned sites from DIRAC
result = self.siteClient.getSites("Banned")
if not result["OK"]:
self.jobLog.error("Cannot retrieve banned sites", result["Message"])
return result
wmsBannedSites = result["Value"]
        # If the user has selected any sites, filter them and hold the job if it cannot run anywhere
if userSites:
if jobType not in self.ex_getOption("ExcludedOnHoldJobTypes", []):
result = self.siteClient.getUsableSites(userSites)
if not result["OK"]:
self.jobLog.error(
"Problem checking userSites for tuple of active/banned/invalid sites", result["Message"]
)
return result
usableSites = set(result["Value"])
bannedSites = []
invalidSites = []
for site in userSites:
if site in wmsBannedSites:
bannedSites.append(site)
elif site not in usableSites:
invalidSites.append(site)
if invalidSites:
self.jobLog.debug("Invalid site(s) requested: %s" % ",".join(invalidSites))
if not self.ex_getOption("AllowInvalidSites", True):
return self.__holdJob(jobState, "Requested site(s) %s are invalid" % ",".join(invalidSites))
if bannedSites:
self.jobLog.debug("Banned site(s) %s ignored" % ",".join(bannedSites))
if not usableSites:
return self.__holdJob(jobState, "Requested site(s) %s are inactive" % ",".join(bannedSites))
if not usableSites:
return self.__holdJob(jobState, "No requested site(s) are active/valid")
userSites = list(usableSites)
checkPlatform = self.ex_getOption("CheckPlatform", False)
jobPlatform = jobManifest.getOption("Platform", None)
# First check that the platform is valid (in OSCompatibility list)
if checkPlatform and jobPlatform:
result = gConfig.getOptionsDict("/Resources/Computing/OSCompatibility")
if not result["OK"]:
self.jobLog.error("Unable to get OSCompatibility list", result["Message"])
return result
allPlatforms = result["Value"]
if jobPlatform not in allPlatforms:
self.jobLog.error("Platform not supported", jobPlatform)
return S_ERROR("Platform is not supported")
# Filter the userSites by the platform selection (if there is one)
if checkPlatform and userSites:
if jobPlatform:
result = self.__filterByPlatform(jobPlatform, userSites)
if not result["OK"]:
self.jobLog.error("Failed to filter job sites by platform", result["Message"])
return result
userSites = result["Value"]
if not userSites:
# No sites left after filtering -> Invalid platform/sites combination
self.jobLog.error("No selected sites match platform", jobPlatform)
return S_ERROR("No selected sites match platform '%s'" % jobPlatform)
# Check if there is input data
result = jobState.getInputData()
if not result["OK"]:
self.jobLog.error("Failed to get input data from JobDB", result["Message"])
return result
if not result["Value"]:
# No input data? Just send to TQ
return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites)
self.jobLog.verbose("Has an input data requirement")
inputData = result["Value"]
# ===================================================================================
# Production jobs are sent to TQ, but first we have to verify if staging is necessary
# ===================================================================================
if jobType in Operations().getValue("Transformations/DataProcessing", []):
self.jobLog.info("Production job: sending to TQ, but first checking if staging is requested")
res = getFilesToStage(
inputData,
jobState=jobState,
checkOnlyTapeSEs=self.ex_getOption("CheckOnlyTapeSEs", True),
jobLog=self.jobLog,
)
if not res["OK"]:
return self.__holdJob(jobState, res["Message"])
if res["Value"]["absentLFNs"]:
# Some files do not exist at all... set the job Failed
# Reverse errors
reasons = {}
for lfn, reason in res["Value"]["absentLFNs"].items():
reasons.setdefault(reason, []).append(lfn)
for reason, lfns in reasons.items():
# Some files are missing in the FC or in SEs, fail the job
self.jobLog.error(reason, ",".join(lfns))
error = ",".join(reasons)
return S_ERROR(error)
if res["Value"]["failedLFNs"]:
return self.__holdJob(jobState, "Couldn't get storage metadata of some files")
stageLFNs = res["Value"]["offlineLFNs"]
if stageLFNs:
res = self.__checkStageAllowed(jobState)
if not res["OK"]:
return res
if not res["Value"]:
return S_ERROR("Stage not allowed")
self.__requestStaging(jobState, stageLFNs)
return S_OK()
else:
# No staging required
onlineSites = res["Value"]["onlineSites"]
if onlineSites:
# Set the online site(s) first
userSites = set(userSites)
onlineSites &= userSites
userSites = list(onlineSites) + list(userSites - onlineSites)
return self.__sendToTQ(jobState, jobManifest, userSites, userBannedSites, onlineSites=onlineSites)
# ===================================================
# From now on we know it's a user job with input data
# ===================================================
idAgent = self.ex_getOption("InputDataAgent", "InputData")
result = self.retrieveOptimizerParam(idAgent)
if not result["OK"]:
self.jobLog.error("Could not retrieve input data info", result["Message"])
return result
opData = result["Value"]
if "SiteCandidates" not in opData:
return S_ERROR("No possible site candidates")
# Filter input data sites with user requirement
siteCandidates = list(opData["SiteCandidates"])
self.jobLog.info("Site candidates are", siteCandidates)
if userSites:
siteCandidates = list(set(siteCandidates) & set(userSites))
siteCandidates = self._applySiteFilter(siteCandidates, banned=userBannedSites)
if not siteCandidates:
return S_ERROR("Impossible InputData * Site requirements")
idSites = {}
for site in siteCandidates:
idSites[site] = opData["SiteCandidates"][site]
# Check if sites have correct count of disk+tape replicas
numData = len(inputData)
errorSites = set()
for site in idSites:
if numData != idSites[site]["disk"] + idSites[site]["tape"]:
self.jobLog.error("Site candidate does not have all the input data", "(%s)" % site)
errorSites.add(site)
for site in errorSites:
idSites.pop(site)
if not idSites:
return S_ERROR("Site candidates do not have all the input data")
# Check if staging is required
stageRequired, siteCandidates = self.__resolveStaging(inputData, idSites)
if not siteCandidates:
return S_ERROR("No destination sites available")
# Is any site active?
stageSites = self._applySiteFilter(siteCandidates, banned=wmsBannedSites)
if not stageSites:
return self.__holdJob(jobState, "Sites %s are inactive or banned" % ", ".join(siteCandidates))
# If no staging is required send to TQ
if not stageRequired:
# Use siteCandidates and not stageSites because active and banned sites
            # will be taken into account at matching time
return self.__sendToTQ(jobState, jobManifest, siteCandidates, userBannedSites)
# Check if the user is allowed to stage
if self.ex_getOption("RestrictDataStage", False):
res = self.__checkStageAllowed(jobState)
if not res["OK"]:
return res
if not res["Value"]:
return S_ERROR("Stage not allowed")
# Get stageSites[0] because it has already been randomized and it's as good as any in stageSites
stageSite = stageSites[0]
self.jobLog.verbose(" Staging site will be", stageSite)
stageData = idSites[stageSite]
# Set as if everything has already been staged
stageData["disk"] += stageData["tape"]
stageData["tape"] = 0
# Set the site info back to the original dict to save afterwards
opData["SiteCandidates"][stageSite] = stageData
stageRequest = self.__preRequestStaging(jobManifest, stageSite, opData)
if not stageRequest["OK"]:
return stageRequest
stageLFNs = stageRequest["Value"]
result = self.__requestStaging(jobState, stageLFNs)
if not result["OK"]:
return result
stageLFNs = result["Value"]
self.__updateSharedSESites(jobManifest, stageSite, stageLFNs, opData)
# Save the optimizer data again
self.jobLog.verbose("Updating Optimizer Info", ": %s for %s" % (idAgent, opData))
result = self.storeOptimizerParam(idAgent, opData)
if not result["OK"]:
return result
return self.__setJobSite(jobState, stageSites)
def _applySiteFilter(self, sites, banned=False):
"""Filters out banned sites"""
if not sites:
return sites
filtered = set(sites)
if banned and isinstance(banned, (list, set, dict)):
filtered -= set(banned)
return list(filtered)
def __holdJob(self, jobState, holdMsg, delay=0):
if delay:
self.freezeTask(delay)
else:
self.freezeTask(self.ex_getOption("HoldTime", 300))
self.jobLog.info("On hold", holdMsg)
return jobState.setAppStatus(holdMsg, source=self.ex_optimizerName())
def __getSitesRequired(self, jobManifest):
"""Returns any candidate sites specified by the job or sites that have been
banned and could affect the scheduling decision.
"""
bannedSites = jobManifest.getOption("BannedSites", [])
if not bannedSites:
bannedSites = jobManifest.getOption("BannedSite", [])
if bannedSites:
self.jobLog.info("Banned sites", ", ".join(bannedSites))
sites = jobManifest.getOption("Site", [])
# TODO: Only accept known sites after removing crap like ANY set in the original manifest
sites = [site for site in sites if site.strip().lower() not in ("any", "")]
if sites:
if len(sites) == 1:
self.jobLog.info("Single chosen site", ": %s specified" % (sites[0]))
else:
self.jobLog.info("Multiple sites requested", ": %s" % ",".join(sites))
sites = self._applySiteFilter(sites, banned=bannedSites)
if not sites:
return S_ERROR("Impossible site requirement")
return S_OK((sites, bannedSites))
def __filterByPlatform(self, jobPlatform, userSites):
"""Filters out sites that have no CE with a matching platform."""
basePath = "/Resources/Sites"
filteredSites = set()
# FIXME: can use Resources().getSiteCEMapping()
for site in userSites:
if "." not in site:
# Invalid site name: Doesn't contain a dot!
self.jobLog.warn("Skipped invalid site name", site)
continue
grid = site.split(".")[0]
sitePath = cfgPath(basePath, grid, site, "CEs")
result = gConfig.getSections(sitePath)
if not result["OK"]:
self.jobLog.info("Failed to get CEs", "at site %s" % site)
continue
siteCEs = result["Value"]
for CEName in siteCEs:
CEPlatform = gConfig.getValue(cfgPath(sitePath, CEName, "OS"))
if jobPlatform == CEPlatform:
                    # Site has a CE with a matching platform
filteredSites.add(site)
return S_OK(list(filteredSites))
def _getTagsFromManifest(self, jobManifest):
"""helper method to add a list of tags to the TQ from the job manifest content"""
# Generate Tags from specific requirements
tagList = []
# sorting out the number of processors
nProcessors = 1
maxProcessors = 1
if "NumberOfProcessors" in jobManifest: # this should be the exact number
nProcessors = jobManifest.getOption("NumberOfProcessors", 0)
else: # is there a min? and in that case, is there a max?
if "MinNumberOfProcessors" in jobManifest:
nProcessors = jobManifest.getOption("MinNumberOfProcessors", 0)
if "MaxNumberOfProcessors" in jobManifest:
maxProcessors = jobManifest.getOption("MaxNumberOfProcessors", 0)
else:
maxProcessors = -1
if nProcessors and nProcessors > 1:
tagList.append("%dProcessors" % nProcessors)
tagList.append("MultiProcessor")
if maxProcessors == -1 or maxProcessors > 1:
tagList.append("MultiProcessor")
if "WholeNode" in jobManifest:
if jobManifest.getOption("WholeNode", "").lower() in ["1", "yes", "true", "y"]:
tagList.append("WholeNode")
tagList.append("MultiProcessor")
        # sorting out the RAM (this should probably be coded the same way as the number of processors)
if "MaxRAM" in jobManifest:
maxRAM = jobManifest.getOption("MaxRAM", 0)
if maxRAM:
tagList.append("%dGB" % maxRAM)
# other tags? Just add them
if "Tags" in jobManifest:
tagList.extend(jobManifest.getOption("Tags", []))
if "Tag" in jobManifest:
tagList.extend(jobManifest.getOption("Tag", []))
return tagList
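    # Illustrative trace (hypothetical manifest values): MinNumberOfProcessors=4
    # plus WholeNode=yes yields ['4Processors', 'MultiProcessor', 'MultiProcessor',
    # 'WholeNode', 'MultiProcessor']; the duplicated 'MultiProcessor' entries are
    # produced by the independent checks above.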
def __sendToTQ(self, jobState, jobManifest, sites, bannedSites, onlineSites=None):
"""This method sends jobs to the task queue agent and if candidate sites
are defined, updates job JDL accordingly.
"""
tagList = self._getTagsFromManifest(jobManifest)
if tagList:
jobManifest.setOption("Tags", ", ".join(tagList))
reqSection = "JobRequirements"
if reqSection in jobManifest:
result = jobManifest.getSection(reqSection)
else:
result = jobManifest.createSection(reqSection)
if not result["OK"]:
self.jobLog.error("Cannot create jobManifest section", "(%s: %s)" % reqSection, result["Message"])
return result
reqCfg = result["Value"]
if sites:
reqCfg.setOption("Sites", ", ".join(sites))
if bannedSites:
reqCfg.setOption("BannedSites", ", ".join(bannedSites))
# Job multivalue requirement keys are specified as singles in the job descriptions
        # but for backward compatibility can also be plurals
for key in ("SubmitPools", "SubmitPool", "JobType", "GridRequiredCEs", "GridCE", "Tags"):
reqKey = key
if key == "JobType":
reqKey = "JobTypes"
elif key == "GridRequiredCEs" or key == "GridCE": # Remove obsolete GridRequiredCEs
reqKey = "GridCEs"
elif key == "SubmitPools" or key == "SubmitPool": # Check: one of these 2 is obsolete
reqKey = "SubmitPools"
if key in jobManifest:
reqCfg.setOption(reqKey, ", ".join(jobManifest.getOption(key, [])))
result = self.__setJobSite(jobState, sites, onlineSites=onlineSites)
if not result["OK"]:
return result
self.jobLog.verbose("Done")
return self.setNextOptimizer(jobState)
def __resolveStaging(self, inputData, idSites):
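        """Decide whether staging is needed; returns (stagingRequired, candidateSites)."""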
diskSites = []
maxOnDisk = 0
bestSites = []
for site in idSites:
nTape = idSites[site]["tape"]
nDisk = idSites[site]["disk"]
if nTape > 0:
self.jobLog.debug("%s tape replicas on site %s" % (nTape, site))
if nDisk > 0:
self.jobLog.debug("%s disk replicas on site %s" % (nDisk, site))
if nDisk == len(inputData):
diskSites.append(site)
if nDisk > maxOnDisk:
maxOnDisk = nDisk
bestSites = [site]
elif nDisk == maxOnDisk:
bestSites.append(site)
# If there are selected sites, those are disk only sites
if diskSites:
self.jobLog.verbose("No staging required")
return (False, diskSites)
self.jobLog.verbose("Staging required")
if len(bestSites) > 1:
random.shuffle(bestSites)
return (True, bestSites)
def __preRequestStaging(self, jobManifest, stageSite, opData):
tapeSEs = []
diskSEs = []
vo = jobManifest.getOption("VirtualOrganization")
inputDataPolicy = jobManifest.getOption("InputDataPolicy", "Protocol")
connectionLevel = "DOWNLOAD" if "download" in inputDataPolicy.lower() else "PROTOCOL"
# Allow staging from SEs accessible by protocol
result = DMSHelpers(vo=vo).getSEsForSite(stageSite, connectionLevel=connectionLevel)
if not result["OK"]:
return S_ERROR("Could not determine SEs for site %s" % stageSite)
siteSEs = result["Value"]
for seName in siteSEs:
se = StorageElement(seName, vo=vo)
seStatus = se.getStatus()
if not seStatus["OK"]:
return seStatus
seStatus = seStatus["Value"]
if seStatus["Read"] and seStatus["TapeSE"]:
tapeSEs.append(seName)
if seStatus["Read"] and seStatus["DiskSE"]:
diskSEs.append(seName)
if not tapeSEs:
return S_ERROR("No Local SEs for site %s" % stageSite)
self.jobLog.debug("Tape SEs are %s" % (", ".join(tapeSEs)))
        # I swear this is horrible DM code; it's not mine.
# Eternity of hell to the inventor of the Value of Value of Success of...
inputData = opData["Value"]["Value"]["Successful"]
stageLFNs = {}
lfnToStage = []
for lfn in inputData:
replicas = inputData[lfn]
# Check SEs
seStage = []
for seName in replicas:
if seName in diskSEs:
# This lfn is in disk. Skip it
seStage = []
break
if seName not in tapeSEs:
# This lfn is not in this tape SE. Check next SE
continue
seStage.append(seName)
for seName in seStage:
if seName not in stageLFNs:
stageLFNs[seName] = []
stageLFNs[seName].append(lfn)
if lfn not in lfnToStage:
lfnToStage.append(lfn)
if not stageLFNs:
return S_ERROR("Cannot find tape replicas")
# Check if any LFN is in more than one SE
        # If that's the case, try to stage from the SE that has the most LFNs to stage, so requests are grouped
        # 1.- Get the SEs ordered by descending number of LFNs to stage
        sortedSEs = sorted([(len(stageLFNs[seName]), seName) for seName in stageLFNs], reverse=True)
for lfn in lfnToStage:
found = False
# 2.- Traverse the SEs
for _stageCount, seName in sortedSEs:
if lfn in stageLFNs[seName]:
# 3.- If first time found, just mark as found. Next time delete the replica from the request
if found:
stageLFNs[seName].remove(lfn)
else:
found = True
# 4.-If empty SE, remove
if not stageLFNs[seName]:
stageLFNs.pop(seName)
return S_OK(stageLFNs)
def __requestStaging(self, jobState, stageLFNs):
"""Actual request for staging LFNs through the StorageManagerClient"""
self.jobLog.debug(
"Stage request will be \n\t%s" % "\n\t".join(["%s:%s" % (lfn, stageLFNs[lfn]) for lfn in stageLFNs])
)
stagerClient = StorageManagerClient()
result = jobState.setStatus(
JobStatus.STAGING,
self.ex_getOption("StagingMinorStatus", "Request To Be Sent"),
appStatus="",
source=self.ex_optimizerName(),
)
if not result["OK"]:
return result
result = stagerClient.setRequest(
stageLFNs, "WorkloadManagement", "updateJobFromStager@WorkloadManagement/JobStateUpdate", int(jobState.jid)
)
if not result["OK"]:
self.jobLog.error("Could not send stage request", ": %s" % result["Message"])
return result
rid = str(result["Value"])
self.jobLog.info("Stage request sent", "(%s)" % rid)
self.storeOptimizerParam("StageRequest", rid)
result = jobState.setStatus(
JobStatus.STAGING,
self.ex_getOption("StagingMinorStatus", "Request Sent"),
appStatus="",
source=self.ex_optimizerName(),
)
if not result["OK"]:
return result
return S_OK(stageLFNs)
def __updateSharedSESites(self, jobManifest, stageSite, stagedLFNs, opData):
siteCandidates = opData["SiteCandidates"]
seStatus = {}
vo = jobManifest.getOption("VirtualOrganization")
for siteName in siteCandidates:
if siteName == stageSite:
continue
self.jobLog.debug("Checking %s for shared SEs" % siteName)
siteData = siteCandidates[siteName]
result = getSEsForSite(siteName)
if not result["OK"]:
continue
closeSEs = result["Value"]
diskSEs = []
for seName in closeSEs:
# If we don't have the SE status get it and store it
if seName not in seStatus:
seStatus[seName] = StorageElement(seName, vo=vo).status()
                # get the SE status from memory and add it if it's disk
status = seStatus[seName]
if status["Read"] and status["DiskSE"]:
diskSEs.append(seName)
self.jobLog.debug("Disk SEs for %s are %s" % (siteName, ", ".join(diskSEs)))
# Hell again to the dev of this crappy value of value of successful of ...
lfnData = opData["Value"]["Value"]["Successful"]
for seName in stagedLFNs:
# If the SE is not close then skip it
if seName not in closeSEs:
continue
for lfn in stagedLFNs[seName]:
self.jobLog.debug("Checking %s for %s" % (seName, lfn))
# I'm pretty sure that this cannot happen :P
if lfn not in lfnData:
continue
# Check if it's already on disk at the site
onDisk = False
for siteSE in lfnData[lfn]:
if siteSE in diskSEs:
self.jobLog.verbose("lfn on disk", ": %s at %s" % (lfn, siteSE))
onDisk = True
# If not on disk, then update!
if not onDisk:
self.jobLog.verbose("Setting LFN to disk", "for %s" % seName)
siteData["disk"] += 1
siteData["tape"] -= 1
def __setJobSite(self, jobState, siteList, onlineSites=None):
"""Set the site attribute"""
if onlineSites is None:
onlineSites = []
numSites = len(siteList)
if numSites == 0:
self.jobLog.info("Any site is candidate")
return jobState.setAttribute("Site", "ANY")
elif numSites == 1:
self.jobLog.info("Only 1 site is candidate", ": %s" % siteList[0])
return jobState.setAttribute("Site", siteList[0])
# If the job has input data, the online sites are hosting the data
if len(onlineSites) == 1:
siteName = "Group.%s" % ".".join(list(onlineSites)[0].split(".")[1:])
self.jobLog.info("Group %s is candidate" % siteName)
elif onlineSites:
# More than one site with input
siteName = "MultipleInput"
self.jobLog.info("Several input sites are candidate", ": %s" % ",".join(onlineSites))
else:
# No input site reported (could be a user job)
siteName = "Multiple"
self.jobLog.info("Multiple sites are candidate")
return jobState.setAttribute("Site", siteName)
def __checkStageAllowed(self, jobState):
"""Check if the job credentials allow to stage date"""
result = jobState.getAttribute("OwnerGroup")
if not result["OK"]:
self.jobLog.error("Cannot retrieve OwnerGroup from DB", ": %s" % result["Message"])
return result
group = result["Value"]
return S_OK(Properties.STAGE_ALLOWED in Registry.getPropertiesForGroup(group))
|
ic-hep/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Executor/JobScheduling.py
|
Python
|
gpl-3.0
| 29,885
|
[
"DIRAC"
] |
a5b13c37ffabb8c4da6028b4375d8095ac6ddd50b074c9588a4e8e200d48b766
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument,redefined-outer-name
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
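## A minimal sketch of that contract (hypothetical section, not wired into the
## dashboard):
##     def _section_example(course, access):
##         return {
##             'section_key': 'example',
##             'section_display_name': _('Example'),
##             'access': access,
##         }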
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
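# A minimal usage sketch (hedged, illustrative): how a test might exercise the
# view above. `client` is assumed to be a logged-in Django test client for a
# user with instructor access; the payload keys match the fields the view reads.
def _example_set_course_mode_price_request(client, course_key):
    url = reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)})
    return client.post(url, {'course_price': 200, 'currency': 'usd'})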
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
        advance = lambda memo, letter_score: "{}: {}, ".format(letter_score[0], letter_score[1]) + memo
        section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
insights_message = _("For analytics about your course, go to {analytics_dashboard_name}.")
insights_message = insights_message.format(
analytics_dashboard_name='{0}{1}</a>'.format(link_start, settings.ANALYTICS_DASHBOARD_NAME)
)
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'insights_message': insights_message,
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
|
ak2703/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 26,997
|
[
"VisIt"
] |
f0c1011926566f6148aa34587bc45c64837696150ae7aea61087e949a9ce1e24
|
# coding: utf-8
# # Handwritten Digit Recognition with Theano
#
# In this tutorial we will train a feed forward network / multi-layer-perceptron (MLP) to recognize handwritten digits using pure Theano.
# For a longer version see: http://deeplearning.net/tutorial/mlp.html
#
#
#
# ## Layout
# The layout of our network
# <img src="http://deeplearning.net/tutorial/_images/mlp.png">
# Source of image: http://deeplearning.net/tutorial/mlp.html
#
# Our network has 3 layers:
# - Input layer, $28*28=784$ dimensional (the pixels of the images)
# - A hidden layer
# - A Softmax layer
#
# In order to make our lives easier, we will create the following files / classes / components:
# - HiddenLayer - To model a hidden layer
# - SoftmaxLayer - To model a softmax layer
# - MLP - Combines several hidden & softmax layers together to form a MLP
# - One file for reading the data and training the network
#
# ## HiddenLayer
#
# The hidden layer computes the following function:
# $$\text{output} = \tanh(xW + b)$$
#
# The matrix $W$ will be initialized Glorot-style (see Lecture 1).
#
# This is the class we will use for the hidden layer:
# In[1]:
import numpy
import theano
import theano.tensor as T
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
"""
        :param rng: Random number generator, for reproducible results
:param input: Symbolic Theano variable for the input
:param n_in: Number of incoming units
:param n_out: Number of outgoing units
:param W: Weight matrix
:param b: Bias
:param activation: Activation function to use
"""
self.input = input
self.rng = rng
self.n_in = n_in
self.n_out = n_out
self.activation=activation
if W is None: #Initialize Glorot Style
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid or activation == theano.tensor.nnet.hard_sigmoid or activation == theano.tensor.nnet.ultra_fast_sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W')
        if b is None: #Initialize bias to zero
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W = W
self.b = b
#Compute the activation
lin_output = T.dot(input, self.W) + self.b
#Compute the output
if activation is None:
self.output = lin_output
else:
self.output = activation(lin_output)
#Parameters of the model that can be trained
self.params = [self.W, self.b]
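# A quick sanity check of the HiddenLayer (a hedged sketch, not part of the
# original tutorial; the shapes are illustrative): compile the layer's output
# and confirm that a batch of 5 inputs yields 5 hidden vectors of size 50.
# In[ ]:
rng_check = numpy.random.RandomState(0)
x_check = T.fmatrix('x_check')
layer_check = HiddenLayer(rng=rng_check, input=x_check, n_in=784, n_out=50)
f_hidden = theano.function([x_check], layer_check.output)
print f_hidden(numpy.zeros((5, 784), dtype='float32')).shape  # -> (5, 50)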
# ## Softmax Layer
# The softmax-layer computes:
# $$\text{output} = \text{softmax}(xW+b)$$
#
# As for the hidden layer, we allow the parameterization of the number of neurons. The weight matrix and the bias vector are initialized to zero.
#
# As we perform a single-label classification task, we use the negative log-likelihood as the error function:
# $$E(x,W,b) = -\log(o_y)$$
#
# with $o_y$ the output for label $y$.
# In[2]:
import numpy
import theano
import theano.tensor as T
class SoftmaxLayer(object):
def __init__(self, input, n_in, n_out):
self.W = theano.shared(value=numpy.zeros((n_in, n_out),
dtype=theano.config.floatX), name='W')
self.b = theano.shared(value=numpy.zeros((n_out,),
dtype=theano.config.floatX), name='b')
        #Compute the output of the softmax layer; we call it P(y | x), i.e. how
        #likely the label y is given the input x
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
#For prediction we select the most probable output
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
"""
        Computes the negative log-likelihood. The expression explained:
        T.log(self.p_y_given_x): compute the element-wise log of p_y_given_x
        [T.arange(y.shape[0]), y]: for each sample, select the log-probability of its label y
        T.mean(): average over the mini-batch
"""
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
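# A hedged sanity check (not part of the original tutorial): W and b start at
# zero, so the softmax is uniform over the 10 classes and the negative
# log-likelihood of any labels should be -log(1/10) ~= 2.3026.
# In[ ]:
x_sm = T.fmatrix('x_sm')
y_sm = T.ivector('y_sm')
softmax_check = SoftmaxLayer(input=x_sm, n_in=4, n_out=10)
f_nll = theano.function([x_sm, y_sm], softmax_check.negative_log_likelihood(y_sm))
print f_nll(numpy.zeros((3, 4), dtype='float32'),
            numpy.asarray([0, 1, 2], dtype='int32'))  # -> ~2.3026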
# ## MLP
# Our Multi-Layer-Perceptron now plugs everything together, i.e. one hidden layer and the softmax layer.
#
# In[3]:
import numpy
import theano
import theano.tensor as T
class MLP(object):
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""
:param rng: Our random number generator
:param input: Input variable (the data)
:param n_in: Input dimension
:param n_hidden: Hidden size
:param n_out: Output size
"""
self.hiddenLayer = HiddenLayer(rng=rng,
input=input, n_in=n_in, n_out=n_hidden,
activation=T.tanh)
self.softmaxLayer = SoftmaxLayer(
input=self.hiddenLayer.output,
n_in=n_hidden, n_out=n_out)
#Negative log likelihood of this MLP = neg. log likelihood of softmax layer
self.negative_log_likelihood = self.softmaxLayer.negative_log_likelihood
        #Parameters of this MLP = parameters of the hidden + softmax layer
self.params = self.hiddenLayer.params + self.softmaxLayer.params
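# A hedged sketch (illustrative sizes): build a small MLP and inspect its
# trainable parameters -- the hidden layer's W and b followed by the softmax
# layer's W and b.
# In[ ]:
x_mlp = T.fmatrix('x_mlp')
mlp_check = MLP(rng=numpy.random.RandomState(0), input=x_mlp,
                n_in=784, n_hidden=50, n_out=10)
print [p.name for p in mlp_check.params]  # -> ['W', 'b', 'W', 'b']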
# ## Read data + train the network
# Finally we have all the building blocks to create an MLP for the MNIST dataset.
#
# You find the MNIST dataset in the data dir. Otherwise you can obtain it from http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
# In[ ]:
import cPickle
import gzip
import os
import sys
import timeit
import numpy as np
import theano
import theano.tensor as T
# Load the pickle file for the MNIST dataset.
dataset = 'data/mnist.pkl.gz'
f = gzip.open(dataset, 'rb')
train_set, dev_set, test_set = cPickle.load(f)
f.close()
#train_set contains 2 entries, first the X values, second the Y values
train_x, train_y = train_set
dev_x, dev_y = dev_set
test_x, test_y = test_set
#Create shared variables for these sets (for performance reasons)
train_x_shared = theano.shared(value=np.asarray(train_x, dtype='float32'), name='train_x')
train_y_shared = theano.shared(value=np.asarray(train_y, dtype='int32'), name='train_y')
print "Shape of train_x-Matrix: ",train_x_shared.get_value().shape
print "Shape of train_y-vector: ",train_y_shared.get_value().shape
print "Shape of dev_x-Matrix: ",dev_x.shape
print "Shape of test_x-Matrix: ",test_x.shape
###########################
#
# Start to build the model
#
###########################
# Hyper parameters
hidden_units = 50
learning_rate = 0.01
batch_size = 20
# Variables for our network
index = T.lscalar() # index to a minibatch
x = T.fmatrix('x') # the data, one image per row
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
rng = numpy.random.RandomState(1234) #To have deterministic results
# construct the MLP class
classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=hidden_units, n_out=10)
# Define our cost function = error function
cost = classifier.negative_log_likelihood(y) #Here we could add L1 and L2 terms for regularization
# Update param := param - learning_rate * gradient(cost, param)
# See Lecture 1 slide 28
updates = [(param, param - learning_rate * T.grad(cost, param) ) for param in classifier.params]
# Now create a train function
# The train function needs the data, the index for the minibatch and the updates to work correctly
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_x_shared[index * batch_size: (index + 1) * batch_size],
y: train_y_shared[index * batch_size: (index + 1) * batch_size]
}
)
# Create a prediction function
predict_labels = theano.function(inputs=[x], outputs=classifier.softmaxLayer.y_pred)
print ">> train- and predict-functions are compiled <<"
# **Time to train the model**
#
# Now we can train our model by calling train_model(mini_batch_index). To predict labels, we can use the function predict_labels(data).
# In[ ]:
number_of_minibatches = len(train_x) / batch_size
print "%d mini batches" % (number_of_minibatches)
number_of_epochs = 10
print "%d epochs" % number_of_epochs
#
def compute_accuracy(dataset_x, dataset_y):
    predictions = predict_labels(dataset_x)
    errors = sum(predictions != dataset_y) #Number of misclassified samples
    accuracy = 1 - errors/float(len(dataset_y))
    return accuracy
for epoch in xrange(number_of_epochs):
    #Train the model on all mini batches
    for idx in xrange(0, number_of_minibatches):
        train_model(idx)
    accuracy_dev = compute_accuracy(dev_x, dev_y)
    accuracy_test = compute_accuracy(test_x, test_y)
    print "Epoch %d: accuracy on dev: %f, accuracy on test: %f" % (epoch, accuracy_dev, accuracy_test)
print "DONE"
|
nreimers/deeplearning4nlp-tutorial
|
2015-10_Lecture/Lecture2/code/2_MNIST_solution.py
|
Python
|
apache-2.0
| 9,372
|
[
"NEURON"
] |
814e6144e8ccb15a4650c2d483d73ca1904d36259d665765fc51085eb253ff0b
|
"""
PDBBind binding pocket dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import pandas as pd
import shutil
import time
import re
from rdkit import Chem
import deepchem as dc
def compute_binding_pocket_features(pocket_featurizer, ligand_featurizer,
pdb_subdir, pdb_code, threshold=.3):
"""Compute features for a given complex"""
protein_file = os.path.join(pdb_subdir, "%s_protein.pdb" % pdb_code)
ligand_file = os.path.join(pdb_subdir, "%s_ligand.sdf" % pdb_code)
ligand_mol2 = os.path.join(pdb_subdir, "%s_ligand.mol2" % pdb_code)
# Extract active site
active_site_box, active_site_atoms, active_site_coords = (
dc.dock.binding_pocket.extract_active_site(
protein_file, ligand_file))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = ligand_featurizer.featurize([mol])
# Featurize pocket
finder = dc.dock.ConvexHullPocketFinder()
pockets, pocket_atoms, pocket_coords = finder.find_pockets(protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = dc.feat.BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
# Compute labels for pockets
labels = np.zeros(n_pockets)
pocket_atoms[active_site_box] = active_site_atoms
for ind, pocket in enumerate(pockets):
overlap = dc.dock.binding_pocket.compute_overlap(
pocket_atoms, active_site_box, pocket)
if overlap > threshold:
labels[ind] = 1
else:
labels[ind] = 0
return features, labels
def load_pdbbind_labels(labels_file):
"""Loads pdbbind labels as dataframe"""
# Some complexes have labels but no PDB files. Filter these manually
missing_pdbs = ["1d2v", "1jou", "1s8j", "1cam", "4mlt", "4o7d"]
contents = []
with open(labels_file) as f:
for line in f:
if line.startswith("#"):
continue
else:
# Some of the ligand-names are of form (FMN ox). Use regex
# to merge into form (FMN-ox)
        p = re.compile(r'\(([^\)\s]*) ([^\)\s]*)\)')
        line = p.sub(r'(\1-\2)', line)
elts = line.split()
# Filter if missing PDB files
if elts[0] in missing_pdbs:
continue
contents.append(elts)
contents_df = pd.DataFrame(
contents,
columns=("PDB code", "resolution", "release year", "-logKd/Ki", "Kd/Ki",
"ignore-this-field", "reference", "ligand name"))
return contents_df
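# A hedged illustration of the ligand-name normalization above (the regex and
# replacement are copied from load_pdbbind_labels; the input string is made up):
def _example_ligand_name_merge():
  """'(FMN ox)' is merged into '(FMN-ox)' so that line.split() keeps it whole."""
  p = re.compile(r'\(([^\)\s]*) ([^\)\s]*)\)')
  return p.sub(r'(\1-\2)', "(FMN ox)")  # -> "(FMN-ox)"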
def featurize_pdbbind_pockets(data_dir=None, subset="core"):
"""Featurizes pdbbind according to provided featurization"""
tasks = ["active-site"]
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, "%s_pockets" % (subset))
if os.path.exists(data_dir):
return dc.data.DiskDataset(data_dir), tasks
pdbbind_dir = os.path.join(current_dir, "../pdbbind/v2015")
# Load PDBBind dataset
if subset == "core":
labels_file = os.path.join(pdbbind_dir, "INDEX_core_data.2013")
elif subset == "refined":
labels_file = os.path.join(pdbbind_dir, "INDEX_refined_data.2015")
elif subset == "full":
labels_file = os.path.join(pdbbind_dir, "INDEX_general_PL_data.2015")
else:
raise ValueError("Only core, refined, and full subsets supported.")
print("About to load contents.")
if not os.path.exists(labels_file):
raise ValueError("Run ../pdbbind/get_pdbbind.sh to download dataset.")
contents_df = load_pdbbind_labels(labels_file)
ids = contents_df["PDB code"].values
y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
# Define featurizers
pocket_featurizer = dc.feat.BindingPocketFeaturizer()
ligand_featurizer = dc.feat.CircularFingerprint(size=1024)
# Featurize Dataset
all_features = []
all_labels = []
missing_pdbs = []
all_ids = []
time1 = time.time()
for ind, pdb_code in enumerate(ids):
print("Processing complex %d, %s" % (ind, str(pdb_code)))
pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
if not os.path.exists(pdb_subdir):
print("%s is missing!" % pdb_subdir)
missing_pdbs.append(pdb_subdir)
continue
features, labels = compute_binding_pocket_features(
pocket_featurizer, ligand_featurizer, pdb_subdir, pdb_code)
if features is None:
print("Featurization failed!")
continue
all_features.append(features)
all_labels.append(labels)
    pocket_ids = np.array(["%s%d" % (pdb_code, i) for i in range(len(labels))])
    all_ids.append(pocket_ids)
time2 = time.time()
print("TIMING: PDBBind Pocket Featurization took %0.3f s" % (time2-time1))
X = np.vstack(all_features)
y = np.concatenate(all_labels)
w = np.ones_like(y)
ids = np.concatenate(all_ids)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids, data_dir=data_dir)
return dataset, tasks
def load_pdbbind_pockets(split="index", subset="core"):
"""Load PDBBind datasets. Does not do train/test split"""
dataset, tasks = featurize_pdbbind_pockets(subset=subset)
splitters = {'index': dc.splits.IndexSplitter(),
'random': dc.splits.RandomSplitter()}
splitter = splitters[split]
########################################################### DEBUG
print("dataset.X.shape")
print(dataset.X.shape)
print("dataset.y.shape")
print(dataset.y.shape)
print("dataset.w.shape")
print(dataset.w.shape)
print("dataset.ids.shape")
print(dataset.ids.shape)
########################################################### DEBUG
train, valid, test = splitter.train_valid_test_split(dataset)
transformers = []
for transformer in transformers:
train = transformer.transform(train)
for transformer in transformers:
valid = transformer.transform(valid)
for transformer in transformers:
test = transformer.transform(test)
return tasks, (train, valid, test), transformers
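# A minimal usage sketch (hedged; assumes the PDBBind v2015 files were fetched
# with ../pdbbind/get_pdbbind.sh): load the core subset and report split sizes.
def _example_load_core_pockets():
  tasks, (train, valid, test), transformers = load_pdbbind_pockets(
      split="index", subset="core")
  print("tasks: %s" % tasks)
  print("train/valid/test sizes: %d/%d/%d" % (len(train), len(valid), len(test)))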
|
Agent007/deepchem
|
examples/binding_pockets/binding_pocket_datasets.py
|
Python
|
mit
| 6,311
|
[
"RDKit"
] |
87a5ce67a58c4a5e8ba392f0531d0b0323e8dc2622ffd6c983477605570aa0dd
|
# linearizedGP -- Implementation of extended and unscented Gaussian processes.
# Copyright (C) 2014 National ICT Australia (NICTA)
#
# This file is part of linearizedGP.
#
# linearizedGP is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# linearizedGP is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with linearizedGP. If not, see <http://www.gnu.org/licenses/>.
""" Gaussian Process Base Class
This is the GP base class for the nonlinear GPs. This also implements a
"vanilla" linear GP.
Author: Daniel Steinberg (daniel.steinberg@nicta.com.au)
Institute: NICTA
Date: 28 Sep 2014
"""
import numpy as np
import nlopt
from linearizedGP.kernels import kern_se
from linearizedGP.gputils import jitchol, cholsolve, logdet
import scipy.integrate as spint
class GP(object):
def __init__(self, kfunc=kern_se):
""" The base Gaussian Process class, which also implements a basic GP.
y ~ GP(0, C),
where,
C_ij = k(x_i, x_j|theta) + del_i=j * ynoise**2,
and the learn() method learns theta and ynoise using derivative
free methods from the NLopt library (BOBYQA).
Arguments:
kfunc: the kernel function, look in the kernels module for
more kernel functions (defaults to square exponential).
Returns:
An instance of the GP class.
Note:
Also see the
- learn()
- predict()
Methods for learning this GP and also for predicting E[y*] for
new inputs, x*. Also see
- learnLB()
- learnUB()
for placing lower and upper bounds on the hyperparameters.
"""
# Kernel Functions and Nonlinearities
self.kfunc = kfunc
self.nlfunc = None
# Lower and upper bound arrays
self.kparamsLB = None
self.ynoiseLB = None
self.nlparamsLB = None
self.kparamsUB = None
self.ynoiseUB = None
self.nlparamsUB = None
# Parameters
self.obj = None
self.m = None
self.C = None
self.Kchol = None
# Hyperparameters
self.kparams = None
self.ynoise = None
self.nlparams = None
# Clear the data and hyperparameters
self.__wipedata()
def learn(self, x, y, kparams, ynoise, dobj=1e-5, dparams=1e-8, maxit=200,
verbose=False):
""" Learn method for learning the likelihood noise and kernel
hyperparameters of the GP.
Arguments:
x: [DxN] array of N input samples with a dimensionality of D.
y: N array of training outputs (dimensionality of 1)
kparams: a tuple of initial values corresponding to the kernel
hyperparameters of the kernel function input to the
constructor.
ynoise: a scalar initial value for the observation (y) noise.
dobj: [optional] the convergence threshold for the objective
function (log-marginal likelihood) used by NLopt.
dparams: [optional] the convergence threshold for the
hyperparameter values.
maxit: [optional] maximum number of iterations for learning the
hyperparameters.
verbose: [optional] whether or not to display current learning
progress to the terminal.
Returns:
The final objective function value (log marginal likelihood).
Also internally all of the final parameters and hyperparameters
are stored.
Note:
This stops learning as soon as one of the convergence criterion
is met (objective, hyperparameters or iterations).
"""
return self._learn(self.__gplearn, x, y, kparams, ynoise, None, dobj,
dparams, maxit, verbose)
def predict(self, xs):
""" Predict the outputs and thier variance, y* and V[y*], given new
inputs, x*.
Arguments:
xs: [DxN] test points for prediction (x*)
Returns:
Ems: array of N predictions of m*
Vms: array of N predictions of the variance of m*, V[m*].
"""
# Check we have trained
D, N = self._check_prediction_inputs(xs)
# Pre-allocate
Ems, Vms = np.zeros(N), np.zeros(N)
# Evaluate test kernel vectors and do a stable inversion
ks = self.kfunc(self.x, np.atleast_2d(xs), *self.kparams)
Kinvks = cholsolve(jitchol(self.C), ks)
for n, xn in enumerate(xs.T):
# Evaluate the test kernel vectors
kss = self.kfunc(np.atleast_2d(xn).T, np.atleast_2d(xn).T,
*self.kparams)
# Predict the latent function
Ems[n] = (Kinvks[:, n].T).dot(self.m)
Vms[n] = kss - Kinvks[:, n].T.dot(ks[:, n])
return Ems, Vms
def learnLB(self, kparams=None, nlparams=None, ynoise=None):
""" Set the lower bounds for the parameters to be learned.
Arguments:
kparams: a tuple of values corresponding to the kernel
hyperparameters of the kernel function input to the
constructor.
nlparams: a tuple of values corresponding to the nonlinear
function parameters of the nonlinear function input to the
                constructor (ignored for the basic GP).
ynoise: a scalar value for the observation (y) noise.
"""
self.kparamsLB = kparams
self.nlparamsLB = nlparams
self.ynoiseLB = ynoise
def learnUB(self, kparams=None, nlparams=None, ynoise=None):
""" Set the upper bounds for the parameters to be learned.
Arguments:
kparams: a tuple of values corresponding to the kernel
hyperparameters of the kernel function input to the
constructor.
nlparams: a tuple of values corresponding to the nonlinear
function parameters of the nonlinear function input to the
                constructor (ignored for the basic GP).
ynoise: a scalar value for the observation (y) noise.
"""
self.kparamsUB = kparams
self.nlparamsUB = nlparams
self.ynoiseUB = ynoise
def _learn(self, learnfunc, x, y, kparams, ynoise, nlparams, dobj, dparams,
maxit, verbose):
""" Generic optimisation method for this and derived Gaussian Process
algorithms.
Essentially this manages the call to NLopt, establishes upper and
lower bounds on the hyperparameters, and sets the internal
parameters and hyperparameters to their learned values.
Arguments:
learnfunc: the learning function to call to actually learn the
*parameters* of the GP, i.e. the posterior mean and
covariance, it should have the following minimal form:
def learnfunc(y, K, delta=None, maxit=None, verbose=False):
... do some calcs
return m, C, obj
where K is the prior [NxN] covariance matrix built using
the current estimates of the hyperparameters, m is the
posterior mean of the GP, C is the posterior covariance of
the GP and obj is the final value of the objective function
(log marginal likelihood or some proxy). Also:
delta: is the convergence threshold, and is passed
dobj/10
maxit: the maximum number of iterations to perform
(passed maxit from this function)
verbose: toggle verbose output, also routed from this
function.
x: [DxN] array of N input samples with a dimensionality of D.
y: N array of training outputs (dimensionality of 1)
kparams: a tuple of initial values corresponding to the kernel
hyperparameters of the kernel function input to the
constructor.
ynoise: a scalar initial value for the observation (y) noise.
dobj: the convergence threshold for the objective function
(log-marginal likelihood) used by NLopt.
dparams: the convergence threshold for the hyperparameter
values.
            maxit: maximum number of iterations for learning the
hyperparameters.
verbose: whether or not to display current learning progress to
the terminal.
Returns:
The final objective function value (log marginal likelihood).
Also internally all of the final parameters and hyperparameters
are stored.
Note:
This stops learning as soon as one of the convergence criterion
is met (objective, hyperparameters or iterations).
"""
# Check arguments
self.__wipedata()
D, N = self.__check_training_inputs(x, y)
# Check bounds with parameters to learn
lbounds, ubounds = self.__checkbounds(kparams, nlparams, ynoise)
# Make log-marginal-likelihood closure for optimisation
def objective(params, grad):
# Make sure grad is empty
assert not grad, "Grad is not empty!"
# Extract hyperparameters
self.__extractparams(params, kparams, nlparams, ynoise)
K = self.kfunc(self.x, self.x, *self.kparams)
m, C, obj = learnfunc(y, K, delta=dobj/10, maxit=maxit,
verbose=verbose)
if obj > self.obj:
self.m, self.C, self.obj = m, C, obj
if verbose is True:
print("\tObjective: {}, params: {}".format(obj, params))
return obj
# Get initial hyper-parameters
params = self.__catparams(kparams, nlparams, ynoise)
nparams = len(params)
# Set up optimiser with objective function and bounds
opt = nlopt.opt(nlopt.LN_BOBYQA, nparams)
opt.set_max_objective(objective)
opt.set_lower_bounds(lbounds)
opt.set_upper_bounds(ubounds)
opt.set_maxeval(maxit)
opt.set_ftol_rel(dobj)
opt.set_xtol_rel(dparams)
# Run the optimisation
params = opt.optimize(params)
optfval = opt.last_optimize_result()
if verbose is True:
print("Optimiser finish criterion: {0}".format(optfval))
# Store learned parameters (these have been over-written by nlopt)
self.__extractparams(params, kparams, nlparams, ynoise)
self.Kchol = jitchol(self.kfunc(self.x, self.x, *self.kparams))
return self.obj
def _quadpredict(self, xs):
""" Prediction of m* and E[y*] using quadrature to evaluate E[y*]. This
is primarily intended for the nonlinear GPs.
Arguments:
xs: [DxN] test points for prediction
Returns:
Eys: array of N predictions of E[y*]
eEys: array of N errors on each E[y*] integral evaluation
Ems: array of N predictions of m*
Vms: array of N predictions of the variance of m*, V[m*].
"""
# Check we have trained
D, N = self._check_prediction_inputs(xs)
# Pre-allocate
Ems, Vms, Eys, eEys = np.zeros(N), np.zeros(N), np.zeros(N), \
np.zeros(N)
# Expected predicted target (to be integrated)
def expecy(xsn, Emn, Vmn):
gxs = self._passnlfunc(self.nlfunc, xsn)
quad_msEf = (xsn - Emn)**2 / Vmn
return gxs * np.exp(-0.5 * (quad_msEf + np.log(2 * np.pi * Vmn)))
# Evaluate test kernel vectors and do a stable inversion
ks = self.kfunc(self.x, np.atleast_2d(xs), *self.kparams)
Kinvks = cholsolve(self.Kchol, ks)
for n, xn in enumerate(xs.T):
# Evaluate the test kernel vectors
kss = self.kfunc(np.atleast_2d(xn).T, np.atleast_2d(xn).T,
*self.kparams)
# Predict the latent function
Ems[n] = (Kinvks[:, n].T).dot(self.m)
Vms[n] = kss - Kinvks[:, n].T.dot(ks[:, n]
- self.C.dot(Kinvks[:, n]))
# Use Quadrature to get predicted target value
            st = 4 * np.sqrt(Vms[n])  # integrate over +/- 4 standard deviations
Eys[n], eEys[n] = spint.quad(expecy, a=Ems[n]-st, b=Ems[n]+st,
args=(Ems[n], Vms[n]))
return Eys, eEys, Ems, Vms
def _check_prediction_inputs(self, xs):
""" Check the prediction inputs for compatability.
Arguments:
xs: [DxN] test points for prediction
"""
# Check we have trained
if self.m is None:
raise ValueError("This GP needs to be learned first!")
D, N = (1, xs.shape[0]) if xs.ndim == 1 else xs.shape
if D != self.x.shape[0]:
raise ValueError("The test and training inputs are not the same"
" dimensionality!")
return D, N
def _passnlfunc(self, func, f):
""" Pass points f though the nonlinear function func.
            This is a convenience method that also checks whether or not
nlparams exists, and calls func accordingly.
Arguments:
func: a function that can take a vector input, and give a
vector of the same length on the output.
f: an array of N points to pass through func.
Returns:
an array of N points output from func(f).
"""
return func(f) if self.nlparams is None else func(f, *self.nlparams)
def __catparams(self, kparams, nlparams, ynoise):
""" Concatenate the parameters for optimisation. """
params = []
if kparams is not None:
params = kparams
if nlparams is not None:
params = np.hstack((params, nlparams))
if ynoise is not None:
params = np.hstack((params, [ynoise]))
return params
def __extractparams(self, params, kparams, nlparams=None, ynoise=None):
""" Extract the model parameters after optimisation from a list. """
self.kparams = params[0:len(kparams)]
self.nlparams = None if not nlparams else \
params[len(kparams):(len(kparams)+len(nlparams))]
self.ynoise = None if not ynoise else params[-1]
def __check_training_inputs(self, x, y):
""" Check the training inputs for compatability. """
# Check if there is already data
if self.x is not None:
raise ValueError("This GP already has data!")
# Check arguments
self.x = np.array(x[np.newaxis, :]) if x.ndim == 1 else np.array(x)
(D, N) = self.x.shape
if y.shape != (N,):
self.x = None
raise ValueError("x and y do not have the same number of points"
" or y is the wrong shape!")
return D, N
def __wipedata(self):
""" Clear this object """
# Hyper-parameters
self.kparams = None
self.ynoise = None
# Functions and their parameters
self.nlparams = None
# Matrices to store for fast computations
self.x = None
self.Kchol = None
self.m = None
self.C = None
self.obj = -np.inf
def __checkbounds(self, kparams, nlparams, ynoise):
""" Make sure the parameter bounds match the starting parameters. """
if self.kparamsLB is not None:
if len(kparams) != len(self.kparamsLB):
raise ValueError("Lower bound kernel parameters size mismatch")
else:
self.kparamsLB = -np.inf * np.ones(len(kparams))
if self.kparamsUB is not None:
if len(kparams) != len(self.kparamsUB):
raise ValueError("Upper bound kernel parameters size mismatch")
else:
self.kparamsUB = np.inf * np.ones(len(kparams))
if self.nlparamsLB is not None:
if len(nlparams) != len(self.nlparamsLB):
raise ValueError("Lower bound function parameters mismatch")
elif nlparams is not None:
self.nlparamsLB = -np.inf * np.ones(len(nlparams))
if self.nlparamsUB is not None:
if len(nlparams) != len(self.nlparamsUB):
raise ValueError("Upper bound function parameters mismatch")
elif nlparams is not None:
self.nlparamsUB = np.inf * np.ones(len(nlparams))
if (ynoise is not None) and (self.ynoiseLB is None):
self.ynoiseLB = -np.inf
elif (ynoise is None) and (self.ynoiseLB is not None):
raise ValueError("No noise parameter, but lower bound set!")
if (ynoise is not None) and (self.ynoiseUB is None):
self.ynoiseUB = np.inf
elif (ynoise is None) and (self.ynoiseUB is not None):
raise ValueError("No noise parameter, but upper bound set!")
lbounds = self.__catparams(self.kparamsLB, self.nlparamsLB,
self.ynoiseLB)
ubounds = self.__catparams(self.kparamsUB, self.nlparamsUB,
self.ynoiseUB)
return lbounds, ubounds
def __gplearn(self, y, K, delta=None, maxit=None, verbose=False):
""" Parameter learning for a basic GP. Called by the _learn method. """
N = y.shape[0]
# Make posterior GP
C = K + np.eye(N) * self.ynoise**2
Cchol = jitchol(C)
# Calculate the log-marginal-likelihood
lml = -0.5 * (logdet(Cchol) + y.T.dot(cholsolve(Cchol, y)) + N *
np.log(2 * np.pi))
return y, C, lml
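# A minimal usage sketch (hedged; the data, kernel hyperparameters and bounds
# below are illustrative, and kern_se is assumed to take two positive
# hyperparameters). Fit the vanilla GP to noisy samples of a sine wave and
# predict on a dense grid.
def _example_basic_gp():
    N = 50
    x = np.linspace(0.0, 10.0, N)[np.newaxis, :]        # [1xN] inputs
    y = np.sin(x).flatten() + 0.1 * np.random.randn(N)  # noisy targets
    gp = GP(kfunc=kern_se)
    gp.learnLB(kparams=(1e-2, 1e-2), ynoise=1e-2)       # keep values positive
    obj = gp.learn(x, y, kparams=(1.0, 1.0), ynoise=0.1)
    Ems, Vms = gp.predict(np.linspace(0.0, 10.0, 200)[np.newaxis, :])
    return obj, Ems, Vms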
|
NICTA/linearizedGP
|
linearizedGP/GP.py
|
Python
|
gpl-3.0
| 18,915
|
[
"Gaussian"
] |
c8543d3d056ea82aa316a28f592c354877f89bbccc036698d548646baba247d2
|
"""
UI-level acceptance tests for OpenAssessment.
"""
import ddt
import os
import unittest
import time
from functools import wraps
from pyinstrument import Profiler
from nose.plugins.attrib import attr
from bok_choy.web_app_test import WebAppTest
from bok_choy.promise import BrokenPromise, EmptyPromise
from auto_auth import AutoAuthPage
from pages import (
SubmissionPage, AssessmentPage, GradePage, StaffAreaPage
)
# This value is generally used in jenkins, but not locally
PROFILING_ENABLED = os.environ.get('ORA_PROFILING_ENABLED', False)
def retry(tries=2, delay=4, backoff=2):
"""
Retry decorator with exponential backoff.
Kwargs:
tries (int): Maximum number of times to execute the function.
delay (int): Starting delay between retries.
backoff (int): Multiplier applied to the delay.
"""
def _decorator(func):
@wraps(func)
def _inner(*args, **kwargs):
_delay = delay
for attempt_num in range(tries):
try:
return func(*args, **kwargs)
except (BrokenPromise, AssertionError) as ex:
if attempt_num >= (tries - 1):
raise
else:
print "Test failed with {err}, retrying in {sec} seconds...".format(err=ex, sec=_delay)
time.sleep(_delay)
_delay *= backoff
return _inner
return _decorator
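# A hedged illustration of the decorator's timing (not used by the tests
# below): with the defaults (tries=2, delay=4, backoff=2) a single retry
# happens after a 4 second sleep; with tries=3 the sleeps are 4s then 8s.
def _example_retry_delays(tries=3, delay=4, backoff=2):
    """Return the sleep intervals the retry decorator would use between attempts."""
    delays, _delay = [], delay
    for _ in range(tries - 1):
        delays.append(_delay)
        _delay *= backoff
    return delays  # e.g. [4, 8] for tries=3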
class OpenAssessmentTest(WebAppTest):
"""
UI-level acceptance tests for Open Assessment.
"""
TEST_COURSE_ID = "course-v1:edx+ORA203+course"
PROBLEM_LOCATIONS = {
'staff_only':
u'courses/{test_course_id}/courseware/'
u'61944efb38a349edb140c762c7419b50/415c3ee1b7d04b58a1887a6fe82b31d6/'.format(test_course_id=TEST_COURSE_ID),
'self_only':
u'courses/{test_course_id}/courseware/'
u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/338a4affb58a45459629e0566291381e/'.format(test_course_id=TEST_COURSE_ID),
'peer_only':
u'courses/{test_course_id}/courseware/'
u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/417e47b2663a4f79b62dba20b21628c8/'.format(test_course_id=TEST_COURSE_ID),
'student_training':
u'courses/{test_course_id}/courseware/'
u'676026889c884ac1827688750871c825/5663e9b038434636977a4226d668fe02/'.format(test_course_id=TEST_COURSE_ID),
'file_upload':
u'courses/{test_course_id}/courseware/'
u'57a3f9d51d424f6cb922f0d69cba868d/bb563abc989340d8806920902f267ca3/'.format(test_course_id=TEST_COURSE_ID),
'full_workflow_staff_override':
u'courses/{test_course_id}/courseware/'
u'676026889c884ac1827688750871c825/181ea9ff144c4766be44eb8cb360e34f/'.format(test_course_id=TEST_COURSE_ID),
'full_workflow_staff_required':
u'courses/{test_course_id}/courseware/'
u'8d9584d242b44343bc270ea5ef04ab03/0b0dcc728abe45138c650732af178afb/'.format(test_course_id=TEST_COURSE_ID),
'feedback_only':
u'courses/{test_course_id}/courseware/'
u'8d9584d242b44343bc270ea5ef04ab03/a2875e0db1454d0b94728b9a7b28000b/'.format(test_course_id=TEST_COURSE_ID),
'multiple_ora':
u'courses/{test_course_id}/courseware/'
u'3b9aa6e06d8f48818ff6f364b5586f38/b79abd43bb11445486cd1874e6c71a64/'.format(test_course_id=TEST_COURSE_ID),
}
SUBMISSION = u"This is a test submission."
LATEX_SUBMISSION = u"[mathjaxinline]( \int_{0}^{1}xdx )[/mathjaxinline]"
OPTIONS_SELECTED = [1, 2]
STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
STAFF_OVERRIDE_SCORE = 1
STAFF_GRADE_EXISTS = "COMPLETE"
STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE = "You Must Complete the Steps Above to View Your Grade"
STAFF_AREA_SCORE = "Final grade: {} out of 8"
STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE = "The problem has not been completed."
EXPECTED_SCORE = 6
STUDENT_TRAINING_OPTIONS = [
[1, 2],
[0, 2]
]
TEST_PASSWORD = "test_password"
def setUp(self, problem_type, staff=False):
"""
Configure page objects to test Open Assessment.
Args:
problem_type (str): The type of problem being tested,
used to choose which part of the course to load.
staff (bool): If True, runs the test with a staff user (defaults to False).
"""
super(OpenAssessmentTest, self).setUp()
if PROFILING_ENABLED:
self.profiler = Profiler(use_signal=False)
self.profiler.start()
self.problem_loc = self.PROBLEM_LOCATIONS[problem_type]
self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID, staff=staff)
self.submission_page = SubmissionPage(self.browser, self.problem_loc)
self.self_asmnt_page = AssessmentPage('self-assessment', self.browser, self.problem_loc)
self.peer_asmnt_page = AssessmentPage('peer-assessment', self.browser, self.problem_loc)
self.student_training_page = AssessmentPage('student-training', self.browser, self.problem_loc)
self.staff_asmnt_page = AssessmentPage('staff-assessment', self.browser, self.problem_loc)
self.grade_page = GradePage(self.browser, self.problem_loc)
def log_to_file(self):
with open('{}-profile.log'.format(self.id()), 'w') as f:
f.write(self.profiler.output_text())
def tearDown(self):
if PROFILING_ENABLED:
self.profiler.stop()
self.log_to_file()
def login_user(self, learner, email):
"""
Logs in an already existing user.
Args:
learner (str): the username of the user.
email (str): email address of the user.
"""
auto_auth_page = AutoAuthPage(
self.browser, email=email, password=self.TEST_PASSWORD, username=learner,
course_id=self.TEST_COURSE_ID, staff=True
)
auto_auth_page.visit()
def do_self_assessment(self):
"""
Creates a user, submits a self assessment, verifies the grade, and returns the username of the
learner for which the self assessment was submitted.
"""
self.auto_auth_page.visit()
username, _ = self.auto_auth_page.get_username_and_email()
self.submission_page.visit().submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
# Submit a self-assessment
self.submit_self_assessment(self.OPTIONS_SELECTED)
# Verify the grade
self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)
return username
def submit_self_assessment(self, options=OPTIONS_SELECTED):
"""
Submit a self assessment for the currently logged in student. Do not verify grade.
Args:
options: the options to select for the self assessment
(will use OPTIONS_SELECTED if not specified)
"""
self.self_asmnt_page.wait_for_page().wait_for_response()
self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
self.self_asmnt_page.assess("self", options).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
def _verify_staff_grade_section(self, expected_status, expected_message_title):
"""
Verifies the expected status and message text in the Staff Grade section
(as shown to the learner).
"""
self.staff_asmnt_page.wait_for_page()
self.assertEqual("Staff Grade", self.staff_asmnt_page.label)
self.staff_asmnt_page.verify_status_value(expected_status)
self.assertEqual(expected_message_title, self.staff_asmnt_page.message_title)
def do_training(self):
"""
Complete two training examples, satisfying the requirements.
"""
for example_num, options_selected in enumerate(self.STUDENT_TRAINING_OPTIONS):
if example_num > 0:
try:
self.student_training_page.wait_for_num_completed(example_num)
except BrokenPromise:
msg = "Did not complete at least {num} student training example(s).".format(num=example_num)
self.fail(msg)
self.student_training_page.wait_for_page().wait_for_response().assess("training", options_selected)
# Check that we've completed student training
try:
self.student_training_page.wait_for_complete()
except BrokenPromise:
self.fail("Student training was not marked complete.")
def do_peer_assessment(self, count=1, options=OPTIONS_SELECTED):
"""
Does the specified number of peer assessments.
Args:
count: the number of assessments that must be completed (defaults to 1)
options: the options to use (defaults to OPTIONS_SELECTED)
"""
self.peer_asmnt_page.visit()
for count_assessed in range(1, count + 1):
self.peer_asmnt_page.wait_for_page().wait_for_response().assess("peer", options)
self.peer_asmnt_page.wait_for_num_completed(count_assessed)
def do_staff_override(self, username, final_score=STAFF_AREA_SCORE.format(STAFF_OVERRIDE_SCORE)):
"""
Complete a staff assessment (grade override).
Args:
username: the learner to grade
final_score: the expected final score as shown in the staff area
(defaults to the staff override score value)
"""
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.staff_assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED, "override")
self.staff_area_page.verify_learner_final_score(final_score)
def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED, feedback=None):
"""
Use staff tools to assess available responses.
Args:
number_to_assess: the number of submissions to assess. If not provided (or 0),
will grade all available submissions.
options_selected (dict): the options to choose when grading. Defaults to OPTIONS_SELECTED.
feedback (function(feedback_type)): if feedback is set, it will be used as a function that takes one
parameter to generate a feedback string.
"""
self.staff_area_page.visit()
self.staff_area_page.click_staff_toolbar_button("staff-grading")
# Get the counts before checking out a submission for assessment.
start_numbers = self.staff_area_page.available_checked_out_numbers
# Check out a submission.
self.staff_area_page.expand_staff_grading_section()
# Checked out number should increase, ungraded decrease.
ungraded = start_numbers[0]-1
checked_out = start_numbers[1]+1
self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
assessed = 0
while number_to_assess == 0 or assessed < number_to_assess:
continue_after = False if number_to_assess-1 == assessed else ungraded > 0
if feedback:
self.staff_area_page.provide_criterion_feedback(feedback("criterion"))
self.staff_area_page.provide_overall_feedback(feedback("overall"))
if options_selected:
self.staff_area_page.staff_assess(options_selected, "full-grade", continue_after)
assessed += 1
if not continue_after:
self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out-1))
break
else:
                    ungraded -= 1
self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
def refresh_page(self):
"""
Helper method that waits for "unsaved changes" warnings to clear before refreshing the page.
"""
EmptyPromise(
lambda: self.browser.execute_script("return window.onbeforeunload === null"),
"Unsubmitted changes exist on page."
).fulfill()
self.browser.refresh()
class SelfAssessmentTest(OpenAssessmentTest):
"""
Test the self-assessment flow.
"""
def setUp(self):
super(SelfAssessmentTest, self).setUp('self_only')
@retry()
@attr('acceptance')
def test_self_assessment(self):
# Submit a response
self.do_self_assessment()
# Check browser scrolled back to top of assessment
self.assertTrue(self.self_asmnt_page.is_on_top)
@retry()
@attr('acceptance')
def test_latex(self):
self.auto_auth_page.visit()
self.submission_page.visit()
# 'Preview in Latex' button should be disabled at the page load
self.assertTrue(self.submission_page.latex_preview_button_is_disabled)
# Fill latex expression, & Verify if 'Preview in Latex is enabled'
self.submission_page.visit().fill_latex(self.LATEX_SUBMISSION)
self.assertFalse(self.submission_page.latex_preview_button_is_disabled)
# Click 'Preview in Latex' button & Verify if it was rendered
self.submission_page.preview_latex()
class StaffAssessmentTest(OpenAssessmentTest):
"""
Test the staff-assessment flow.
"""
def setUp(self):
super(StaffAssessmentTest, self).setUp('staff_only', staff=True)
@retry()
@attr('acceptance')
def test_staff_assessment(self):
# Set up user and navigate to submission page
self.auto_auth_page.visit()
username, _ = self.auto_auth_page.get_username_and_email()
self.submission_page.visit()
# Verify that staff grade step is shown initially
self._verify_staff_grade_section("NOT AVAILABLE", None)
# User submits a response
self.submission_page.submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
# Verify staff grade section appears as expected
self._verify_staff_grade_section("NOT AVAILABLE", "Waiting for a Staff Grade")
# Perform staff assessment
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
self.do_staff_assessment()
# Verify staff grade section appears as expected
self.staff_asmnt_page.visit()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)
# Verify that staff scores can be overriden
self.do_staff_override(username)
self.refresh_page()
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
class PeerAssessmentTest(OpenAssessmentTest):
"""
Test the peer-assessment flow.
It's complicated to guarantee that a student will both give and
receive enough assessments to receive a grade, so we stop
once we've given one peer assessment.
"""
def setUp(self):
super(PeerAssessmentTest, self).setUp('peer_only')
@retry()
@attr('acceptance')
def test_peer_assessment(self):
# Create a submission for the first student, so there's
# at least one submission to assess.
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Create a submission for the second student
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Assess the submission (there should be at least one available)
self.do_peer_assessment()
class PeerAssessmentTestStaffOverride(OpenAssessmentTest):
"""
Test setting a staff override on a problem which requires peer assessment.
"""
def setUp(self):
super(PeerAssessmentTestStaffOverride, self).setUp('peer_only', staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_staff_override(self):
"""
Scenario: staff can override a learner's grade
Given I am viewing a new peer assessment problem as a learner
And if I create a response to the problem
Then there is no Staff Grade section present
And if a staff member creates a grade override
Then when I refresh the page, I see that a staff override exists
And the message says that I must complete my steps to view the grade
And if I submit required peer assessments
Then the Staff Grade section is marked complete with no message
And I can see my final grade, even though no peers have assessed me
"""
# Create two students with a submission each so that there are 2 submissions to assess.
for _ in range(0, 2):
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Create a submission for the third student (used for the remainder of the test).
self.auto_auth_page.visit()
username, _ = self.auto_auth_page.get_username_and_email()
self.submission_page.visit().submit_response(self.SUBMISSION)
# Staff Grade field should not be visible yet.
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
# Submit a staff override.
self.do_staff_override(username, self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
# Refresh the page so the learner sees the Staff Grade section.
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
# Verify no final grade yet.
self.assertIsNone(self.grade_page.wait_for_page().score)
# Assess two submissions
self.do_peer_assessment(count=2)
# Staff grade section is now marked complete, even though no students have submitted
# assessments for this particular student (no longer required since staff grade exists).
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
# Verify the staff override grade
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
class StudentTrainingTest(OpenAssessmentTest):
"""
Test student training (the "learning to assess" step).
"""
def setUp(self):
super(StudentTrainingTest, self).setUp('student_training')
@retry()
@attr('acceptance')
def test_student_training(self):
# Create a submission so we can get to student training
self.auto_auth_page.visit()
self.submission_page.visit().submit_response(self.SUBMISSION)
self.do_training()
@ddt.ddt
class StaffAreaTest(OpenAssessmentTest):
"""
Test the staff area.
This is testing a problem with "self assessment only".
"""
def setUp(self):
super(StaffAreaTest, self).setUp('self_only', staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_staff_area_buttons(self):
"""
Scenario: the staff area buttons should behave correctly
Given I am viewing the staff area of an ORA problem
Then none of the buttons should be active
When I click the "Manage Individual Learners" button
Then only the "Manage Individual Learners" button should be active
When I click the "View Assignment Statistics" button
Then only the "View Assignment Statistics" button should be active
When I click the "Staff Info" button again
Then none of the buttons should be active
"""
self.auto_auth_page.visit()
self.staff_area_page.visit()
self.assertEqual(self.staff_area_page.selected_button_names, [])
self.staff_area_page.click_staff_toolbar_button("staff-tools")
self.assertEqual(self.staff_area_page.selected_button_names, ["MANAGE INDIVIDUAL LEARNERS"])
self.staff_area_page.click_staff_toolbar_button("staff-info")
self.assertEqual(self.staff_area_page.selected_button_names, ["VIEW ASSIGNMENT STATISTICS"])
self.staff_area_page.click_staff_toolbar_button("staff-info")
self.assertEqual(self.staff_area_page.selected_button_names, [])
@retry()
@attr('acceptance')
def test_staff_area_panel(self):
"""
Scenario: the staff area panels should be shown correctly
Given I am viewing the staff area of an ORA problem
Then none of the panels should be shown
When I click a staff button
Then only the related panel should be shown
When I click the close button in the panel
Then none of the panels should be shown
"""
self.auto_auth_page.visit()
self.staff_area_page.visit()
# Verify that there is no selected panel initially
self.assertEqual(self.staff_area_page.selected_button_names, [])
self.assertEqual(self.staff_area_page.visible_staff_panels, [])
for panel_name, button_label in [
("staff-tools", "MANAGE INDIVIDUAL LEARNERS"),
("staff-info", "VIEW ASSIGNMENT STATISTICS"),
]:
# Click on the button and verify that the panel has opened
self.staff_area_page.click_staff_toolbar_button(panel_name)
self.assertEqual(self.staff_area_page.selected_button_names, [button_label])
visible_panels = self.staff_area_page.visible_staff_panels
self.assertEqual(1, len(visible_panels))
self.assertIn(u'openassessment__{button_name}'.format(button_name=panel_name), visible_panels[0])
# Click 'Close' and verify that the panel has been closed
self.staff_area_page.click_staff_panel_close_button(panel_name)
self.assertEqual(self.staff_area_page.selected_button_names, [])
self.assertEqual(self.staff_area_page.visible_staff_panels, [])
@retry()
@attr('acceptance')
def test_student_info(self):
"""
Scenario: staff tools shows learner response information
Given I am viewing the staff area of an ORA problem
When I search for a learner in staff tools
And the learner has submitted a response to an ORA problem with self-assessment
And I've made a staff override assessment of the learner
Then I see the correct learner information sections
"""
username = self.do_self_assessment()
self.do_staff_override(username)
self.staff_area_page.visit()
# Click on staff tools and search for user
self.staff_area_page.show_learner(username)
self.assertEqual(
[u"Learner's Response", u"Learner's Self Assessment", u"Staff Assessment for This Learner",
u"Learner's Final Grade", u"Submit Assessment Grade Override", u"Remove Submission From Peer Grading"],
self.staff_area_page.learner_report_sections
)
self.assertNotIn('A response was not found for this learner', self.staff_area_page.learner_report_text)
@retry()
@attr('acceptance')
def test_student_info_no_submission(self):
"""
Scenario: staff tools indicates if no submission has been received for a given learner
Given I am viewing the staff area of an ORA problem
And I myself have submitted a response with self-assessment
When I search for a learner in staff tools
And the learner has not submitted a response to the ORA problem
Then I see a message indicating that the learner has not submitted a response
And there are no student information sections displayed
"""
self.auto_auth_page.visit()
# This is to catch a bug that existed when the user viewing staff tools had submitted an assessment,
# and had a grade stored (TNL-4060).
self.do_self_assessment()
self.staff_area_page.visit()
# Click on staff tools and search for user
self.staff_area_page.show_learner('no-submission-learner')
self.staff_area_page.verify_learner_report_text('A response was not found for this learner.')
@retry()
@attr('acceptance')
def test_staff_override(self):
"""
Scenario: staff can override a learner's grade
Given I am viewing the staff area of an ORA problem
When I search for a learner in staff tools
And the learner has submitted a response to an ORA problem with self-assessment
Then I can submit a staff override of the self-assessment
And I see the updated final score
"""
username = self.do_self_assessment()
self.staff_area_page.visit()
# Click on staff tools and search for user
self.staff_area_page.show_learner(username)
# Check the learner's current score.
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.EXPECTED_SCORE))
self.assertEquals(
['CRITERION', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Fair - 3 points', 'Good - 3 points'], self.staff_area_page.learner_final_score_table_values
)
# Do staff override and wait for final score to change.
self.staff_area_page.assess("staff-override", self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# Verify that the new student score is different from the original one.
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Fair',
'Fair - 1 point', 'Good'],
self.staff_area_page.learner_final_score_table_values
)
@retry()
@attr('acceptance')
def test_cancel_submission(self):
"""
Scenario: staff can cancel a learner's submission
Given I am viewing the staff area of an ORA problem
When I search for a learner in staff tools
And the learner has submitted a response to an ORA problem with self-assessment
Then I can cancel the learner's submission
And I see an updated message indicating that the submission has been canceled.
"""
username = self.do_self_assessment()
self.staff_area_page.visit()
# Click on staff tools and search for user
self.staff_area_page.show_learner(username)
# Check the learner's current score.
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.EXPECTED_SCORE))
# Cancel the student submission
self.staff_area_page.cancel_submission()
self.staff_area_page.verify_learner_final_score(
"The learner's submission has been removed from peer assessment. "
"The learner receives a grade of zero unless you delete the learner's state for the "
"problem to allow them to resubmit a response."
)
# Verify that the staff override and submission removal sections are now gone.
self.assertEqual(
[u"Learner's Response", u"Learner's Self Assessment", u"Learner's Final Grade"],
self.staff_area_page.learner_report_sections
)
# Verify that the Learner Response has been replaced with a message about the removal
self.staff_area_page.expand_learner_report_sections()
self.assertIn("Learner submission removed", self.staff_area_page.learner_response)
@retry()
@attr('acceptance')
def test_staff_grade_override(self):
"""
Scenario: the staff grade section displays correctly
Given I am viewing a new self assessment problem as a learner
Then there is no Staff Grade section present
And if I create a response to the problem
Then there is no Staff Grade section present
And if a staff member creates a grade override
Then when I refresh the page, I see that a staff override exists
And the message says that I must complete my steps to view the grade
And if I submit my self-assessment
Then the Staff Grade section is marked complete with no message
And I can see my final grade
"""
# View the problem-- no Staff Grade area.
self.auto_auth_page.visit()
username, _ = self.auto_auth_page.get_username_and_email()
self.submission_page.visit()
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
self.submission_page.submit_response(self.SUBMISSION)
self.assertTrue(self.submission_page.has_submitted)
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
# Submit a staff override
self.do_staff_override(username, self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
# Refresh the page so the learner sees the Staff Grade section.
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
# Verify no final grade yet.
self.assertIsNone(self.grade_page.wait_for_page().score)
# Verify required staff grading section not available
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
self.assertFalse(self.staff_area_page.is_button_visible('staff-grading'))
# Learner does required self-assessment
self.self_asmnt_page.wait_for_page().wait_for_response()
self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
self.self_asmnt_page.assess("self", self.OPTIONS_SELECTED).wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
# Verify the staff override grade
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
@retry()
@attr('acceptance')
def test_staff_grade_override_cancelled(self):
"""
Scenario: the staff grade section displays cancelled when the submission is cancelled
Given I have created a response and a self-assessment
And a staff member creates a grade override and then cancels my submission
Then when I refresh the page, the Staff Grade section is marked cancelled
And I have no final grade
"""
username = self.do_self_assessment()
# Submit a staff override
self.do_staff_override(username)
# And cancel the submission
self.staff_area_page.expand_learner_report_sections()
self.staff_area_page.cancel_submission()
# Refresh the page so the learner sees the Staff Grade section shows the submission has been cancelled.
self.refresh_page()
self._verify_staff_grade_section("CANCELLED", None)
self.assertIsNone(self.grade_page.wait_for_page().score)
class FileUploadTest(OpenAssessmentTest):
"""
Test file upload
"""
def setUp(self):
super(FileUploadTest, self).setUp('file_upload')
@retry()
@attr('acceptance')
def test_file_upload(self):
self.auto_auth_page.visit()
        # Try to upload an unacceptable file
self.submission_page.visit()
        # Hide the Django debug toolbar; otherwise it covers the button on the right side,
        # making the button unclickable and causing the tests to fail.
self.submission_page.hide_django_debug_tool()
self.submission_page.select_file(os.path.dirname(os.path.realpath(__file__)) + '/__init__.py')
self.assertTrue(self.submission_page.has_file_error)
        # Try to upload an acceptable file
self.submission_page.visit().select_file(os.path.dirname(os.path.realpath(__file__)) + '/README.rst')
self.assertFalse(self.submission_page.has_file_error)
self.submission_page.upload_file()
self.assertTrue(self.submission_page.has_file_uploaded)
class FullWorkflowMixin(object):
"""
Mixin with helper methods and constants for testing a full workflow
(training, self assessment, peer assessment, staff override).
"""
PEER_ASSESSMENT = [0, 0]
STAFF_AREA_PEER_ASSESSMENT = ['Poor', u'', u'0', u'5', u'Poor', u'', u'0', u'3']
PEER_ASSESSMENT_SCORE = 0
PEER_ASSESSMENT_STAFF_AREA_SCORE = "Final grade: 0 out of 8"
SELF_ASSESSMENT = [2, 3]
STAFF_AREA_SELF_ASSESSMENT = ['Good', u'', u'5', u'5', u'Excellent', u'', u'3', u'3']
SUBMITTED_ASSESSMENT = [0, 3]
STAFF_AREA_SUBMITTED = ['Poor', u'', u'0', u'5', u'Excellent', u'', u'3', u'3']
def do_submission(self):
"""
Creates a user and submission.
Returns:
(str, str): the username and email of the newly created user
"""
auto_auth_page = AutoAuthPage(
self.browser, password=self.TEST_PASSWORD, course_id=self.TEST_COURSE_ID, staff=True
)
auto_auth_page.visit()
username_email = auto_auth_page.get_username_and_email()
self.submission_page.visit().submit_response(self.SUBMISSION)
return username_email
def do_submission_training_self_assessment(self):
"""
Creates a user and then does submission, training, and self assessment.
Returns:
            (str, str): the username and email of the newly created user
"""
username, email = self.do_submission()
self.do_training()
self.submit_self_assessment(self.SELF_ASSESSMENT)
return username, email
def do_train_self_peer(self, peer_to_grade=True):
"""
Common functionality for executing training, self, and peer assessment steps.
Args:
            peer_to_grade: boolean, defaults to True. Set to False to have the learner complete their
                required steps without any peer submitting a grade for the learner in return.
"""
# Create a learner with submission, training, and self assessment completed.
learner, learner_email = self.do_submission_training_self_assessment()
# Now create a second learner so that learner 1 has someone to assess.
# The second learner does all the steps as well (submission, training, self assessment, peer assessment).
self.do_submission_training_self_assessment()
if peer_to_grade:
self.do_peer_assessment(options=self.PEER_ASSESSMENT)
# Go back to the first learner to complete her workflow.
self.login_user(learner, learner_email)
# Learner 1 does peer assessment of learner 2 to complete workflow.
self.do_peer_assessment(options=self.SUBMITTED_ASSESSMENT)
# Continue grading by other students if necessary to ensure learner has a peer grade.
if peer_to_grade:
self.verify_submission_has_peer_grade(learner, learner_email)
return learner
def staff_assessment(self, peer_grades_me=True):
""" Do staff assessment workflow """
# Ensure grade is not present, since staff assessment has not been made
self.assertIsNone(self.grade_page.wait_for_page().score)
# Now do a staff assessment.
self.do_staff_assessment(options_selected=self.STAFF_OVERRIDE_OPTIONS_SELECTED)
# As an add-on, let's make sure that both submissions (the learner's, and the additional one created
# in do_train_self_peer() above) were assessed using staff-grading's "submit and keep going"
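        # (Assumption: staff_area_page.available_checked_out_numbers reports the
        # (available, checked out) counts for staff grading, so 0 available means
        # every submission has been assessed.)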
self.assertEqual(0, self.staff_area_page.available_checked_out_numbers[0])
# At this point, the learner sees the score (1).
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
if peer_grades_me:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
else:
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
(u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")],
])
def verify_staff_area_fields(self, username, peer_assessments, submitted_assessments, self_assessment):
"""
Verifies the expected entries in the staff area for peer assessments,
submitted assessments, and self assessment.
Args:
username (str): the username of the learner to check
peer_assessments: the expected fields in the peer assessment section
submitted_assessments: the expected fields in the submitted assessments section
self_assessment: the expected fields in the self assessment section
"""
self.staff_area_page.visit()
self.staff_area_page.show_learner(username)
self.staff_area_page.expand_learner_report_sections()
self.assertEqual(peer_assessments, self.staff_area_page.status_text('peer__assessments'))
self.assertEqual(submitted_assessments, self.staff_area_page.status_text('submitted__assessments'))
self.assertEqual(self_assessment, self.staff_area_page.status_text('self__assessments'))
def verify_submission_has_peer_grade(self, learner, learner_email, max_attempts=5):
"""
If learner does not now have a score, it means that "extra" submissions are in the system,
and more need to be scored. Create additional learners and have them grade until learner has
a grade (stopping after specified max attempts).
Args:
            learner: the learner whose grade will be checked
            learner_email: the email of the learner, used to log back in as the learner
            max_attempts: the maximum number of times an additional peer grading should be done
"""
def peer_grade_exists():
self.staff_area_page.visit()
self.staff_area_page.show_learner(learner)
return "Peer Assessments for This Learner" in self.staff_area_page.learner_report_sections
count = 0
while not peer_grade_exists() and count < max_attempts:
count += 1
self.do_submission_training_self_assessment()
self.do_peer_assessment(options=self.PEER_ASSESSMENT)
self.login_user(learner, learner_email)
self.assertTrue(
peer_grade_exists(),
"Learner still not graded after {} additional attempts".format(max_attempts)
)
def verify_grade_entries(self, expected_entries):
"""
Verify the grade entries (sources and values) as shown in the
"Your Grade" section.
Args:
expected_entries: array of expected entries, with each entry being an array
consisting of the data for a particular source. Note that order is important.
"""
for index, expected_entry in enumerate(expected_entries):
self.assertEqual(expected_entry[0], self.grade_page.grade_entry(0, index))
self.assertEqual(expected_entry[1], self.grade_page.grade_entry(1, index))
class MultipleOpenAssessmentMixin(FullWorkflowMixin):
"""
A Multiple ORA assessment mixin with helper methods and constants for testing a full workflow
(training, self assessment, peer assessment, staff override).
"""
def setup_vertical_index(self, vertical_index):
"""
Set the vertical index on the page.
        Each problem has a vertical index assigned and a `vert-{vertical_index}` top-level class.
        Setting the vertical index on the pages moves them to a different problem.
        """
self.submission_page.vertical_index = vertical_index
self.self_asmnt_page.vertical_index = vertical_index
self.peer_asmnt_page.vertical_index = vertical_index
self.student_training_page.vertical_index = vertical_index
self.staff_asmnt_page.vertical_index = vertical_index
self.grade_page.vertical_index = vertical_index
self.staff_area_page.vertical_index = vertical_index
def assess_component(self, vertical_index, peer_grades_me=True):
""" Assess the complete flow of an open assessment."""
self.setup_vertical_index(vertical_index)
self.do_train_self_peer(peer_grades_me)
self.staff_assessment(peer_grades_me)
class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
"""
Tests of complete workflows, combining multiple required steps together.
"""
def setUp(self):
super(FullWorkflowOverrideTest, self).setUp("full_workflow_staff_override", staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_staff_override_at_end(self):
"""
Scenario: complete workflow with staff override at the very end
Given that I have created a submission, completed training, and done a self assessment
And a second learner has also created a submission, training, and self assessment
Then I can assess a learner
And when another learner assesses me
Then I see my score based on the peer assessment
And when a staff member overrides the score
Then I see the staff override score
And all fields in the staff area tool are correct
"""
learner = self.do_train_self_peer()
# At this point, the learner sees the peer assessment score (0).
self.assertEqual(self.PEER_ASSESSMENT_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.PEER_ASSESSMENT_STAFF_AREA_SCORE)
self.assertEquals(
['CRITERION', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points\nPeer 1 - Poor', 'Good',
'Poor - 0 points\nPeer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor"), (u"PEER MEDIAN GRADE - 0 POINTS", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
# Now do a staff override, changing the score (to 1).
self.do_staff_override(learner)
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Peer 1 - Poor', 'Good',
'Fair - 1 point', 'Peer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u"PEER MEDIAN GRADE", u"Poor"), (u"PEER MEDIAN GRADE", u"Poor")],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
@retry()
@attr('acceptance')
def test_staff_override_at_beginning(self):
"""
Scenario: complete workflow with staff override at the very beginning
Given that I have created a submission
Then I see no score yet
And when a staff member creates a grade override
Then I see that an override exists, but I cannot see the score
And when a second learner creates a submission
        Then I can complete my required steps (training, self assessment, peer assessment)
And I see my staff override score
And all fields in the staff area tool are correct
"""
# Create only the initial submission before doing the staff override.
learner, learner_email = self.do_submission()
# Verify no grade present (and no staff grade section), no assessment information in staff area.
self.assertIsNone(self.grade_page.wait_for_page().score)
self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
self.verify_staff_area_fields(learner, [], [], [])
self.staff_area_page.verify_learner_final_score(self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
# Do staff override-- score still not shown due to steps not being complete.
self.do_staff_override(learner, self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
# Refresh the page so the learner sees the Staff Grade section.
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
# Now create a second learner so that "learner" has someone to assess.
self.do_submission()
# Go back to the original learner to complete her workflow and view score.
self.login_user(learner, learner_email)
# Do training exercise and self assessment
self.student_training_page.visit()
self.do_training()
self.submit_self_assessment(self.SELF_ASSESSMENT)
# Verify staff grade still not available, as learner has not done peer assessment.
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, self.STAFF_OVERRIDE_LEARNER_STEPS_NOT_COMPLETE)
self.assertIsNone(self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(learner, [], [], self.STAFF_AREA_SELF_ASSESSMENT)
self.staff_area_page.verify_learner_final_score(self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)
# Now do the final required step-- peer grading.
self.do_peer_assessment(options=self.SUBMITTED_ASSESSMENT)
# Grade is now visible to the learner (even though no student has graded the learner).
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS, None)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(learner, [], self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Waiting for peer reviews', 'Good',
'Fair - 1 point', 'Waiting for peer reviews', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries([
[(u"STAFF GRADE - 0 POINTS", u"Poor"), (u"STAFF GRADE - 1 POINT", u"Fair")],
[(u'PEER MEDIAN GRADE', u'Waiting for peer reviews'), (u'PEER MEDIAN GRADE', u'Waiting for peer reviews')],
[(u"YOUR SELF ASSESSMENT", u"Good"), (u"YOUR SELF ASSESSMENT", u"Excellent")]
])
@ddt.ddt
class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
"""
Tests of complete workflows, combining multiple required steps together.
"""
def setUp(self):
super(FullWorkflowRequiredTest, self).setUp("full_workflow_staff_required", staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
@ddt.data(True, False)
def test_train_self_peer_staff(self, peer_grades_me):
"""
        Scenario: complete workflow that includes a required staff assessment step.
Given that I have created a submission, completed training, and done a self assessment
And a second learner has also created a submission, training, and self assessment
Then I can assess a learner
And when another learner assesses me
And a staff member submits a score
Then I see the staff score
And all fields in the staff area tool are correct
"""
# Using ddt booleans to confirm behavior independent of whether I receive a peer score or not
self.do_train_self_peer(peer_grades_me)
# Do staff assessment step
self.staff_assessment(peer_grades_me)
@ddt.ddt
class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
"""
    Test for a problem containing a criterion that only accepts feedback. Makes and verifies
    self and staff assessments.
"""
def setUp(self):
super(FeedbackOnlyTest, self).setUp("feedback_only", staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
def generate_feedback(self, assessment_type, feedback_type):
return "{}: {} feedback".format(assessment_type, feedback_type)
def assess_feedback(self, self_or_peer=""):
        if self_or_peer not in ("self", "peer"):
            raise AssertionError("assess_feedback only works for self or peer assessments")
page = self.self_asmnt_page if self_or_peer == "self" else self.peer_asmnt_page
page.wait_for_page()
page.submit_assessment()
@retry()
@attr('acceptance')
def test_feedback_only(self):
# Make submission
        user, _ = self.do_submission()  # do_submission() returns (username, email)
# Make self assessment
self.self_asmnt_page.visit()
self.self_asmnt_page.wait_for_page()
self.self_asmnt_page.provide_criterion_feedback(self.generate_feedback("self", "criterion"))
self.self_asmnt_page.provide_overall_feedback(self.generate_feedback("self", "overall"))
self.self_asmnt_page.assess("self", [0])
self.self_asmnt_page.wait_for_complete()
self.assertTrue(self.self_asmnt_page.is_complete)
# Staff assess all available submissions
self.do_staff_assessment(
            options_selected=[0],  # Select the 0-th option (Yes) on the single scored criterion
feedback=lambda feedback_type: self.generate_feedback("staff", feedback_type)
)
# Verify student-viewable grade report
self.refresh_page()
self.grade_page.wait_for_page()
self.assertEqual(self.grade_page.grade_entry(0, 0), (u'STAFF GRADE - 1 POINT', u'Yes')) # Reported answer 1
self.assertEqual(self.grade_page.grade_entry(0, 1), (u'YOUR SELF ASSESSMENT', u'Yes')) # Reported answer 2
for i, assessment_type in enumerate(["staff", "self"]):
# Criterion feedback first
expected = self.generate_feedback(assessment_type, "criterion")
actual = self.grade_page.feedback_entry(1, i)
self.assertEqual(actual, expected) # Reported answers 3 and 4
# Then overall
expected = self.generate_feedback(assessment_type, "overall")
actual = self.grade_page.feedback_entry("feedback", i)
self.assertEqual(actual, expected) # Reported answers 5 and 6
# Verify that no reported answers other than the 6 we already verified are present
self.assertEqual(self.grade_page.total_reported_answers, 6)
# Verify that the feedback-only criterion has no score
self.assertEqual(self.grade_page.number_scored_criteria, 1)
# Verify feedback appears from all assessments in staff tools
self.staff_area_page.show_learner(user)
self.staff_area_page.expand_learner_report_sections()
self.assertEqual(
self.staff_area_page.learner_final_score_table_headers,
[u'CRITERION', u'STAFF GRADE', u'SELF ASSESSMENT GRADE']
)
self.assertEqual(
self.staff_area_page.learner_final_score_table_values,
[u'Yes - 1 point', u'Yes', u'Feedback Recorded', u'Feedback Recorded']
)
self.assertEqual(
self.staff_area_page.status_text('staff__assessments')[5],
self.generate_feedback("staff", "criterion")
)
self.assertEqual(
self.staff_area_page.overall_feedback('staff__assessments'),
self.generate_feedback("staff", "overall")
)
self.assertEqual(
self.staff_area_page.status_text('self__assessments')[5],
self.generate_feedback("self", "criterion")
)
self.assertEqual(
self.staff_area_page.overall_feedback('self__assessments'),
self.generate_feedback("self", "overall")
)
# Verify correct score is shown
self.staff_area_page.verify_learner_final_score("Final grade: 1 out of 1")
class MultipleOpenAssessmentTest(OpenAssessmentTest, MultipleOpenAssessmentMixin):
"""
Test the multiple peer-assessment flow.
"""
def setUp(self):
super(MultipleOpenAssessmentTest, self).setUp('multiple_ora')
# Staff area page is not present in OpenAssessmentTest base class, so we are adding it here.
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_multiple_ora_complete_flow(self):
"""
Scenario: complete workflow on a unit containing multiple ORA blocks.
"""
        # Each problem has a vertical index assigned and a `vert-{vertical_index}` top-level class,
        # which means the pages are differentiated by the vertical index number assigned to each problem.
        # We pass the vertical index number and set it via the `self.setup_vertical_index` method
        # in order to move to a different problem.
# Assess first ORA problem, pass the vertical index number
self.assess_component(0)
# Assess second ORA problem, pass the vertical index number
self.assess_component(1)
if __name__ == "__main__":
# Configure the screenshot directory
if 'SCREENSHOT_DIR' not in os.environ:
tests_dir = os.path.dirname(__file__)
os.environ['SCREENSHOT_DIR'] = os.path.join(tests_dir, 'screenshots')
unittest.main()
|
nttks/edx-ora2
|
test/acceptance/tests.py
|
Python
|
agpl-3.0
| 55,483
|
[
"VisIt"
] |
ccd8aa80d00e8a9b3effcdee2cb0fa7d2bf2e478865b541653782fb5c24fe0e9
|
#!/usr/bin/env python
"""Transport exersice
This file should do the same as pt_h2_lcao.py, but extracts the Hamiltonians
manually instead of using gpawtransport, which currently does not work
"""
from ase import Atoms
from gpaw import GPAW, Mixer, FermiDirac
from gpaw.lcao.tools import remove_pbc, get_lcao_hamiltonian, get_lead_lcao_hamiltonian
import cPickle as pickle
a = 2.41 # Pt binding length
b = 0.90 # H2 binding length
c = 1.70 # Pt-H binding length
L = 7.00 # width of unit cell
#####################
# Scattering region #
#####################
# Setup the Atoms for the scattering region.
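# Layout: five Pt atoms form each lead, the H2 molecule sits between them,
# and all atoms are centered at L/2 in the transverse (y, z) directions.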
atoms = Atoms('Pt5H2Pt5', pbc=(1, 0, 0), cell=[9 * a + b + 2 * c, L, L])
atoms.positions[:5, 0] = [i * a for i in range(5)]
atoms.positions[-5:, 0] = [i * a + b + 2 * c for i in range(4, 9)]
atoms.positions[5:7, 0] = [4 * a + c, 4 * a + c + b]
atoms.positions[:, 1:] = L / 2.
# Attach a GPAW calculator
calc = GPAW(h=0.3,
xc='PBE',
basis='szp(dzp)',
occupations=FermiDirac(width=0.1),
kpts=(1, 1, 1),
mode='lcao',
txt='pt_h2_lcao_scat.txt',
mixer=Mixer(0.1, 5, weight=100.0),
usesymm=None)
atoms.set_calculator(calc)
atoms.get_potential_energy() # Converge everything!
Ef = atoms.calc.get_fermi_level()
H_skMM, S_kMM = get_lcao_hamiltonian(calc)
# Only use first kpt, spin, as there are no more
H, S = H_skMM[0, 0], S_kMM[0]
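# Shift the Hamiltonian so that energies are measured relative to the Fermi level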
H -= Ef * S
remove_pbc(atoms, H, S, 0)
# Dump the Hamiltonian and overlap matrix to a pickle file
pickle.dump((H, S), open('scat_hs.pickle', 'wb'), 2)
########################
# Left principal layer #
########################
# Use four Pt atoms in the lead, so only take those from before
atoms = atoms[:4].copy()
atoms.set_cell([4 * a, L, L])
# Attach a GPAW calculator
calc = GPAW(h=0.3,
xc='PBE',
basis='szp(dzp)',
occupations=FermiDirac(width=0.1),
kpts=(4, 1, 1), # More kpts needed as the x-direction is shorter
mode='lcao',
txt='pt_h2_lcao_llead.txt',
mixer=Mixer(0.1, 5, weight=100.0),
usesymm=None)
atoms.set_calculator(calc)
atoms.get_potential_energy() # Converge everything!
Ef = atoms.calc.get_fermi_level()
ibz2d_k, weight2d_k, H_skMM, S_kMM = get_lead_lcao_hamiltonian(calc)
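# ibz2d_k and weight2d_k presumably hold the irreducible transverse k-points and
# their weights; they are not needed for dumping the lead Hamiltonian.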
# Only use first kpt, spin, as there are no more
H, S = H_skMM[0, 0], S_kMM[0]
H -= Ef * S
# Dump the Hamiltonian and overlap matrix to a pickle file
pickle.dump((H, S), open('lead1_hs.pickle', 'wb'), 2)
#########################
# Right principal layer #
#########################
# This is identical to the left principal layer, so no new calculation is needed.
# Just dump the same Hamiltonian and overlap matrix to a pickle file
pickle.dump((H, S), open('lead2_hs.pickle', 'wb'), 2)
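# A minimal sketch (not part of the original exercise) of how the dumped pickle
# files could be consumed with ASE's TransportCalculator; the energy grid below
# is purely illustrative.
#
# import numpy as np
# from ase.transport.calculators import TransportCalculator
# H, S = pickle.load(open('scat_hs.pickle', 'rb'))
# H1, S1 = pickle.load(open('lead1_hs.pickle', 'rb'))
# H2, S2 = pickle.load(open('lead2_hs.pickle', 'rb'))
# tcalc = TransportCalculator(h=H, s=S, h1=H1, s1=S1, h2=H2, s2=S2,
#                             energies=np.arange(-3.0, 3.0, 0.02))
# T_e = tcalc.get_transmission()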
|
robwarm/gpaw-symm
|
doc/exercises/transport/pt_h2_lcao_manual.py
|
Python
|
gpl-3.0
| 2,820
|
[
"ASE",
"GPAW"
] |
1eacbadfe137e6b0c346ccf5deef95d8ad06f9fa0313ae0ecfcb0d362f30ce0e
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from .mediaitem import MediaMediaItem
from .mediatab import MediaTab
__all__ = ['MediaMediaItem']
|
marmyshev/item_title
|
openlp/plugins/media/lib/__init__.py
|
Python
|
gpl-2.0
| 2,206
|
[
"Brian"
] |
df28e9d2bddbe49adc8876b2fd708bab39ec8ab1d72f97b2d0eeca02c830ea63
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from xml.dom import minidom
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.TestCase):
obj_for_test = {
'test': {
'name': 'test',
'values': [1, 2, 3],
'attrs': {
'foo': 1,
'bar': 2,
'baz': 3,
},
},
}
def test_empty_selector(self):
sel = xmlutil.Selector()
self.assertEqual(len(sel.chain), 0)
self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
def test_dict_selector(self):
sel = xmlutil.Selector('test')
self.assertEqual(len(sel.chain), 1)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel(self.obj_for_test),
self.obj_for_test['test'])
def test_datum_selector(self):
sel = xmlutil.Selector('test', 'name')
self.assertEqual(len(sel.chain), 2)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'name')
self.assertEqual(sel(self.obj_for_test), 'test')
def test_list_selector(self):
sel = xmlutil.Selector('test', 'values', 0)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'values')
self.assertEqual(sel.chain[2], 0)
self.assertEqual(sel(self.obj_for_test), 1)
def test_items_selector(self):
sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[2], xmlutil.get_items)
for key, val in sel(self.obj_for_test):
self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
def test_missing_key_selector(self):
sel = xmlutil.Selector('test2', 'attrs')
self.assertEqual(sel(self.obj_for_test), None)
self.assertRaises(KeyError, sel, self.obj_for_test, True)
def test_constant_selector(self):
sel = xmlutil.ConstantSelector('Foobar')
self.assertEqual(sel.value, 'Foobar')
self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
def test_element_initial_attributes(self):
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
c=4, d=5, e=6)
# Verify all the attributes are as expected
expected = dict(a=1, b=2, c=4, d=5, e=6)
for k, v in expected.items():
self.assertEqual(elem.attrib[k].chain[0], v)
def test_element_get_attributes(self):
expected = dict(a=1, b=2, c=3)
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Verify that get() retrieves the attributes
for k, v in expected.items():
self.assertEqual(elem.get(k).chain[0], v)
def test_element_set_attributes(self):
attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
# Create a bare template element with no attributes
elem = xmlutil.TemplateElement('test')
# Set the attribute values
for k, v in attrs.items():
elem.set(k, v)
# Now verify what got set
self.assertEqual(len(elem.attrib['a'].chain), 1)
self.assertEqual(elem.attrib['a'].chain[0], 'a')
self.assertEqual(len(elem.attrib['b'].chain), 1)
self.assertEqual(elem.attrib['b'].chain[0], 'foo')
self.assertEqual(elem.attrib['c'], attrs['c'])
def test_element_attribute_keys(self):
attrs = dict(a=1, b=2, c=3, d=4)
expected = set(attrs.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=attrs)
# Now verify keys
self.assertEqual(set(elem.keys()), expected)
def test_element_attribute_items(self):
expected = dict(a=xmlutil.Selector(1),
b=xmlutil.Selector(2),
c=xmlutil.Selector(3))
keys = set(expected.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Now verify items
for k, v in elem.items():
self.assertEqual(expected[k], v)
keys.remove(k)
# Did we visit all keys?
self.assertEqual(len(keys), 0)
def test_element_selector_none(self):
# Create a template element with no selector
elem = xmlutil.TemplateElement('test')
self.assertEqual(len(elem.selector.chain), 0)
def test_element_selector_string(self):
# Create a template element with a string selector
elem = xmlutil.TemplateElement('test', selector='test')
self.assertEqual(len(elem.selector.chain), 1)
self.assertEqual(elem.selector.chain[0], 'test')
def test_element_selector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit selector
elem = xmlutil.TemplateElement('test', selector=sel)
self.assertEqual(elem.selector, sel)
def test_element_subselector_none(self):
# Create a template element with no subselector
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.subselector, None)
def test_element_subselector_string(self):
# Create a template element with a string subselector
elem = xmlutil.TemplateElement('test', subselector='test')
self.assertEqual(len(elem.subselector.chain), 1)
self.assertEqual(elem.subselector.chain[0], 'test')
def test_element_subselector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit subselector
elem = xmlutil.TemplateElement('test', subselector=sel)
self.assertEqual(elem.subselector, sel)
def test_element_append_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a child element
child = xmlutil.TemplateElement('child')
# Append the child to the parent
elem.append(child)
# Verify that the child was added
self.assertEqual(len(elem), 1)
self.assertEqual(elem[0], child)
self.assertEqual('child' in elem, True)
self.assertEqual(elem['child'], child)
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child')
self.assertRaises(KeyError, elem.append, child2)
def test_element_extend_children(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Verify that the children were added
self.assertEqual(len(elem), 3)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
children2 = [
xmlutil.TemplateElement('child4'),
xmlutil.TemplateElement('child1'),
]
self.assertRaises(KeyError, elem.extend, children2)
# Also ensure that child4 was not added
self.assertEqual(len(elem), 3)
self.assertEqual(elem[-1].tag, 'child3')
def test_element_insert_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a child to insert
child = xmlutil.TemplateElement('child4')
# Insert it
elem.insert(1, child)
# Ensure the child was inserted in the right place
self.assertEqual(len(elem), 4)
children.insert(1, child)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child2')
self.assertRaises(KeyError, elem.insert, 2, child2)
def test_element_remove_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a test child to remove
child = xmlutil.TemplateElement('child2')
# Try to remove it
self.assertRaises(ValueError, elem.remove, child)
# Ensure that no child was removed
self.assertEqual(len(elem), 3)
# Now remove a legitimate child
elem.remove(children[1])
# Ensure that the child was removed
self.assertEqual(len(elem), 2)
self.assertEqual(elem[0], children[0])
self.assertEqual(elem[1], children[2])
self.assertEqual('child2' in elem, False)
# Ensure the child cannot be retrieved by name
def get_key(elem, key):
return elem[key]
self.assertRaises(KeyError, get_key, elem, 'child2')
def test_element_text(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Ensure that it has no text
self.assertEqual(elem.text, None)
# Try setting it to a string and ensure it becomes a selector
elem.text = 'test'
self.assertEqual(hasattr(elem.text, 'chain'), True)
self.assertEqual(len(elem.text.chain), 1)
self.assertEqual(elem.text.chain[0], 'test')
# Try resetting the text to None
elem.text = None
self.assertEqual(elem.text, None)
# Now make up a selector and try setting the text to that
sel = xmlutil.Selector()
elem.text = sel
self.assertEqual(elem.text, sel)
# Finally, try deleting the text and see what happens
del elem.text
self.assertEqual(elem.text, None)
def test_apply_attrs(self):
# Create a template element
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2))
tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the correct attributes were set
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
def test_apply_text(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.ConstantSelector(1)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the text was set
self.assertEqual(str(tmpl_elem.text.value), elem.text)
def test__render(self):
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2),
attr3=xmlutil.ConstantSelector(3))
# Create a master template element
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template elements
slave_elems = [
xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']),
]
# Try the render
elem = master_elem._render(None, None, slave_elems, None)
# Verify the particulars of the render
self.assertEqual(elem.tag, 'test')
self.assertEqual(len(elem.nsmap), 0)
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
# Create a parent for the element to be rendered
parent = etree.Element('parent')
# Try the render again...
elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
# Verify the particulars of the render
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], elem)
self.assertEqual(len(elem.nsmap), 1)
self.assertEqual(elem.nsmap['a'], 'foo')
def test_render(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.Selector()
# Create the object we're going to render
obj = ['elem1', 'elem2', 'elem3', 'elem4']
# Try a render with no object
elems = tmpl_elem.render(None, None)
self.assertEqual(len(elems), 0)
# Try a render with one object
elems = tmpl_elem.render(None, 'foo')
self.assertEqual(len(elems), 1)
self.assertEqual(elems[0][0].text, 'foo')
self.assertEqual(elems[0][1], 'foo')
# Now, try rendering an object with multiple entries
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
self.assertEqual(len(elems), 4)
# Check the results
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].text, obj[idx])
self.assertEqual(elems[idx][1], obj[idx])
def test_subelement(self):
# Try the SubTemplateElement constructor
parent = xmlutil.SubTemplateElement(None, 'parent')
self.assertEqual(parent.tag, 'parent')
self.assertEqual(len(parent), 0)
# Now try it with a parent element
child = xmlutil.SubTemplateElement(parent, 'child')
self.assertEqual(child.tag, 'child')
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], child)
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.unwrap(), elem)
self.assertEqual(elem.wrap().root, elem)
def test_dyntag(self):
obj = ['a', 'b', 'c']
# Create a template element with a dynamic tag
tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
# Try the render
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
# Verify the particulars of the render
self.assertEqual(len(elems), len(obj))
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
self.assertEqual(tmpl.unwrap(), elem)
self.assertEqual(tmpl.wrap(), tmpl)
def test__siblings(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
# Check that we get the right siblings
siblings = tmpl._siblings()
self.assertEqual(len(siblings), 1)
self.assertEqual(siblings[0], elem)
def test__nsmap(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check that we get the right namespace dictionary
nsmap = tmpl._nsmap()
self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
def test_master_attach(self):
# Set up a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1)
# Make sure it has a root but no slaves
self.assertEqual(tmpl.root, elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid slave
bad_elem = xmlutil.TemplateElement('test2')
self.assertRaises(ValueError, tmpl.attach, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid and a valid slave
good_elem = xmlutil.TemplateElement('test')
self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an inapplicable template
class InapplicableTemplate(xmlutil.Template):
def apply(self, master):
return False
inapp_tmpl = InapplicableTemplate(good_elem)
tmpl.attach(inapp_tmpl)
self.assertEqual(len(tmpl.slaves), 0)
# Now try attaching an applicable template
tmpl.attach(good_elem)
self.assertEqual(len(tmpl.slaves), 1)
self.assertEqual(tmpl.slaves[0].root, good_elem)
def test_master_copy(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
# Give it a slave
slave = xmlutil.TemplateElement('test')
tmpl.attach(slave)
# Construct a copy
copy = tmpl.copy()
# Check to see if we actually managed a copy
self.assertNotEqual(tmpl, copy)
self.assertEqual(tmpl.root, copy.root)
self.assertEqual(tmpl.version, copy.version)
self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
self.assertEqual(len(tmpl.slaves), len(copy.slaves))
self.assertEqual(tmpl.slaves[0], copy.slaves[0])
def test_slave_apply(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
master = xmlutil.MasterTemplate(elem, 3)
# Construct a slave template with applicable minimum version
slave = xmlutil.SlaveTemplate(elem, 2)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with equal minimum version
slave = xmlutil.SlaveTemplate(elem, 3)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with inapplicable minimum version
slave = xmlutil.SlaveTemplate(elem, 4)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with applicable version range
slave = xmlutil.SlaveTemplate(elem, 2, 4)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with low version range
slave = xmlutil.SlaveTemplate(elem, 1, 2)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with high version range
slave = xmlutil.SlaveTemplate(elem, 4, 5)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with matching version range
slave = xmlutil.SlaveTemplate(elem, 3, 3)
self.assertEqual(slave.apply(master), True)
def test__serialize(self):
# Our test object to serialize
obj = {
'test': {
'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
},
'image': {
'name': 'image_foobar',
'id': 42,
},
},
}
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
value.text = xmlutil.Selector()
attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
key=0, value=1)
master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
# Set up our slave template
root_slave = xmlutil.TemplateElement('test', selector='test')
image = xmlutil.SubTemplateElement(root_slave, 'image',
selector='image', id='id')
image.text = xmlutil.Selector('name')
slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
# Attach the slave to the master...
master.attach(slave)
# Try serializing our object
siblings = master._siblings()
nsmap = master._nsmap()
result = master._serialize(None, obj, siblings, nsmap)
# Now we get to manually walk the element tree...
self.assertEqual(result.tag, 'test')
self.assertEqual(len(result.nsmap), 2)
self.assertEqual(result.nsmap['f'], 'foo')
self.assertEqual(result.nsmap['b'], 'bar')
self.assertEqual(result.get('name'), obj['test']['name'])
for idx, val in enumerate(obj['test']['values']):
self.assertEqual(result[idx].tag, 'value')
self.assertEqual(result[idx].text, str(val))
idx += 1
self.assertEqual(result[idx].tag, 'attrs')
for attr in result[idx]:
self.assertEqual(attr.tag, 'attr')
self.assertEqual(attr.get('value'),
str(obj['test']['attrs'][attr.get('key')]))
idx += 1
self.assertEqual(result[idx].tag, 'image')
self.assertEqual(result[idx].get('id'),
str(obj['test']['image']['id']))
self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.TestCase):
def test_master_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(MasterTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = MasterTemplateBuilder()
# Make sure that there is a template cached...
self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
# Make sure it wasn't what was returned...
self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
cached = MasterTemplateBuilder._tmpl
tmpl2 = MasterTemplateBuilder()
self.assertEqual(MasterTemplateBuilder._tmpl, cached)
# Make sure we're always getting fresh copies
self.assertNotEqual(tmpl1, tmpl2)
# Make sure we can override the copying behavior
tmpl3 = MasterTemplateBuilder(False)
self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
def test_slave_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(SlaveTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = SlaveTemplateBuilder()
# Make sure there is a template cached...
self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
# Make sure it was what was returned...
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
tmpl2 = SlaveTemplateBuilder()
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure we're always getting the cached copy
self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
def test_make_flat_dict(self):
expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<wrapper><a>foo</a><b>bar</b></wrapper>')
root = xmlutil.make_flat_dict('wrapper')
tmpl = xmlutil.MasterTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
def test_safe_parse_xml(self):
normal_body = ("""
<?xml version="1.0" ?><foo>
<bar>
<v1>hey</v1>
<v2>there</v2>
</bar>
</foo>""").strip()
dom = xmlutil.safe_minidom_parse_string(normal_body)
self.assertEqual(normal_body, str(dom.toxml()))
self.assertRaises(exception.MalformedRequestBody,
xmlutil.safe_minidom_parse_string,
tests_utils.killer_xml_body())
class SafeParserTestCase(test.TestCase):
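    # These tests exercise ProtectedExpatParser's defenses against common XML attack
    # vectors: external DTDs, external entity (file) inclusion, and notation declarations.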
def test_external_dtd(self):
xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head/>
<body>html with dtd</body>
</html>""")
parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
forbid_entities=True)
self.assertRaises(ValueError,
minidom.parseString,
xml_string, parser)
def test_external_file(self):
xml_string = """<!DOCTYPE external [
<!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
]>
<root>ⅇ</root>"""
parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
forbid_entities=True)
self.assertRaises(ValueError,
minidom.parseString,
xml_string, parser)
def test_notation(self):
xml_string = """<?xml version="1.0" standalone="no"?>
<!-- comment data -->
<!DOCTYPE x [
<!NOTATION notation SYSTEM "notation.jpeg">
]>
<root attr1="value1">
</root>"""
parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
forbid_entities=True)
self.assertRaises(ValueError,
minidom.parseString,
xml_string, parser)
|
jessicalucci/NovaOrc
|
nova/tests/api/openstack/test_xmlutil.py
|
Python
|
apache-2.0
| 28,151
|
[
"VisIt"
] |
2f987b325d60b493703022b09b186d71338b97068060989d1081a20db9e0a555
|
"""
Non-linearity I: Detection Chain
================================
This simple script can be used to study how large an error in the non-linearity correction can be
tolerated given the requirements.
The following requirements related to the non-linearity have been taken from GDPRD.
R-GDP-CAL-058: The contribution of the residuals of the non-linearity correction on the error on the determination
of each ellipticity component of the local PSF shall not exceed 3x10**-5 (one sigma).
R-GDP-CAL-068: The contribution of the residuals of the non-linearity correction on the error on the relative
error sigma(R**2)/R**2 on the determination of the local PSF R**2 shall not exceed 1x10**-4 (one sigma).
:requires: PyFITS
:requires: NumPy
:requires: SciPy
:requires: matplotlib
:requires: VISsim-Python
:version: 0.97
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
"""
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import matplotlib.pyplot as plt
import pyfits as pf
import numpy as np
import datetime, cPickle, os
from analysis import shape
from scipy import interpolate
from support import logger as lg
from support import files as fileIO
from support import VISinstrumentModel
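#NOTE: the actual non-linearity model lives in
#VISinstrumentModel.CCDnonLinearityModelSinusoidal; the helper below is only an
#illustrative sketch (hypothetical, not used by this script) of the kind of
#error curve assumed here: a sinusoidal relative deviation with a given
#amplitude, phase and number of angular frequencies over the signal range.
def _sinusoidalErrorSketch(data, amplitude, phase=0.98, multi=1.5):
    """Illustrative only: perturb data with a sinusoidal relative error."""
    level = data / np.max(data)  #normalised signal level in [0, 1]
    return data * (1.0 + amplitude * np.sin(multi * np.pi * level + phase * np.pi))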
def testNonlinearity(log, file='data/psf12x.fits', oversample=12.0, sigma=0.75, phs=0.98,
phases=None, psfs=5000, amps=12, multiplier=1.5, minerror=-5., maxerror=-1,
linspace=False):
"""
Function to study the effect of an error in the non-linearity correction on the knowledge of the PSF ellipticity and size.
The error has been assumed to follow a sinusoidal curve with random phase and a given number of angular
frequencies (defined by the multiplier). The amplitudes being studied, i.e. the size of the maximum deviation,
can be spaced either linearly or logarithmically.
:param log: logger instance
:type log: instance
:param file: name of the PSF FITS files to use [default=data/psf12x.fits]
:type file: str
:param oversample: the PSF oversampling factor, which needs to match the input file [default=12]
:type oversample: float
:param sigma: 1sigma radius of the Gaussian weighting function for shape measurements
:type sigma: float
:param phs: phase in case phases = None
:type phs: float
:param phases: if None then a single fixed phase will be applied, if an int then a given number of random
phases will be used
:type phases: None or int
:param psfs: the number of PSFs to use.
:type psfs: int
:param amps: the number of individual samplings used when covering the error space
:type amps: int
:param multiplier: the number of angular frequencies to be used
:type multiplier: int or float
:param minerror: the minimum error to be covered, given in log10(min_error) [default=-5 i.e. 0.001%]
:type minerror: float
:param maxerror: the maximum error to be covered, given in log10(max_error) [default=-1 i.e. 10%]
:type maxerror: float
:param linspace: whether the amplitudes of the error curves should be linearly or logarithmically spaced.
:type linspace: boolean
:return: reference value and results dictionaries
:rtype: list
"""
#read in PSF and renormalize it to norm
data = pf.getdata(file)
data /= np.max(data)
#derive reference values from clean PSF
settings = dict(sampling=1.0/oversample, sigma=sigma)
sh = shape.shapeMeasurement(data.copy()*1e5, log, **settings)
reference = sh.measureRefinedEllipticity()
#PSF scales
scales = np.random.random_integers(2e2, 2e5, psfs)
#range of amplitude to study
if linspace:
amplitudes = np.linspace(10**minerror, 10**maxerror, amps)[::-1] #flip so that the largest is first
else:
amplitudes = np.logspace(minerror, maxerror, amps)[::-1]
out = {}
#loop over all the amplitudes to be studied
for i, amp in enumerate(amplitudes):
print 'Run %i / %i with amplitude=%e' % (i+1, amps, amp)
de1 = []
de2 = []
de = []
R2 = []
dR2 = []
e1 = []
e2 = []
e = []
if phases is None:
ph = (phs,)
else:
#random phases to Monte Carlo
ph = np.random.random(phases)
for phase in ph:
print 'Phase: %.3f' % phase
for scale in scales:
#apply nonlinearity model to the scaled PSF
scaled = data.copy() * scale
newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(scaled, amp, phase=phase, multi=multiplier)
newdata[newdata < 0.] = 0.
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(newdata.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
#save values
e1.append(results['e1'])
e2.append(results['e2'])
e.append(results['ellipticity'])
R2.append(results['R2'])
de1.append(results['e1'] - reference['e1'])
de2.append(results['e2'] - reference['e2'])
de.append(results['ellipticity'] - reference['ellipticity'])
dR2.append(results['R2'] - reference['R2'])
out[amp] = [e1, e2, e, R2, de1, de2, de, dR2]
return reference, out
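#Example (sketch): a quick, low-statistics run with hypothetical settings;
#the defaults above correspond to the full production setup.
#
#   log = lg.setUpLogger('delete.me')
#   reference, out = testNonlinearity(log, psfs=100, amps=4, phases=2)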
def plotResults(results, reqe=3e-5, reqr2=1e-4, outdir='results', timeStamp=False):
"""
Creates a simple plot to combine and show the results.
:param results: results to be plotted [reference values, results dictionary]
:type results: list
:param reqe: the requirement for ellipticity [default=3e-5]
:type reqe: float
:param reqr2: the requirement for size R2 [default=1e-4]
:type reqr2: float
:param outdir: output directory to which the plots will be saved
:type outdir: str
:param timeStamp: whether to add a time stamp to the output plots [default=False]
:type timeStamp: bool
:return: None
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
ref = results[0]
res = results[1]
print '\nSigma results:'
txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())
fig = plt.figure()
plt.title(r'VIS Non-linearity: $\sigma(e)$')
ax = fig.add_subplot(111)
keys = res.keys()
keys.sort()
vals = []
for key in keys:
e1 = np.asarray(res[key][0])
e2 = np.asarray(res[key][1])
e = np.asarray(res[key][2])
std1 = np.std(e1)
std2 = np.std(e2)
std = np.std(e)
vals.append(std)
ax.scatter(key*0.9, std, c='m', marker='*')
ax.scatter(key, std1, c='b', marker='o')
ax.scatter(key, std2, c='y', marker='s')
print key, std, std1, std2
#find the crossing
ks = np.asarray(keys)
values = np.asarray(vals)
f = interpolate.interp1d(ks, values, kind='cubic')
x = np.logspace(np.log10(ks.min()*1.05), np.log10(ks.max()*0.95), 1000)
vals = f(x)
ax.plot(x, vals, '--', c='0.2', zorder=10)
msk = vals < reqe
maxn = np.max(x[msk])
plt.text(1e-3, 2e-5, r'Error for $e$ must be $\leq %.2e$ per cent' % (maxn * 100),
fontsize=11, ha='center', va='center')
#label
ax.scatter(key, std, c='m', marker='*', label=r'$\sigma (e)$')
ax.scatter(key*1.1, std1, c='b', marker='o', label=r'$\sigma (e_{1})$')
ax.scatter(key, std2, c='y', marker='s', label=r'$\sigma (e_{2})$')
ax.axhline(y=reqe, c='g', ls='--', label='Requirement')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(1e-6, 1e-3)
ax.set_xlabel('Error in the Non-linearity Correction')
ax.set_ylabel(r'$\sigma (e_{i})\ , \ \ \ i \in [1,2]$')
xmin, xmax = ax.get_xlim()
ax.fill_between(np.linspace(xmin, xmax, 10), np.ones(10)*reqe, 1.0, facecolor='red', alpha=0.08)
plt.text(xmin*1.05, 0.9*reqe, '%.1e' % reqe, ha='left', va='top', fontsize=11)
ax.set_xlim(xmin, xmax)
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2, loc='upper left')
plt.savefig(outdir+'/NonLinCalibrationsigmaE.pdf')
plt.close()
#same for R2s
fig = plt.figure()
plt.title(r'VIS Non-linearity Calibration: $\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
ax = fig.add_subplot(111)
ax.axhline(y=0, c='k', ls=':')
#loop over
keys = res.keys()
keys.sort()
vals = []
for key in keys:
dR2 = np.asarray(res[key][3])
std = np.std(dR2) / ref['R2']
vals.append(std)
print key, std
ax.scatter(key, std, c='b', marker='s', s=35, zorder=10)
#find the crossing
ks = np.asarray(keys)
values = np.asarray(vals)
f = interpolate.interp1d(ks, values, kind='cubic')
x = np.logspace(np.log10(ks.min()*1.05), np.log10(ks.max()*0.95), 1000)
vals = f(x)
ax.plot(x, vals, '--', c='0.2', zorder=10)
msk = vals < reqr2
maxn = np.max(x[msk])
plt.text(1e-3, 7e-5, r'Error for $R^{2}$ must be $\leq %.2e$ per cent' % (maxn * 100),
fontsize=11, ha='center', va='center')
ax.scatter(key, std, c='b', marker='s', label=r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
ax.axhline(y=reqr2, c='g', ls='--', label='Requirement')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(6e-6, 4e-3)
ax.set_xlabel('Error in the Non-linearity Correction')
ax.set_ylabel(r'$\frac{\sigma (R^{2})}{R_{ref}^{2}}$')
ax.fill_between(np.linspace(xmin, xmax, 10), np.ones(10)*reqr2, 1.0, facecolor='red', alpha=0.08)
plt.text(xmin*1.05, 0.9*reqr2, '%.1e' % reqr2, ha='left', va='top', fontsize=11)
ax.set_xlim(xmin, xmax)
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8, loc='upper left')
plt.savefig(outdir+'/NonLinCalibrationSigmaR2.pdf')
plt.close()
print '\nDelta results:'
for i, key in enumerate(res):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title(r'VIS Non-linearity Correction (%f): $\delta e$' % key)
de1 = np.asarray(res[key][4])
de2 = np.asarray(res[key][5])
de = np.asarray(res[key][6])
avg1 = np.mean(de1) ** 2
avg2 = np.mean(de2) ** 2
avg = np.mean(de) ** 2
#write down the values
print i, key, avg, avg1, avg2
plt.text(0.08, 0.9, r'$\left< \delta e_{1} \right>^{2} = %e$' % avg1, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.85, r'$\left< \delta e_{2}\right>^{2} = %e$' % avg2, fontsize=10, transform=ax.transAxes)
plt.text(0.08, 0.8, r'$\left< \delta | \bar{e} |\right>^{2} = %e$' % avg, fontsize=10, transform=ax.transAxes)
ax.hist(de, bins=15, color='y', alpha=0.2, label=r'$\delta | \bar{e} |$', normed=True, log=True)
ax.hist(de1, bins=15, color='b', alpha=0.5, label=r'$\delta e_{1}$', normed=True, log=True)
ax.hist(de2, bins=15, color='g', alpha=0.3, label=r'$\delta e_{2}$', normed=True, log=True)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\delta e_{i}\ , \ \ \ i \in [1,2]$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=2.0, ncol=2)
plt.savefig(outdir + '/NonlinearityEDelta%i.pdf' % i)
plt.close()
#same for R2s
for i, key in enumerate(res):
fig = plt.figure()
plt.title(r'VIS Non-linearity Correction (%f): $\frac{\delta R^{2}}{R_{ref}^{2}}$' % key)
ax = fig.add_subplot(111)
dR2 = np.asarray(res[key][7])
avg = np.mean(dR2 / ref['R2']) ** 2
ax.hist(dR2, bins=15, color='y', label=r'$\frac{\delta R^{2}}{R_{ref}^{2}}$', normed=True, log=True)
print i, key, avg
plt.text(0.1, 0.9, r'$\left<\frac{\delta R^{2}}{R^{2}_{ref}}\right>^{2} = %e$' % avg,
fontsize=10, transform=ax.transAxes)
ax.axvline(x=0, ls=':', c='k')
ax.set_ylabel('Probability Density')
ax.set_xlabel(r'$\frac{\delta R^{2}}{R_{ref}^{2}}$')
if timeStamp:
plt.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax.transAxes, alpha=0.2)
plt.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.8)
plt.savefig(outdir + '/NonlinearityDeltaSize%i.pdf' % i)
plt.close()
def testNonlinearityModel(file='data/psf12x.fits', oversample=12.0, sigma=0.75,
scale=2e5, amp=1e-3, phase=0.98, multiplier=1.5, outdir='.'):
"""
Apply the sinusoidal non-linearity model to a single scaled PSF, print the change
in ellipticity and R2, and write the scaled, non-linear, and ratio images to FITS
files. Relies on a module-level logger instance named log.
"""
#read in PSF and renormalize it to norm
data = pf.getdata(file)
data /= np.max(data)
data *= scale
#derive reference values from clean PSF
settings = dict(sampling=1.0 / oversample, sigma=sigma)
sh = shape.shapeMeasurement(data, log, **settings)
reference = sh.measureRefinedEllipticity()
print reference
#apply nonlinearity model to the scaled PSF
newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(data.copy(), amp, phase=phase, multi=multiplier)
newdata[newdata < 0.] = 0.
#measure e and R2 from the postage stamp image
sh = shape.shapeMeasurement(newdata.copy(), log, **settings)
results = sh.measureRefinedEllipticity()
print results
print reference['ellipticity'] - results['ellipticity'], reference['R2'] - results['R2']
fileIO.writeFITS(data, outdir + '/scaledPSF.fits', int=False)
fileIO.writeFITS(newdata, outdir + '/nonlinearData.fits', int=False)
fileIO.writeFITS(newdata / data, outdir + '/nonlinearRatio.fits', int=False)
def testGaussian():
from support import gaussians
log = lg.setUpLogger('delete.me')
data = gaussians.Gaussian2D(100, 100, 200, 200, 20, 20)['Gaussian']
data /= np.max(data)
data *= 2.e5
#measure shape
sh = shape.shapeMeasurement(data, log)
reference = sh.measureRefinedEllipticity()
print reference
#non-linearity shape
newdata = VISinstrumentModel.CCDnonLinearityModelSinusoidal(data, 0.2)
newdata[newdata < 0.] = 0.
sh = shape.shapeMeasurement(newdata, log)
nonlin = sh.measureRefinedEllipticity()
print nonlin
print reference['ellipticity'] - nonlin['ellipticity']
print reference['e1'] - nonlin['e1']
print reference['e2'] - nonlin['e2']
print reference['R2'] - nonlin['R2']
if __name__ == '__main__':
run = True
debug = True
plot = True
#different runs
runs = {'run1': dict(phase=0.0, multiplier=1.5),
'run2': dict(phase=0.5, multiplier=1.5),
'run3': dict(phase=1.0, multiplier=1.5),
'run4': dict(phase=0.98, multiplier=0.5),
'run5': dict(phase=0.98, multiplier=2.0),
'run6': dict(phase=0.98, multiplier=3.0),
'run7': dict(phase=0.98, multiplier=4.0)}
for key, value in runs.iteritems():
print key, value
if not os.path.exists(key):
os.makedirs(key)
#start a logger
log = lg.setUpLogger(key+'/nonlinearityCalibration.log')
log.info('Testing non-linearity calibration...')
log.info('Phase = %f' % value['phase'])
log.info('Multiplier = %f' % value['multiplier'])
if run:
if debug:
testNonlinearityModel(phase=value['phase'], outdir=key)
res = testNonlinearity(log, psfs=2000, file='data/psf1x.fits', oversample=1.0, phs=value['phase'])
else:
res = testNonlinearity(log)
fileIO.cPickleDumpDictionary(res, key+'/nonlinResults.pk')
if plot:
if not run:
res = cPickle.load(open(key+'/nonlinResults.pk'))
plotResults(res, outdir=key)
log.info('Run finished...\n\n\n')
|
sniemi/EuclidVisibleInstrument
|
analysis/nonlinearityCalibration.py
|
Python
|
bsd-2-clause
| 16,305
|
[
"Gaussian"
] |
4bc7ab8601d3accaa14616bbd2dbcb659ed15ede3af133208e6e8dbcad1859df
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A Flow is a container for Works, and works consist of tasks.
Flows are the final objects that can be dumped directly to a pickle file on disk.
Flows are executed using abirun (abipy).
"""
from __future__ import unicode_literals, division, print_function
import os
import sys
import time
import collections
import warnings
import shutil
import copy
import tempfile
import numpy as np
from pprint import pprint
from six.moves import map, StringIO
from tabulate import tabulate
from pydispatch import dispatcher
from collections import OrderedDict
from monty.collections import as_set, dict2namedtuple
from monty.string import list_strings, is_string, make_banner
from monty.operator import operator_from_str
from monty.io import FileLock
from monty.pprint import draw_tree
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.dev import deprecated
from monty.json import MSONable
from pymatgen.util.serialization import pmg_pickle_load, pmg_pickle_dump, pmg_serialize
from pymatgen.core.units import Memory
from pymatgen.util.io_utils import AtomicFile
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from . import wrappers
from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator
from .tasks import ScfTask, DdkTask, DdeTask, TaskManager, FixQueueCriticalError
from .utils import File, Directory, Editor
from .abiinspect import yaml_read_irred_perts
from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork, DteWork
from .events import EventsParser # autodoc_event_handlers
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Flow",
"G0W0WithQptdmFlow",
"bandstructure_flow",
"g0w0_flow",
"phonon_flow",
]
class FlowResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
#JSON_SCHEMA["properties"] = {
# "queries": {"type": "string", "required": True},
#}
@classmethod
def from_node(cls, flow):
"""Initialize an instance from a Work instance."""
new = super(FlowResults, cls).from_node(flow)
# Will put all files found in outdir in GridFs
d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()}
# Add the pickle file.
d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t")
new.add_gridfs_files(**d)
return new
class FlowError(NodeError):
"""Base Exception for :class:`Node` methods"""
class Flow(Node, NodeContainer, MSONable):
"""
This object is a container of works. Its main task is managing the
possible inter-dependencies among the works and the creation of
dynamic workflows that are generated by callbacks registered by the user.
.. attributes::
creation_date: String with the creation_date
pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol)
Important methods for constructing flows:
.. methods::
register_work: register (add) a work to the flow
register_task: register a work that contains only this task and return the work
allocate: propagate the workdir and manager of the flow to all the registered tasks
build:
build_and_pickle_dump:
"""
VERSION = "0.1"
PICKLE_FNAME = "__AbinitFlow__.pickle"
Error = FlowError
Results = FlowResults
@classmethod
def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=-1, task_class=ScfTask,
work_class=Work, remove=False):
"""
Construct a simple flow from a list of inputs. The flow contains a single Work with
tasks whose class is given by task_class.
.. warning::
Don't use this interface if you have dependencies among the tasks.
Args:
workdir: String specifying the directory where the works will be produced.
inputs: List of inputs.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
task_class: The class of the :class:`Task`.
work_class: The class of the :class:`Work`.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
if not isinstance(inputs, (list, tuple)): inputs = [inputs]
flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol, remove=remove)
work = work_class()
for inp in inputs:
work.register(inp, task_class=task_class)
flow.register_work(work)
return flow.allocate()
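# Example (sketch): build a flow with a single work from a list of
# hypothetical |AbinitInput| objects `gs_inputs`:
#
#     flow = Flow.from_inputs("flow_gs", inputs=gs_inputs)
#     flow.build_and_pickle_dump()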
@classmethod
def as_flow(cls, obj):
"""Convert obj into a Flow. Accepts filepath, dict, or Flow object."""
if isinstance(obj, cls): return obj
if is_string(obj):
return cls.pickle_load(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s into a Flow" % type(obj))
def __init__(self, workdir, manager=None, pickle_protocol=-1, remove=False):
"""
Args:
workdir: String specifying the directory where the works will be produced.
if workdir is None, the initialization of the working directory
is performed by flow.allocate(workdir).
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
super(Flow, self).__init__()
if workdir is not None:
if remove and os.path.exists(workdir): shutil.rmtree(workdir)
self.set_workdir(workdir)
self.creation_date = time.asctime()
if manager is None: manager = TaskManager.from_user_config()
self.manager = manager.deepcopy()
# List of works.
self._works = []
self._waited = 0
# List of callbacks that must be executed when the dependencies reach S_OK
self._callbacks = []
# Install default list of handlers at the flow level.
# Users can override the default list by calling flow.install_event_handlers in the script.
# Example:
#
# # flow level (common case)
# flow.install_event_handlers(handlers=my_handlers)
#
# # task level (advanced mode)
# flow[0][0].install_event_handlers(handlers=my_handlers)
#
self.install_event_handlers()
self.pickle_protocol = int(pickle_protocol)
# ID used to access mongodb
self._mongo_id = None
# Save the location of the script used to generate the flow.
# This trick won't work if we are running with nosetests, py.test etc
pyfile = find_top_pyfile()
if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">"
self.set_pyfile(pyfile)
# TODO
# Signal slots: a dictionary with the list
# of callbacks indexed by node_id and SIGNAL_TYPE.
# When the node changes its status, it broadcast a signal.
# The flow is listening to all the nodes of the calculation
# [node_id][SIGNAL] = list_of_signal_handlers
#self._sig_slots = slots = {}
#for work in self:
# slots[work] = {s: [] for s in work.S_ALL}
#for task in self.iflat_tasks():
# slots[task] = {s: [] for s in work.S_ALL}
@pmg_serialize
def as_dict(self, **kwargs):
"""
JSON serialization, note that we only need to save
a string with the working directory since the object will be
reconstructed from the pickle file located in workdir
"""
return {"workdir": self.workdir}
# This is needed for fireworks.
to_dict = as_dict
@classmethod
def from_dict(cls, d, **kwargs):
"""Reconstruct the flow from the pickle file."""
return cls.pickle_load(d["workdir"], **kwargs)
@classmethod
def temporary_flow(cls, manager=None):
"""Return a Flow in a temporary directory. Useful for unit tests."""
return cls(workdir=tempfile.mkdtemp(), manager=manager)
def set_workdir(self, workdir, chroot=False):
"""
Set the working directory. Cannot be set more than once unless chroot is True
"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
# Directories with (input|output|temporary) data.
self.workdir = os.path.abspath(workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
new = self.__class__.pickle_load(self.workdir)
# Rebinding the local name `self` would be a no-op; update the state instead.
self.__dict__.update(new.__dict__)
@classmethod
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):
"""
Loads the object from a pickle file and performs initial setup.
Args:
filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
spectator_mode: If True, the nodes of the flow are not connected by signals.
This option is usually used when we want to read a flow
in read-only mode and we want to avoid callbacks that can change the flow.
remove_lock:
True to remove the file lock if any (use it carefully).
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
if remove_lock and os.path.exists(filepath + ".lock"):
try:
os.remove(filepath + ".lock")
except OSError:
pass
with FileLock(filepath):
with open(filepath, "rb") as fh:
flow = pmg_pickle_load(fh)
# Check if versions match.
if flow.VERSION != cls.VERSION:
msg = ("File flow version %s != latest version %s\n."
"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
warnings.warn(msg)
flow.set_spectator_mode(spectator_mode)
# Recompute the status of each task since tasks that
# have been submitted previously might be completed.
flow.check_status()
return flow
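# Example (sketch): reload a flow from its working directory in read-only
# (spectator) mode, then inspect it:
#
#     flow = Flow.pickle_load("flow_workdir", spectator_mode=True)
#     flow.show_status()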
@classmethod
def pickle_loads(cls, s):
"""Reconstruct the flow from a string."""
strio = StringIO()
strio.write(s)
strio.seek(0)
flow = pmg_pickle_load(strio)
return flow
def __len__(self):
return len(self.works)
def __iter__(self):
return self.works.__iter__()
def __getitem__(self, slice):
return self.works[slice]
def set_pyfile(self, pyfile):
"""
Set the path of the python script used to generate the flow.
.. Example:
flow.set_pyfile(__file__)
"""
# TODO: Could use a frame hack to get the caller outside abinit
# so that pyfile is automatically set when we __init__ it!
self._pyfile = os.path.abspath(pyfile)
@property
def pyfile(self):
"""
Absolute path of the python script used to generate the flow. Set by `set_pyfile`
"""
try:
return self._pyfile
except AttributeError:
return None
@property
def pid_file(self):
"""The path of the pid file created by PyFlowScheduler."""
return os.path.join(self.workdir, "_PyFlowScheduler.pid")
def check_pid_file(self):
"""
This function checks if we are already running the :class:`Flow` with a :class:`PyFlowScheduler`.
Raises: Flow.Error if the pid file of the scheduler exists.
"""
if not os.path.exists(self.pid_file):
return 0
self.show_status()
raise self.Error("""\n\
pid_file
%s
already exists. There are two possibilities:
1) There's another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % self.pid_file)
@property
def pickle_file(self):
"""The path of the pickle file."""
return os.path.join(self.workdir, self.PICKLE_FNAME)
@property
def mongo_id(self):
return self._mongo_id
@mongo_id.setter
def mongo_id(self, value):
if self.mongo_id is not None:
raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id)
self._mongo_id = value
def mongodb_upload(self, **kwargs):
from abiflows.core.scheduler import FlowUploader
FlowUploader().upload(self, **kwargs)
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors
def get_mongo_info(self):
"""
Return a JSON dictionary with information on the flow.
Mainly used for constructing the info section in `FlowEntry`.
The default implementation is empty. Subclasses must implement it
"""
return {}
def mongo_assimilate(self):
"""
This function is called by client code when the flow is completed
Return a JSON dictionary with the most important results produced
by the flow. The default implementation is empty. Subclasses must implement it
"""
return {}
@property
def works(self):
"""List of :class:`Work` objects contained in self.."""
return self._works
@property
def all_ok(self):
"""True if all the tasks in works have reached `S_OK`."""
return all(work.all_ok for work in self)
@property
def num_tasks(self):
"""Total number of tasks"""
return len(list(self.iflat_tasks()))
@property
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks)
@property
def num_errored_tasks(self):
"""The number of tasks whose status is `S_ERROR`."""
return len(self.errored_tasks)
@property
def unconverged_tasks(self):
"""List of unconverged tasks."""
return list(self.iflat_tasks(status=self.S_UNCONVERGED))
@property
def num_unconverged_tasks(self):
"""The number of tasks whose status is `S_UNCONVERGED`."""
return len(self.unconverged_tasks)
@property
def status_counter(self):
"""
Returns a :class:`Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
# Count the number of tasks with given status in each work.
counter = self[0].status_counter
for work in self[1:]:
counter += work.status_counter
return counter
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved at this moment.
A core is reserved if the task is not running but
we have submitted the task to the queue manager.
"""
return sum(work.ncores_reserved for work in self)
@property
def ncores_allocated(self):
"""
Returns the number of cores allocated at this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(work.ncores_allocated for work in self)
@property
def ncores_used(self):
"""
Returns the number of cores used at this moment.
A core is used if there's a job that is running on it.
"""
return sum(work.ncores_used for work in self)
@property
def has_chrooted(self):
"""
Returns a string that evaluates to True if we have changed
the workdir for visualization purposes, e.g. we are using sshfs
to mount the remote directory where the `Flow` is located.
The string gives the previous workdir of the flow.
"""
try:
return self._chrooted_from
except AttributeError:
return ""
def chroot(self, new_workdir):
"""
Change the workdir of the :class:`Flow`. Mainly used for
allowing the user to open the GUI on the local host
and access the flow from remote via sshfs.
.. note::
Calling this method will make the flow go in read-only mode.
"""
self._chrooted_from = self.workdir
self.set_workdir(new_workdir, chroot=True)
for i, work in enumerate(self):
new_wdir = os.path.join(self.workdir, "w" + str(i))
work.chroot(new_wdir)
def groupby_status(self):
"""
Returns an ordered dictionary mapping the task status to
the list of named tuples (task, work_index, task_index).
"""
Entry = collections.namedtuple("Entry", "task wi ti")
d = collections.defaultdict(list)
for task, wi, ti in self.iflat_tasks_wti():
d[task.status].append(Entry(task, wi, ti))
# Sort keys according to their status.
return OrderedDict([(k, d[k]) for k in sorted(list(d.keys()))])
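# Example (sketch): print the (work_index, task_index) pairs for each status:
#
#     for status, entries in flow.groupby_status().items():
#         print(status, [(e.wi, e.ti) for e in entries])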
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks
def iflat_nodes(self, status=None, op="==", nids=None):
"""
Generator that produces a flat sequence of nodes.
If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes.
"""
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task
def node_from_nid(self, nid):
"""Return the node in the `Flow` with the given `nid` identifier"""
for node in self.iflat_nodes():
if node.node_id == nid: return node
raise ValueError("Cannot find node with node id: %s" % nid)
def iflat_tasks_wti(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def iflat_tasks(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the :class:`Flow`.
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
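# Example (sketch): loop over the tasks that have not reached S_OK yet:
#
#     for task in flow.iflat_tasks(status="S_OK", op="!="):
#         print(task)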
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
Generator that produces a flat sequence of tasks.
If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
def abivalidate_inputs(self):
"""
Run ABINIT in dry mode to validate all the inputs of the flow.
Return:
(isok, tuples)
isok is True if all inputs are ok.
tuples is List of `namedtuple` objects, one for each task in the flow.
Each namedtuple has the following attributes:
retcode: Return code. 0 if OK.
log_file: log file of the Abinit run, use log_file.read() to access its content.
stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.
Raises:
`RuntimeError` if executable is not in $PATH.
"""
if not self.allocated:
self.build()
#self.build_and_pickle_dump()
isok, tuples = True, []
for task in self.iflat_tasks():
t = task.input.abivalidate()
if t.retcode != 0: isok = False
tuples.append(t)
return isok, tuples
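# Example (sketch): validate all inputs before submitting the flow:
#
#     isok, tuples = flow.abivalidate_inputs()
#     if not isok:
#         for t in tuples:
#             if t.retcode != 0: print(t.stderr_file.read())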
def check_dependencies(self):
"""Test the dependencies of the nodes for possible deadlocks."""
deadlocks = []
for task in self.iflat_tasks():
for dep in task.deps:
if dep.node.depends_on(task):
deadlocks.append((task, dep.node))
if deadlocks:
lines = ["Detect wrong list of dependecies that will lead to a deadlock:"]
lines.extend(["%s <--> %s" % nodes for nodes in deadlocks])
raise RuntimeError("\n".join(lines))
def find_deadlocks(self):
"""
This function detects deadlocks.
Return:
named tuple with the tasks grouped in: deadlocked, runnables, running
"""
# Find jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs)
@property
def status(self):
"""The status of the :class:`Flow` i.e. the minimum of the status of its tasks and its works"""
return min(work.get_all_status(only_min=True) for work in self)
#def restart_unconverged_tasks(self, max_nlauch, excs):
# nlaunch = 0
# for task in self.unconverged_tasks:
# try:
# logger.info("Flow will try restart task %s" % task)
# fired = task.restart()
# if fired:
# nlaunch += 1
# max_nlaunch -= 1
# if max_nlaunch == 0:
# logger.info("Restart: too many jobs in the queue, returning")
# self.pickle_dump()
# return nlaunch, max_nlaunch
# except task.RestartError:
# excs.append(straceback())
# return nlaunch, max_nlaunch
def fix_abicritical(self):
"""
This function tries to fix critical events originating from ABINIT.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_ABICRITICAL):
count += task.fix_abicritical()
return count
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
logger.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
logger.info("Not able to fix task %s" % task)
return count
def show_info(self, **kwargs):
"""
Print info on the flow i.e. total number of tasks, works, tasks grouped by class.
Example:
Task Class Number
------------ --------
ScfTask 1
NscfTask 1
ScrTask 2
SigmaTask 6
"""
stream = kwargs.pop("stream", sys.stdout)
lines = [str(self)]
app = lines.append
app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) )
app("Number of tasks with a given class:\n")
# Build Table
data = [[cls.__name__, len(tasks)]
for cls, tasks in self.groupby_task_class().items()]
app(str(tabulate(data, headers=["Task Class", "Number"])))
stream.write("\n".join(lines))
def show_summary(self, **kwargs):
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n")
def show_status(self, **kwargs):
"""
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By default all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
"""
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
# Convert range to list of work indices.
wlist = list(range(wslice.start, wslice.stop, wslice.step))
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream)
if wlist is not None and i not in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qinfo = str(task.queue_id) + "@" + str(task.qname)
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
else:
table.append([task_name, str(task.status), qinfo,
para_info, events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
def show_events(self, status=None, nids=None):
"""
Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout
Args:
status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
if report:
print(make_banner(str(task), width=ncols, mark="="))
print(report)
#report = report.filter_types()
def show_corrections(self, status=None, nids=None):
"""
Show the corrections applied to the flow at run-time.
Args:
status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
return count
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
"""
Print the history of the flow to stdout.
Args:
status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
full_history: Print full info set, including nodes with an empty history.
metadata: print history metadata (experimental)
"""
nrows, ncols = get_terminal_size()
works_done = []
# Loop on the tasks and show the history of the work if it is not in works_done
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts)
print(self.history.to_string(metadata=metadata))
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
"""
Print the input of the tasks to the given stream.
Args:
varnames:
List of Abinit variables. If not None, only the variable in varnames
are selected and printed.
nids:
List of node identifiers. By default all nodes are shown
wslice:
Slice object used to select works.
stream:
File-like object, Default: sys.stdout
"""
if varnames is not None:
# Build dictionary varname --> [(task1, value), (task2, value), ...]
varnames = [s.strip() for s in list_strings(varnames)]
dlist = collections.defaultdict(list)
for task in self.select_tasks(nids=nids, wslice=wslice):
dstruct = task.input.structure.as_dict(fmt="abivars")
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
if value is not None:
dlist[vname].append((task, value))
for vname in varnames:
tv_list = dlist[vname]
if not tv_list:
stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
else:
stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
for i, (task, value) in enumerate(tv_list):
stream.write(" %s --> %s\n" % (str(value), task))
stream.write("\n")
else:
lines = []
for task in self.select_tasks(nids=nids, wslice=wslice):
s = task.make_input(with_header=True)
# Add info on dependencies.
if task.deps:
s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
else:
s += "\n\nDependencies: None"
lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
stream.writelines(lines)
def listext(self, ext, stream=sys.stdout):
"""
Print to the given `stream` a table with the list of the output files
with the given `ext` produced by the flow.
"""
nodes_files = []
for node in self.iflat_nodes():
filepath = node.outdir.has_abiext(ext)
if filepath:
nodes_files.append((node, File(filepath)))
if nodes_files:
print("Found %s files with extension %s produced by the flow" % (len(nodes_files), ext), file=stream)
table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2),
node.node_id, node.__class__.__name__]
for node, f in nodes_files]
print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
else:
print("No output file with extension %s has been produced by the flow" % ext, file=stream)
def select_tasks(self, nids=None, wslice=None):
"""
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
"""
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
return tasks
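# Example (sketch): select all tasks in the first two works via a slice object:
#
#     tasks = flow.select_tasks(wslice=slice(0, 2))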
def inspect(self, nids=None, wslice=None, **kwargs):
"""
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produce matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures.
"""
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs
def get_results(self, **kwargs):
results = self.Results.from_node(self)
results.update(self.get_dict_for_mongodb_queries())
return results
def get_dict_for_mongodb_queries(self):
"""
This function returns a dictionary with the attributes that will be
put in the mongodb document to facilitate the query.
Subclasses may want to replace or extend the default behaviour.
"""
d = {}
return d
# TODO: unreachable example code kept for reference:
#all_structures = [task.input.structure for task in self.iflat_tasks()]
#all_pseudos = [task.input.pseudos for task in self.iflat_tasks()]
def look_before_you_leap(self):
"""
This method should be called before running the calculation to make
sure that the most important requirements are satisfied.
Return:
String with the inconsistencies/errors found (empty if no error).
"""
errors = []
try:
self.check_dependencies()
except self.Error as exc:
errors.append(str(exc))
if self.has_db:
try:
self.manager.db_connector.get_collection()
except Exception as exc:
errors.append("""
ERROR while trying to connect to the MongoDB database:
Exception:
%s
Connector:
%s
""" % (exc, self.manager.db_connector))
return "\n".join(errors)
@property
def has_db(self):
"""True if flow uses `MongoDB` to store the results."""
return self.manager.has_db
def db_insert(self):
"""
Insert results in the `MongoDB` database.
"""
assert self.has_db
# Connect to MongoDb and get the collection.
coll = self.manager.db_connector.get_collection()
print("Mongodb collection %s with count %d", coll, coll.count())
start = time.time()
for work in self:
for task in work:
results = task.get_results()
pprint(results)
results.update_collection(coll)
results = work.get_results()
pprint(results)
results.update_collection(coll)
print("MongoDb update done in %s [s]" % time.time() - start)
results = self.get_results()
pprint(results)
results.update_collection(coll)
# Update the pickle file to save the mongo ids.
self.pickle_dump()
for d in coll.find():
pprint(d)
def tasks_from_nids(self, nids):
"""
Return the list of tasks associated to the given list of node identifiers (nids).
.. note::
Invalid ids are ignored
"""
if not isinstance(nids, collections.Iterable): nids = [nids]
tasks = []
for nid in nids:
for task in self.iflat_tasks():
if task.node_id == nid:
tasks.append(task)
break
return tasks
def wti_from_nids(self, nids):
"""Return the list of (w, t) indices from the list of node identifiers nids."""
return [task.pos for task in self.tasks_from_nids(nids)]
def open_files(self, what="o", status=None, op="==", nids=None, editor=None):
"""
Open the files of the flow inside an editor (command line interface).
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
status: if not None, only the tasks with this status are selected
op: status operator. Requires status. A task is selected
if task.status op status evaluates to true.
nids: optional list of node identifiers used to filter the tasks.
editor: Select the editor. None to use the default editor ($EDITOR shell env var)
"""
# Build list of files to analyze.
files = []
for task in self.iflat_tasks(status=status, op=op, nids=nids):
lst = task.select_files(what)
if lst:
files.extend(lst)
return Editor(editor=editor).edit_files(files)
def parse_timing(self, nids=None):
"""
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
"""
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
def show_abierrors(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def show_qouts(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def debug(self, status=None, nids=None):
"""
This method is usually used when the flow didn't complete successfully.
It analyzes the files produced by the tasks to facilitate debugging.
Info is printed to stdout.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red")
cprint(fh.read(), color="red")
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
# 3) If this check fails as well, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="))
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="))
cprint(s, color="red")
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red")
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red")
count += 1
break
if not count:
cprint("Houston, we could not find any error message that can explain the problem", color="magenta")
print("Number of tasks analyzed: %d" % ntasks)
def cancel(self, nids=None):
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "r") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue, or None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.manager.qadapter.get_njobs_in_queue(username=username)
def rmtree(self, ignore_errors=False, onerror=None):
"""Remove workdir (same API as shutil.rmtree)."""
if not os.path.exists(self.workdir): return
shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror)
def rm_and_build(self):
"""Remove the workdir and rebuild the flow."""
self.rmtree()
self.build()
def build(self, *args, **kwargs):
"""Make directories and files of the `Flow`."""
# Allocate here if not done yet!
if not self.allocated: self.allocate()
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Check the nodeid file in workdir
nodeid_path = os.path.join(self.workdir, ".nodeid")
if os.path.exists(nodeid_path):
with open(nodeid_path, "rt") as fh:
node_id = int(fh.read())
if self.node_id != node_id:
msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n"
"This means that you are trying to build a new flow in a directory already used by another flow.\n"
"Possible solutions:\n"
" 1) Change the workdir of the new flow.\n"
" 2) remove the old directory either with `rm -rf` or by calling the method flow.rmtree()\n"
% (node_id, nodeid_path, self.node_id))
raise RuntimeError(msg)
else:
with open(nodeid_path, "wt") as fh:
fh.write(str(self.node_id))
for work in self:
work.build(*args, **kwargs)
def build_and_pickle_dump(self, abivalidate=False):
"""
Build dirs and files of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
abivalidate: If True, all the input files are validated by calling
the abinit parser. If the validation fails, a ValueError is raised.
"""
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines))
@check_spectator
def pickle_dump(self):
"""
Save the status of the object in pickle format.
Returns 0 if success
"""
if self.has_chrooted:
warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
return -1
#if self.in_spectator_mode:
# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode")
# return -2
protocol = self.pickle_protocol
# Atomic transaction with FileLock.
with FileLock(self.pickle_file):
with AtomicFile(self.pickle_file, mode="wb") as fh:
pmg_pickle_dump(self, fh, protocol=protocol)
return 0
def pickle_dumps(self, protocol=None):
"""
Return a string with the pickle representation.
`protocol` selects the pickle protocol. self.pickle_protocol is
used if `protocol` is None
"""
strio = StringIO()
pmg_pickle_dump(self, strio,
protocol=self.pickle_protocol if protocol is None
else protocol)
return strio.getvalue()
def register_task(self, input, deps=None, manager=None, task_class=None):
"""
Utility function that generates a `Work` made of a single task
Args:
input: :class:`AbinitInput`
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the :class:`TaskManager` specified during the creation of the work.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
The generated :class:`Work` for the task, work[0] is the actual task.
"""
work = Work(manager=manager)
task = work.register(input, deps=deps, task_class=task_class)
self.register_work(work)
return work
def register_work(self, work, deps=None, manager=None, workdir=None):
"""
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies.
Args:
work: :class:`Work` object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the :class:`Work`.
Returns:
The registered :class:`Work`.
"""
if getattr(self, "workdir", None) is not None:
# The flow has a directory, build the name of the work's directory.
work_workdir = None
if workdir is None:
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
else:
work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
work.set_workdir(work_workdir)
if manager is not None:
work.set_manager(manager)
self.works.append(work)
if deps:
deps = [Dependency(node, exts) for node, exts in deps.items()]
work.add_deps(deps)
return work
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
"""
Registers a callback function that will generate the :class:`Task` of the :class:`Work`.
Args:
cbk_name: Name of the callback function (must be a bound method of self)
cbk_data: Additional data passed to the callback function.
deps: List of :class:`Dependency` objects specifying the dependency of the work.
work_class: :class:`Work` class to instantiate.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`.
Returns:
The :class:`Work` that will be finalized by the callback.
"""
# TODO: pass a Work factory instead of a class
# Directory of the Work.
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
# Create an empty work and register the callback
work = work_class(workdir=work_workdir, manager=manager)
self._works.append(work)
deps = [Dependency(node, exts) for node, exts in deps.items()]
if not deps:
raise ValueError("A callback must have deps!")
work.add_deps(deps)
# Wrap the callable in a Callback object and save
# useful info such as the index of the work and the callback data.
cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
self._callbacks.append(cbk)
return work
@property
def allocated(self):
"""Numer of allocations. Set by `allocate`."""
try:
return self._allocated
except AttributeError:
return 0
def allocate(self, workdir=None):
"""
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the :class:`TaskManager` to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
Return:
self
"""
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
return self
def use_smartio(self):
"""
This function should be called when the entire `Flow` has been built.
It tries to reduce the pressure on the hard disk by using Abinit smart-io
capabilities for those files that are not needed by other nodes.
Smart-io means that big files (e.g. WFK) are written only if the calculation
is unconverged so that we can restart from it. No output is produced if
convergence is achieved.
Return:
self
"""
if not self.allocated:
#raise RuntimeError("You must call flow.allocate before invoking flow.use_smartio")
return self.allocate()
for task in self.iflat_tasks():
children = task.get_children()
if not children:
# Change the input so that output files are produced
# only if the calculation is not converged.
task.history.info("Will disable IO for task")
task.set_vars(prtwf=-1, prtden=0) # TODO: prt1wf=-1,
else:
must_produce_abiexts = []
for child in children:
# Get the list of dependencies. Find that task
for d in child.deps:
must_produce_abiexts.extend(d.exts)
must_produce_abiexts = set(must_produce_abiexts)
#print("must_produce_abiexts", must_produce_abiexts)
# Variables supporting smart-io.
smart_prtvars = {
"prtwf": "WFK",
}
# Set the variable to -1 to disable the output
for varname, abiext in smart_prtvars.items():
if abiext not in must_produce_abiexts:
print("%s: setting %s to -1" % (task, varname))
task.set_vars({varname: -1})
return self
#def new_from_input_decorators(self, new_workdir, decorators)
# """
# Return a new :class:`Flow` in which all the Abinit inputs have been
# decorated by decorators.
# """
# # The tricky part here is how to assign a new id to the new nodes while maintaining the
# # correct dependencies! The safest approach would be to pass through __init__
# # instead of using copy.deepcopy()
# return flow
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def on_dep_ok(self, signal, sender):
# TODO
# Replace this callback with dynamic dispatch
# on_all_S_OK for work
# on_S_OK for task
logger.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal))
for i, cbk in enumerate(self._callbacks):
if not cbk.handle_sender(sender):
logger.info("%s does not handle sender %s" % (cbk, sender))
continue
if not cbk.can_execute():
logger.info("Cannot execute %s" % cbk)
continue
# Execute the callback and disable it
self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk))
cbk()
cbk.disable()
# Update the database.
self.pickle_dump()
@check_spectator
def finalize(self):
"""
This method is called when the flow is completed.
Return 0 if success
"""
if self.finalized:
self.history.warning("Calling finalize on an already finalized flow.")
return 1
self.history.info("Calling flow.finalize.")
self.finalized = True
if self.has_db:
self.history.info("Saving results in database.")
try:
self.flow.db_insert()
self.finalized = True
except Exception:
logger.critical("MongoDb insertion failed.")
return 2
# Here we remove the big output files if we have the garbage collector
# and the policy is set to "flow."
if self.gc is not None and self.gc.policy == "flow":
self.history.info("gc.policy set to flow. Will clean task output files.")
for task in self.iflat_tasks():
task.clean_output_files()
return 0
def set_garbage_collector(self, exts=None, policy="task"):
"""
Enable the garbage collector that will remove the big output files that are not needed.
Args:
exts: string or list with the Abinit file extensions to be removed. A default is
provided if exts is None
policy: Either `flow` or `task`. If policy is set to 'task', we remove the output
files as soon as the task reaches S_OK. If 'flow', the files are removed
only when the flow is finalized. This option should be used when we are dealing
with a dynamic flow with callbacks generating other tasks since a :class:`Task`
might not be aware of its children when it reaches S_OK.
"""
assert policy in ("task", "flow")
exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC")
gc = GarbageCollector(exts=set(exts), policy=policy)
self.set_gc(gc)
for work in self:
#work.set_gc(gc) # TODO Add support for Works and flow policy
for task in work:
task.set_gc(gc)
def connect_signals(self):
"""
Connect the signals within the `Flow`.
The `Flow` is responsible for catching the important signals raised from its works.
"""
# Connect the signals inside each Work.
for work in self:
work.connect_signals()
# Observe the nodes that must reach S_OK in order to call the callbacks.
for cbk in self._callbacks:
#cbk.enable()
for dep in cbk.deps:
logger.info("connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False)
# Associate to each signal the callback _on_signal
# (bound method of the node that will be called by `Flow`
# Each node will set its attribute _done_signal to True to tell
# the flow that this callback should be disabled.
# Register the callbacks for the Work.
#for work in self:
# slot = self._sig_slots[work]
# for signal in S_ALL:
# done_signal = getattr(work, "_done_ " + signal, False)
# if not done_sig:
# cbk_name = "_on_" + str(signal)
# cbk = getattr(work, cbk_name, None)
# if cbk is None: continue
# slot[work][signal].append(cbk)
# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False)
# Register the callbacks for the Tasks.
#self.show_receivers()
def disconnect_signals(self):
"""Disable the signals within the `Flow`."""
# Disconnect the signals inside each Work.
for work in self:
work.disconnect_signals()
# Disable callbacks.
for cbk in self._callbacks:
cbk.disable()
def show_receivers(self, sender=None, signal=None):
sender = sender if sender is not None else dispatcher.Any
signal = signal if signal is not None else dispatcher.Any
print("*** live receivers ***")
for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)):
print("receiver -->", rec)
print("*** end live receivers ***")
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks.
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
queue and the flow used by the scheduler won't see this change!
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals()
#def get_results(self, **kwargs)
def rapidfire(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire()
def single_shot(self, check_status=True, **kwargs):
"""
Use :class:`PyLauncher` to submit one task.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).single_shot()
def make_scheduler(self, **kwargs):
"""
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: If empty, the scheduler is initialized from the user configuration file.
If `filepath` is in kwargs, the scheduler is initialized from that file;
otherwise **kwargs are passed to the :class:`PyFlowScheduler` __init__ method.
"""
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
# Use from_file if filepath is present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
def batch(self, timelimit=None):
"""
Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used.
"""
from .launcher import BatchLauncher
# Create a batch dir from the flow.workdir.
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
def make_light_tarfile(self, name=None):
"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file."""
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.
Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + ".tar.gz"` if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 Mb"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns:
The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
return int(Memory.from_string(s).to("b"))
else:
return int(s)
if max_filesize is not None:
max_filesize = any2bytes(max_filesize)
if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts
def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None
# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None
# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None
# Exclude directories (use dir basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None
return tarinfo
back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))
import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, exclude=None, filter=filter)
# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)
os.chdir(back)
return name
#def abirobot(self, ext, check_status=True, nids=None):
# """
# Builds and return the :class:`Robot` subclass from the file extension `ext`.
# `nids` is an optional list of node identifiers used to filter the tasks in the flow.
# """
# from abipy.abilab import abirobot
# if check_status: self.check_status()
# return abirobot(flow=self, ext=ext, nids=nids):
@add_fig_kwargs
def plot_networkx(self, mode="network", with_edge_labels=False, ax=None, arrows=False,
node_size="num_cores", node_label="name_class", layout_type="spring", **kwargs):
"""
Use networkx to draw the flow with the connections among the nodes and
the status of the tasks.
Args:
mode: `network` to show connections, `status` to group tasks by status.
with_edge_labels: True to draw edge labels.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
arrows: if True draw arrowheads.
node_size: By default, the size of the node is proportional to the number of cores used.
node_label: By default, the task class is used to label the node.
layout_type: Get positions for all nodes using `layout_type`. e.g. pos = nx.spring_layout(g)
.. warning::
Requires networkx package.
"""
if not self.allocated: self.allocate()
import networkx as nx
# Build the graph
g = nx.Graph() if not arrows else nx.DiGraph()
edge_labels = {}
tasks = list(self.iflat_tasks())
for task in tasks:
g.add_node(task, name=task.name)
for child in task.get_children():
g.add_edge(task, child)
# TODO: Add getters! What about locked nodes!
i = [dep.node for dep in child.deps].index(task)
edge_labels[(task, child)] = " ".join(child.deps[i].exts)
# Get positions for all nodes using layout_type.
# e.g. pos = nx.spring_layout(g)
pos = getattr(nx, layout_type + "_layout")(g)
# Select function used to compute the size of the node
make_node_size = dict(num_cores=lambda task: 300 * task.manager.num_cores)[node_size]
# Select function used to build the label
make_node_label = dict(name_class=lambda task: task.pos_str + "\n" + task.__class__.__name__,)[node_label]
labels = {task: make_node_label(task) for task in g.nodes()}
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Select plot type.
if mode == "network":
nx.draw_networkx(g, pos, labels=labels,
node_color=[task.color_rgb for task in g.nodes()],
node_size=[make_node_size(task) for task in g.nodes()],
width=1, style="dotted", with_labels=True, arrows=arrows, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
elif mode == "status":
# Group tasks by status.
for status in self.ALL_STATUS:
tasks = list(self.iflat_tasks(status=status))
# Draw nodes (color is given by status)
node_color = status.color_opts["color"]
if node_color is None: node_color = "black"
#print("num nodes %s with node_color %s" % (len(tasks), node_color))
nx.draw_networkx_nodes(g, pos,
nodelist=tasks,
node_color=node_color,
node_size=[make_node_size(task) for task in tasks],
alpha=0.5, ax=ax
#label=str(status),
)
# Draw edges.
nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=arrows, ax=ax) # edge_color='r')
# Draw labels
nx.draw_networkx_labels(g, pos, labels, font_size=12, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal',
# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds)
else:
raise ValueError("Unknown value for mode: %s" % str(mode))
ax.axis("off")
return fig
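# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal driver for the Flow API above. It assumes `scf_input` is an
# AbinitInput built elsewhere and that a valid manager.yml is available;
# the "_example" name and the workdir are hypothetical.
def _example_run_flow(scf_input, workdir="flow_example"):
    flow = Flow(workdir=workdir)
    flow.register_task(scf_input)      # wraps the input in a single-task Work
    flow.allocate()                    # assign workdirs and TaskManagers
    flow.build_and_pickle_dump()       # create directories, save the pickle
    # Submit everything that is ready now; alternatively, hand the flow to
    # the scheduler with flow.make_scheduler().start().
    flow.rapidfire()
    return flow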
class G0W0WithQptdmFlow(Flow):
def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None):
"""
Build a :class:`Flow` for one-shot G0W0 calculations.
The computation of the q-points for the screening is parallelized with qptdm
i.e. we run independent calculations for each q-point and then we merge the final results.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: Input(s) for the SIGMA run(s).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
"""
super(G0W0WithQptdmFlow, self).__init__(workdir, manager=manager)
# Register the first work (GS + NSCF calculation)
bands_work = self.register_work(BandStructureWork(scf_input, nscf_input))
# Register the callback that will be executed the work for the SCR with qptdm.
scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input},
deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork)
# The last work contains a list of SIGMA tasks
# that will use the data produced in the previous two works.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
sigma_work = Work()
for sigma_input in sigma_inputs:
sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"})
self.register_work(sigma_work)
self.allocate()
def cbk_qptdm_workflow(self, cbk):
"""
This callback is executed by the flow when bands_work.nscf_task reaches S_OK.
It computes the list of q-points for the W(q,G,G'), creates nqpt tasks
in the second work (QptdmWork), and connects the signals.
"""
scr_input = cbk.data["input"]
# Use the WFK file produced by the second
# Task in the first Work (NSCF step).
nscf_task = self[0][1]
wfk_file = nscf_task.outdir.has_abiext("WFK")
work = self[1]
work.set_manager(self.manager)
work.create_tasks(wfk_file, scr_input)
work.add_deps(cbk.deps)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
# Add the garbage collector.
if self.gc is not None: task.set_gc(self.gc)
work.connect_signals()
work.build()
return work
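# --- Illustrative sketch (editor addition): instantiating the qptdm flow above.
# The four inputs are assumed AbinitInput objects prepared by the caller.
def _example_g0w0_qptdm(workdir, scf_inp, nscf_inp, scr_inp, sigma_inps):
    flow = G0W0WithQptdmFlow(workdir, scf_inp, nscf_inp, scr_inp, sigma_inps)
    # The SCR work is only a placeholder at this point: it is populated by
    # cbk_qptdm_workflow once bands_work.nscf_task reaches S_OK.
    return flow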
class FlowCallbackError(Exception):
"""Exceptions raised by FlowCallback."""
class FlowCallback(object):
"""
This object implements the callbacks executed by the :class:`Flow` when
particular conditions are fulfilled. See on_dep_ok method of :class:`Flow`.
.. note::
I decided to implement callbacks via this object instead of a standard
approach based on bound methods because:
1) pickle (v<=3) does not support the pickling/unpickling of bound methods
2) There's some extra logic and extra data needed for the proper functioning
of a callback at the flow level and this object provides an easy-to-use interface.
"""
Error = FlowCallbackError
def __init__(self, func_name, flow, deps, cbk_data):
"""
Args:
func_name: String with the name of the callback to execute.
func_name must be a bound method of flow with signature:
func_name(self, cbk)
where self is the Flow instance and cbk is the callback
flow: Reference to the :class:`Flow`
deps: List of dependencies associated to the callback
The callback is executed when all dependencies reach S_OK.
cbk_data: Dictionary with additional data that will be passed to the callback via self.
"""
self.func_name = func_name
self.flow = flow
self.deps = deps
self.data = cbk_data or {}
self._disabled = False
def __str__(self):
return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow)
def __call__(self):
"""Execute the callback."""
if self.can_execute():
# Get the bound method of the flow from func_name.
# We use this trick because pickle (format <=3) does not support bound methods.
try:
func = getattr(self.flow, self.func_name)
except AttributeError as exc:
raise self.Error(str(exc))
return func(self)
else:
raise self.Error("You tried to __call_ a callback that cannot be executed!")
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
def disable(self):
"""
Disable the callback. This is usually done after the callback has been executed.
"""
self._disabled = True
def enable(self):
"""Enable the callback"""
self._disabled = False
def handle_sender(self, sender):
"""
True if the callback is associated with the sender,
i.e. if the node that sent the signal appears in the
dependencies of the callback.
"""
return sender in [d.node for d in self.deps]
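# --- Illustrative sketch (editor addition): how a callback is registered.
# This mirrors the call made in G0W0WithQptdmFlow.__init__ above; the flow
# must expose a bound method named "cbk_qptdm_workflow" with signature
# (self, cbk), since callbacks are stored by name to stay picklable.
def _example_register_qptdm_cbk(flow, scr_input, bands_work):
    return flow.register_work_from_cbk(
        cbk_name="cbk_qptdm_workflow",
        cbk_data={"input": scr_input},
        deps={bands_work.nscf_task: "WFK"},
        work_class=QptdmWork,
    )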
# Factory functions.
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow
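# --- Illustrative usage (editor addition): `scf_inp` and `nscf_inp` are
# assumed AbinitInput objects; the workdir name is arbitrary.
def _example_bandstructure(scf_inp, nscf_inp, dos_inps=None):
    flow = bandstructure_flow("flow_bands", scf_inp, nscf_inp, dos_inputs=dos_inps)
    # The factory sets handy aliases: flow.scf_task, flow.nscf_task, flow.dos_tasks.
    return flow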
def g0w0_flow(workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for one-shot $G_0W_0$ calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: List of inputs for the SIGMA run.
flow_class: Flow class
manager: :class:`TaskManager` object used to submit the jobs.
Initialized from manager.yml if manager is None.
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs)
flow.register_work(work)
if allocate: flow.allocate()
return flow
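# --- Illustrative usage (editor addition): one-shot G0W0 with a list of
# sigma inputs; all inputs are assumed to be prepared AbinitInput objects.
def _example_g0w0(scf_inp, nscf_inp, scr_inp, sigma_inps):
    # allocate=False lets the caller register extra works before allocating.
    flow = g0w0_flow("flow_g0w0", scf_inp, nscf_inp, scr_inp, sigma_inps, allocate=False)
    flow.allocate()
    return flow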
class PhononFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for phonon calculations. Each work contains
nirred tasks where nirred is the number of irreducible phonon perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):
"""
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for
electrons, e.g. if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is a valid choice
whereas ph_ngqpt = (3, 3, 3) is not!
with_becs: True if Born effective charges are wanted.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`PhononFlow` object.
"""
flow = cls(workdir, manager=manager)
# Register the SCF task
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
# Make sure k-mesh and q-mesh are compatible.
scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt)
if any(scf_ngkpt % ph_ngqpt != 0):
raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt))
# Get the q-points in the IBZ from Abinit
qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0,0,0), kptopt=1).points
# Create a PhononWork for each q-point. Add DDK and E-field if q == Gamma and with_becs.
for qpt in qpoints:
if np.allclose(qpt, 0) and with_becs:
ph_work = BecWork.from_scf_task(scf_task)
else:
ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)
flow.register_work(ph_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(PhononFlow, self).finalize()
return retcode
class NonLinearCoeffFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for electric field calculations. Each work contains
nirred tasks where nirred is the number of irreducible perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
"""
Create a `NonLinearCoeffFlow` for second-order susceptibility calculations from
an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`NonLinearCoeffFlow` object.
"""
flow = cls(workdir, manager=manager)
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
nl_work = DteWork.from_scf_task(scf_task)
flow.register_work(nl_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(NonLinearCoeffFlow, self).finalize()
print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
with_nscf: add an nscf task in front of all phonon tasks to make sure the q-point is covered
with_ddk: add the ddk step
with_dde: add the dde step; if with_dde is set, with_ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
# the 0th element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
# ABINIT to get the list of irreducible perturbations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
# Run abinit on the front-end to get the list of irreducible perturbations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
except Exception:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
# the irreducible perturbations for a single q-point.
work_qpt = PhononWork()
if with_nscf:
# MG: Warning, this code assumes 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
rfdir[idir - 1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow
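# --- Illustrative usage (editor addition): the non-deprecated entry point for
# phonons is PhononFlow.from_scf_input. `scf_inp` must define "ngkpt", and the
# q-mesh must be a sub-mesh of the k-mesh (see the check in from_scf_input).
def _example_phonons(scf_inp):
    return PhononFlow.from_scf_input("flow_ph", scf_inp, ph_ngqpt=(2, 2, 2), with_becs=True)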
def phonon_conv_flow(workdir, scf_input, qpoints, params, manager=None, allocate=True):
"""
Create a :class:`Flow` to perform convergence studies for phonon calculations.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object defining a GS-SCF calculation.
qpoints: List of list of lists with the reduced coordinates of the q-point(s).
params:
To perform a convergence study wrt ecut: params=["ecut", [2, 4, 6]]
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
allocate: True if the flow should be allocated before returning.
Return:
:class:`Flow` object.
"""
qpoints = np.reshape(qpoints, (-1, 3))
flow = Flow(workdir=workdir, manager=manager)
for qpt in qpoints:
for gs_inp in scf_input.product(*params):
# Register the SCF task
work = flow.register_scf_task(gs_inp)
# Add the PhononWork connected to this scf_task.
flow.register_work(PhononWork.from_scf_task(work[0], qpoints=qpt))
if allocate: flow.allocate()
return flow
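# --- Illustrative usage (editor addition): phonon convergence wrt ecut at a
# single q-point; `scf_inp` is an assumed AbinitInput supporting .product().
def _example_ph_conv(scf_inp):
    return phonon_conv_flow("flow_phconv", scf_inp, qpoints=[0, 0, 0], params=["ecut", [4, 8, 12]])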
|
setten/pymatgen
|
pymatgen/io/abinit/flows.py
|
Python
|
mit
| 106,096
|
[
"ABINIT",
"pymatgen"
] |
1bfc5ade35f8d5de69ee9889856aa802ccd0d746774661a66f81c283997add6e
|
import os
import tempfile
import shutil
import yaml
import mdtraj
from subprocess import Popen, PIPE
from ensembler.core import get_most_advanced_ensembler_modeling_stage, default_project_dirnames
from ensembler.core import model_filenames_by_ensembler_stage, get_valid_model_ids, mpistate
from ensembler.core import YamlDumper, YamlLoader, logger, get_targets
from ensembler.utils import notify_when_done, set_loglevel
# includes types
molprobity_oneline_analysis_colnames = [
('#pdbFileName', None),
('x-H_type', None),
('chains', int),
('residues', int),
('nucacids', int),
('resolution', float),
('rvalue', float),
('rfree', float),
('clashscore', float),
('clashscoreB<40', float),
('minresol', float),
('maxresol', float),
('n_samples', int),
('pct_rank', int),
('pct_rank40', int),
('cbeta>0.25', int),
('numCbeta', int),
('rota<1%', int),
('numRota', int),
('ramaOutlier', int),
('ramaAllowed', int),
('ramaFavored', int),
('numRama', int),
('numbadbonds', int),
('numbonds', int),
('pct_badbonds', float),
('pct_resbadbonds', float),
('numbadangles', int),
('numangles', int),
('pct_badangles', float),
('pct_resbadangles', float),
('MolProbityScore', float),
('Mol_pct_rank', int),
]
molprobity_oneline_analysis_colnames_to_output = [
'MolProbityScore',
'clashscore',
'numRota',
'rota<1%',
'numRama',
'ramaOutlier',
'ramaFavored',
'cbeta>0.25',
'pct_badbonds',
'pct_badangles',
]
@notify_when_done
def molprobity_validation_multiple_targets(targetids=None, modeling_stage=None, loglevel=None):
"""
Calculate model quality using MolProbity ``oneline-analysis`` command.
For each target, this function outputs a text file named
``models/[targetid]/validation_scores_sorted-[method]-[ensembler_stage]`` which contains a list of
model IDs sorted by validation score. This can be used by the subsequent ``package_models`` command
to filter out models below a specified quality threshold.
Typically, this should be run after models have been refined to the desired extent (e.g. after
implicit or explicit MD refinement).
More detailed validation results are written to the individual model directories.
MPI-enabled.
Parameters
----------
targetids: list of str or str
modeling_stage: str
{None|build_models|refine_implicit_md|refine_explicit_md}
Default: None (automatically selects most advanced stage)
"""
set_loglevel(loglevel)
if targetids is None:
targetids = [target.id for target in get_targets()]
elif type(targetids) is str:
targetids = [targetids]
for targetid in targetids:
logger.info('Working on target {}'.format(targetid))
molprobity_validation(targetid=targetid, ensembler_stage=modeling_stage, loglevel=loglevel)
def molprobity_validation(targetid, ensembler_stage=None, loglevel=None):
set_loglevel(loglevel)
valid_model_ids = []
if mpistate.rank == 0:
if ensembler_stage is None:
ensembler_stage = get_most_advanced_ensembler_modeling_stage(targetid)
valid_model_ids = get_valid_model_ids(ensembler_stage, targetid)
# Broadcast unconditionally: bcast is a collective call, so every rank
# (including root) must participate, otherwise the non-root ranks deadlock.
ensembler_stage = mpistate.comm.bcast(ensembler_stage, root=0)
valid_model_ids = mpistate.comm.bcast(valid_model_ids, root=0)
nvalid_model_ids = len(valid_model_ids)
model_structure_filename = model_filenames_by_ensembler_stage[ensembler_stage]
models_target_dir = os.path.join(default_project_dirnames.models, targetid)
molprobity_results_filepath = os.path.join(
models_target_dir, 'validation_scores_sorted-molprobity-{}'.format(ensembler_stage)
)
molprobity_scores_sublist = []
for model_index in range(mpistate.rank, nvalid_model_ids, mpistate.size):
model_id = valid_model_ids[model_index]
logger.debug('MPI process {} working on model {}'.format(mpistate.rank, model_id))
molprobity_score = run_molprobity_oneline_analysis_and_write_results(
targetid,
model_id,
ensembler_stage,
model_structure_filename=model_structure_filename,
models_target_dir=models_target_dir,
)
molprobity_scores_sublist.append((model_id, molprobity_score))
molprobity_scores_gathered_list = mpistate.comm.gather(molprobity_scores_sublist, root=0)
if mpistate.rank == 0:
molprobity_scores_list_of_tuples = [item for sublist in molprobity_scores_gathered_list for item in sublist]
molprobity_scores_sorted = sorted(molprobity_scores_list_of_tuples, key=lambda x: x[1])
write_molprobity_scores_list(molprobity_scores_sorted, molprobity_results_filepath)
def run_molprobity_oneline_analysis_and_write_results(targetid,
model_id,
ensembler_stage,
model_structure_filename=None,
models_target_dir=None,
check_for_existing_results=True,
):
if model_structure_filename is None:
model_structure_filename = model_filenames_by_ensembler_stage[ensembler_stage]
if models_target_dir is None:
models_target_dir = os.path.join(default_project_dirnames.models, targetid)
results_output_filepath = os.path.join(
models_target_dir, model_id, 'molprobity-{}.yaml'.format(ensembler_stage)
)
if check_for_existing_results:
if os.path.exists(results_output_filepath):
with open(results_output_filepath) as results_output_file:
prev_results = yaml.load(stream=results_output_file, Loader=YamlLoader)
prev_molprobity_score = prev_results.get('MolProbityScore')
if prev_molprobity_score is not None:
logger.debug(
'Existing MolProbity score of {} found for model {}'.format(
prev_molprobity_score, model_id
)
)
return prev_molprobity_score
molprobity_results = run_molprobity_oneline_analysis(
targetid, model_id, model_structure_filename
)
if molprobity_results is None:
logger.debug('MolProbity returned no results for model {}'.format(model_id))
return None
logger.debug('MolProbity score of {} calculated for model {}'.format(molprobity_results.get('MolProbityScore'), model_id))
molprobity_score = molprobity_results.get('MolProbityScore')
if molprobity_score is not None:
write_molprobity_results_for_target(
molprobity_results, models_target_dir, model_id, ensembler_stage
)
return molprobity_score
def run_molprobity_oneline_analysis(targetid, model_id, model_structure_filename, tmp_model_dir=None):
"""
Runs MolProbity ``oneline-analysis`` for a single model in a temp dir, and cleans up afterwards.
"""
if tmp_model_dir is None:
tmp_model_dir = tempfile.mkdtemp()
try:
source_path = os.path.join(
default_project_dirnames.models,
targetid,
model_id,
model_structure_filename
)
dest_path = os.path.join(
tmp_model_dir,
model_id + '.pdb'
)
source_model_traj = mdtraj.load_pdb(source_path)
protein_only_traj = source_model_traj.atom_slice(
source_model_traj.top.select('protein')
)
protein_only_traj.save_pdb(dest_path)
stdout, stderr = molprobity_oneline_analysis_cmd(tmp_model_dir)
output_text = '\n'.join([stdout, stderr])
molprobity_results = parse_molprobity_oneline_analysis_output(output_text)
molprobity_model_results = molprobity_results.get(model_id)
finally:
shutil.rmtree(tmp_model_dir)
return molprobity_model_results
def molprobity_oneline_analysis_cmd(dir_path):
p = Popen(
[
'oneline-analysis',
dir_path
],
stdout=PIPE,
stderr=PIPE,
)
stdout, stderr = p.communicate()
return stdout, stderr
def parse_molprobity_oneline_analysis_output(output_text):
results_lines = []
for line in output_text.splitlines():
if len(line) == 0 or line[0] == '#':
continue
ncolons = line.count(':')
if ncolons == 32:
results_lines.append(line)
molprobity_results = {}
for results_line in results_lines:
results_line_split = results_line.split(':')
filename = results_line_split[0]
targetid = filename[: filename.find('.pdb')]
target_results = {}
for c, coltuple in enumerate(molprobity_oneline_analysis_colnames):
colname, coltype = coltuple
value = results_line_split[c]
try:
if coltype is not None:
value = coltype(value)
except (ValueError, TypeError):
pass
target_results[colname] = value
molprobity_results[targetid] = target_results
return molprobity_results
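# --- Illustrative sketch (editor addition): exercising the parser with a
# synthetic record. The 33-field colon-separated line is fabricated purely to
# show the expected format; real lines come from ``oneline-analysis``.
def _example_parse_oneline_output():
    fake_line = ':'.join(['model1.pdb'] + ['1'] * 32)   # 32 colons -> accepted
    results = parse_molprobity_oneline_analysis_output(fake_line)
    return results['model1']   # dict of typed column values for this model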
def write_molprobity_results_for_target(molprobity_model_results, models_target_dir, model_id, ensembler_stage):
output_dict = {
colname: molprobity_model_results[colname] for colname in molprobity_oneline_analysis_colnames_to_output
}
results_output_filepath = os.path.join(
models_target_dir, model_id, 'molprobity-{}.yaml'.format(ensembler_stage)
)
with open(results_output_filepath, 'w') as results_output_file:
yaml.dump(output_dict, stream=results_output_file, default_flow_style=False, Dumper=YamlDumper)
def write_molprobity_scores_list(molprobity_scores_sorted, molprobity_results_filepath):
output_text = '\n'.join(
['{} {}'.format(score_tuple[0], score_tuple[1]) for score_tuple in molprobity_scores_sorted]
)
with open(molprobity_results_filepath, 'w') as molprobity_results_file:
molprobity_results_file.write(output_text)
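# --- Illustrative driver (editor addition): a sketch assuming an ensembler
# project layout on disk and the ``oneline-analysis`` executable on PATH.
# The target id below is hypothetical.
if __name__ == '__main__':
    molprobity_validation_multiple_targets(
        targetids='EGFR_HUMAN_D0',
        modeling_stage='refine_implicit_md',
        loglevel='info',
    )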
|
danielparton/ensembler
|
ensembler/validation.py
|
Python
|
gpl-2.0
| 10,250
|
[
"MDTraj"
] |
6d8dedef2ed6af8bf69b44806d92b16f7f874f4898335173cd149fdb06b2400c
|
# coding: utf8
{
'!langcode!': 'nl',
'!langname!': 'Nederlands',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is een optionele expressie zoals "veld1=\'nieuwewaarde\'". Je kan de resultaten van een JOIN niet updaten of verwijderen.',
'"User Exception" debug mode. ': '"Gebruiker Exceptie" debug mode.',
'"User Exception" debug mode. An error ticket could be issued!': '"Gebruiker Exceptie" debug mode. Een error ticket kan worden aangemaakt!',
'%s': '%s',
'%s %%{row} deleted': '%s %%{row} verwijderd',
'%s %%{row} updated': '%s %%{row} geupdate',
'%s Recent Tweets': '%s Recente Tweets',
'%s students registered': '%s studenten geregistreerd',
'%Y-%m-%d': '%Y/%m/%d',
'%Y-%m-%d %H:%M:%S': '%Y/%m/%d %H:%M:%S',
'(requires internet access)': '(vereist internettoegang)',
'(something like "it-it")': '(zoiets als "it-it")',
'@markmin\x01Searching: **%s** %%{file}': '@markmin: zoeken: **%s** %%{file}',
'Abort': 'Afbreken',
'About': 'Over',
'about': 'over',
'About application': 'Over applicatie',
'Add breakpoint': 'Voeg breakpoint toe',
'Additional code for your application': 'Additionele code voor je applicatie',
'admin disabled because no admin password': 'admin uitgezet omdat er geen admin wachtwoord is',
'admin disabled because not supported on google app engine': 'admin uitgezet omdat dit niet ondersteund wordt op google app engine',
'admin disabled because too many invalid login attempts': 'admin is uitgezet omdat er te veel ongeldige login attempts zijn geweest',
'admin disabled because unable to access password file': 'admin is uitgezet omdat er geen toegang was tot het wachtwoordbestand',
'Admin is disabled because insecure channel': 'Admin is uitgezet vanwege onveilig kanaal',
'Admin language': 'Admintaal',
'administrative interface': 'administratieve interface',
'Administrator Password:': 'Administrator Wachtwoord:',
'and rename it:': 'en hernoem het:',
'App does not exist or your are not authorized': 'App bestaat niet of je bent niet geautoriseerd',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is uitgezet vanwege een onveilig kanaal',
'application "%s" uninstalled': 'applicatie "%s" gedeïnstalleerd',
'application %(appname)s installed with md5sum: %(digest)s': 'applicatie %(appname)s geïnstalleerd met md5sum: %(digest)s',
'Application cannot be generated in demo mode': 'Applicatie kan niet gegenereerd worden in demo-mode',
'application compiled': 'applicatie gecompileerd',
'application is compiled and cannot be designed': 'applicatie is gecompileerd en kan niet worden ontworpen',
'Application name:': 'Applicatienaam:',
'are not used': 'worden niet gebruikt',
'are not used yet': 'worden nog niet gebruikt',
'Are you sure you want to delete file "%s"?': 'Weet je zeker dat je bestand "%s" wilt verwijderen?',
'Are you sure you want to delete plugin "%s"?': 'Weet je zeker dat je plugin "%s"? wilt verwijderen?',
'Are you sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Are you sure you want to uninstall application "%s"?': 'Weet je zeker dat je applicatie "%s" wilt deïnstalleren?',
'arguments': 'argumenten',
'at char %s': 'bij karakter %s',
'at line %s': 'op regel %s',
'ATTENTION:': 'LET OP:',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'LET OP: Login heeft beveiligde (HTTPS) verbinding nodig of moet draaien op localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'LET OP: TESTEN IS NIET THREAD SAFE EN PROBEER NIET MEERDERE TESTEN TEGELIJK TE DOEN.',
'ATTENTION: you cannot edit the running application!': 'LET OP: je kan de draaiende applicatie niet bewerken!',
'Available databases and tables': 'Beschikbare databases en tabellen',
'back': 'terug',
'bad_resource': 'slechte_resource',
'Basics': 'Basics',
'Begin': 'Begin',
'breakpoint': 'breakpoint',
'Breakpoints': 'Breakpoints',
'breakpoints': 'breakpoints',
'Bulk Register': 'Bulk Registreer',
'Bulk Student Registration': 'Bulk Studentenregistratie',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors en sessies geleegd',
'can be a git repo': 'kan een git repo zijn',
'Cancel': 'Cancel',
'Cannot be empty': 'Kan niet leeg zijn',
'Cannot compile: there are errors in your app:': 'Kan niet compileren: er bevinden zich fouten in je app:',
'cannot create file': 'kan bestand niet maken',
'cannot upload file "%(filename)s"': 'kan bestand "%(filename)s" niet uploaden',
'Change admin password': 'Verander admin wachtwoord',
'check all': 'vink alles aan',
'Check for upgrades': 'Controleer voor upgrades',
'Check to delete': 'Vink aan om te verwijderen',
'Checking for upgrades...': 'Controleren voor upgrades...',
'Clean': 'Clean',
'Clear CACHE?': 'Leeg CACHE?',
'Clear DISK': 'Leeg DISK',
'Clear RAM': 'Leeg RAM',
'Click row to expand traceback': 'Klik rij om traceback uit te klappen',
'Click row to view a ticket': 'Klik rij om ticket te bekijken',
'code': 'code',
'Code listing': 'Code listing',
'collapse/expand all': 'alles inklappen/uitklappen',
'Command': 'Commando',
'Commit': 'Commit',
'Compile': 'Compileer',
'compiled application removed': 'Gecompileerde applicatie verwijderd',
'Condition': 'Conditie',
'contact_admin': 'contact_admin',
'continue': 'ga door',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Count': 'Count',
'Create': 'Maak',
'create': 'maak',
'create file with filename:': 'maak bestand met naam:',
'create plural-form': 'maak meervoudsvorm',
'Create rules': 'Maak regels',
'created by': 'gemaakt door',
'Created On': 'Gemaakt Op',
'crontab': 'crontab',
'Current request': 'Huidige request',
'Current response': 'Huidige response',
'Current session': 'Huidige sessie',
'currently running': 'draait op het moment',
'currently saved or': 'op het moment opgeslagen of',
'data uploaded': 'data geupload',
'database': 'database',
'database %s select': 'database %s select',
'database administration': 'database administratie',
'Date and Time': 'Datum en Tijd',
'db': 'db',
'Debug': 'Debug',
'defines tables': 'definieert tabellen',
'Delete': 'Verwijder',
'delete': 'verwijder',
'delete all checked': 'verwijder alle aangevinkten',
'delete plugin': 'verwijder plugin',
'Delete this file (you will be asked to confirm deletion)': 'Verwijder dit bestand (je zal worden gevraagd om de verwijdering te bevestigen)',
'Delete:': 'Verwijder:',
'deleted after first hit': 'verwijder na eerste hit',
'Deploy': 'Deploy',
'Deploy on Google App Engine': 'Deploy op Google App Engine (GAE)',
'Deploy to OpenShift': 'Deploy op OpenShift',
'Deployment form': 'Deploymentformulier',
'design': 'design',
'Detailed traceback description': 'Gedetailleerde traceback beschrijving',
'details': 'details',
'direction: ltr': 'directie: ltr',
'directory not found': 'directory niet gevonden',
'Disable': 'Zet uit',
'Disabled': 'Uitgezet',
'disabled in demo mode': 'uitgezet in demo-modus',
'disabled in multi user mode': 'uitgezet in multi-usermodus',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'docs': 'docs',
'done!': 'gereed!',
'Downgrade': 'Downgrade',
'download layouts': 'download layouts',
'download plugins': 'download plugins',
'Edit': 'Bewerk',
'edit all': 'bewerk alles',
'Edit application': 'Bewerk applicatie',
'edit controller': 'bewerk controller',
'Edit current record': 'Bewerk huidige record',
'edit views:': 'bewerk views:',
'Editing file "%s"': 'Bewerk bestand "%s"',
'Editing Language file': 'Taalbestand aan het bewerken',
'Editing Plural Forms File': 'Meervoudsvormenbestand aan het bewerken',
'Enable': 'Zet aan',
'enter a value': 'geef een waarde',
'Error': 'Error',
'Error logs for "%(app)s"': 'Error logs voor "%(app)s"',
'Error snapshot': 'Error snapshot',
'Error ticket': 'Errorticket',
'Errors': 'Errors',
'Exception %(extype)s: %(exvalue)s': 'Exceptie %(extype)s: %(exvalue)s',
'Exception %s': 'Exceptie %s',
'Exception instance attributes': 'Exceptie instantie attributen',
'Expand Abbreviation': 'Klap Afkorting uit',
'export as csv file': 'exporteer als csv-bestand',
'exposes': 'stelt bloot',
'exposes:': 'stelt bloot:',
'extends': 'extends',
'failed to compile file because:': 'niet gelukt om bestand te compileren omdat:',
'failed to reload module because:': 'niet gelukt om module te herladen omdat:',
'faq': 'faq',
'File': 'Bestand',
'file "%(filename)s" created': 'bestand "%(filename)s" gemaakt',
'file "%(filename)s" deleted': 'bestand "%(filename)s" verwijderd',
'file "%(filename)s" uploaded': 'bestand "%(filename)s" geupload',
'file "%s" of %s restored': 'bestand "%s" van %s hersteld',
'file changed on disk': 'bestand veranderd op schijf',
'file does not exist': 'bestand bestaat niet',
'file not found': 'bestand niet gevonden',
'file saved on %(time)s': 'bestand opgeslagen op %(time)s',
'file saved on %s': 'bestand bewaard op %s',
'Filename': 'Bestandsnaam',
'filter': 'filter',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'Functies zonder doctests zullen resulteren in [passed] tests.',
'GAE Email': 'GAE Email',
'GAE Output': 'GAE Output',
'GAE Password': 'GAE Password',
'Generate': 'Genereer',
'Get from URL:': 'Krijg van URL:',
'Git Pull': 'Git Pull',
'Git Push': 'Git Push',
'Globals##debug': 'Globals##debug',
'Go to Matching Pair': 'Ga naar Matchende Paar',
'go!': 'ga!',
'Google App Engine Deployment Interface': 'Google App Engine Deployment Interface',
'Google Application Id': 'Google Application Id',
'Goto': 'Ga naar',
'Help': 'Help',
'Hide/Show Translated strings': 'Verberg/Toon Vertaalde strings',
'Hits': 'Hits',
'Home': 'Home',
'honored only if the expression evaluates to true': 'wordt alleen gerespecteerd als de expressie waar is',
'If start the downgrade, be patient, it may take a while to rollback': 'Wees geduldig na het starten van de downgrade, het kan een tijd duren om de rollback uit te voeren',
'If start the upgrade, be patient, it may take a while to download': 'Wees geduldig na het starten van de upgrade, het kan een tijd duren om de download te voltooien',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Als het bovenstaande rapport een ticketnummer bevat, duidt dit op een fout in het uitvoeren van de controller, nog voor een poging wordt gedaan om de doctests uit te voeren. Dit wordt meestal veroorzaakt door een inspringfout of een fout buiten de functie-code. Een groene titel geeft aan dat alle tests (indien gedefinieerd) geslaagd zijn. In dit geval worden testresultaten niet getoond.',
'Import/Export': 'Import/Export',
'In development, use the default Rocket webserver that is currently supported by this debugger.': 'Gebruik tijdens ontwikkeling de default Rocket webserver die op dit moment door deze debugger ondersteund wordt.',
'includes': 'includes',
'index': 'index',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'inspect attributes': 'inspecteer attributen',
'Install': 'Install',
'Installed applications': 'Geïnstalleerde applicaties',
'Interaction at %s line %s': 'Interactie op %s regel %s',
'Interactive console': 'Interactieve console',
'internal error': 'interne error',
'internal error: %s': 'interne error: %s',
'Internal State': 'Interne State',
'Invalid action': 'Ongeldige actie',
'invalid circual reference': 'ongeldige circulaire referentie',
'invalid circular reference': 'ongeldige circulaire referentie',
'invalid password': 'ongeldig wachtwoord',
'invalid password.': 'ongeldig wachtwoord.',
'Invalid Query': 'Ongeldige Query',
'invalid request': 'ongeldige request',
'invalid request ': 'ongeldige request',
'invalid table names (auth_* tables already defined)': 'ongeldige tabelnamen (auth_* tabellen zijn al gedefinieerd)',
'invalid ticket': 'ongeldige ticket',
'Key': 'Key',
'Key bindings': 'Key bindings',
'Key bindings for ZenCoding Plugin': 'Key bindings voor ZenCoding Plugin',
'kill process': 'kill proces',
'language file "%(filename)s" created/updated': 'taalbestand "%(filename)s" gemaakt/geupdate',
'Language files (static strings) updated': 'Taalbestanden (statische strings) geupdate',
'languages': 'talen',
'Languages': 'Talen',
'Last saved on:': 'Laatst opgeslagen op:',
'License for': 'Licentie voor',
'Line number': 'Regelnummer',
'LineNo': 'RegelNr',
'loading...': 'laden...',
'locals': 'locals',
'Locals##debug': 'Locals##debug',
'Login': 'Login',
'login': 'login',
'Login to the Administrative Interface': 'Login op de Administratieve Interface',
'Logout': 'Logout',
'Main Menu': 'Hoofdmenu',
'Manage Admin Users/Students': 'Beheer Admin Gebruikers/Studenten',
'Manage Students': 'Beheer Studenten',
'Match Pair': 'Match Pair',
'merge': 'samenvoegen',
'Merge Lines': 'Voeg Regels Samen',
'Minimum length is %s': 'Minimale lengte is %s',
'Models': 'Modellen',
'models': 'modellen',
'Modified On': 'Veranderd Op',
'Modules': 'Modules',
'modules': 'modules',
'Must include at least %s %s': 'Moet ten minste %s %s bevatten',
'Must include at least %s lower case': 'Moet ten minste %s kleine letters bevatten',
'Must include at least %s of the following : %s': 'Moet ten minste %s van het volgende bevatten: %s',
'Must include at least %s upper case': 'Moet ten minste %s hoofdletters bevatten',
'new application "%s" created': 'nieuwe applicatie "%s" gemaakt',
'New Application Wizard': 'Nieuwe Applicatie Wizard',
'New application wizard': 'Nieuwe applicatie wizard',
'new plugin installed': 'nieuwe plugin geïnstalleerd',
'New Record': 'Nieuw Record',
'new record inserted': 'nieuw record ingevoegd',
'New simple application': 'Nieuwe eenvoudige applicatie',
'next': 'volgende',
'next 100 rows': 'volgende 100 rijen',
'Next Edit Point': 'Volgende Bewerkpunt',
'NO': 'NEE',
'No databases in this application': 'Geen databases in deze applicatie',
'No Interaction yet': 'Nog geen interactie',
'no match': 'geen match',
'no permission to uninstall "%s"': 'geen permissie om "%s" te deïnstalleren',
'No ticket_storage.txt found under /private folder': 'Geen ticket_storage.txt gevonden onder /private directory',
'Not Authorized': 'Geen Rechten',
'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.': 'Opmerking: bij een GitHub error status code 128, zorg ervoor dat het systeem en het account waarmee je deployt een corresponderende ssh key geconfigureerd hebben in het openshift account.',
"On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.": 'Om op productie deze debugger te gebruiken, moet je webserver configureren om een proces en meerdere threads te gebruiken.',
'online designer': 'online designer',
'OpenShift Deployment Interface': 'OpenShift Deployment Interface',
'OpenShift Output': 'OpenShift Output',
'or import from csv file': 'of importeer van csv-bestand',
'Original/Translation': 'Oorspronkelijk/Vertaling',
'Overwrite installed app': 'Overschrijf geïnstalleerde app',
'Pack all': 'Pack all',
'Pack compiled': 'Pack compiled',
'pack plugin': 'pack plugin',
'PAM authenticated user, cannot change password here': 'PAM geauthenticeerde gebruiker, kan wachtwoord hier niet wijzigen',
'password changed': 'wachtwoord gewijzigd',
'Path to appcfg.py': 'Pad naar appcfg.py',
'Path to local openshift repo root.': 'Pad naar lokale openshift repo root.',
'peek': 'gluur',
'Peeking at file': 'Gluren naar bestand',
'Please': 'Alstublieft',
'plugin': 'plugin',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" gedetecteerd',
'Plugin "%s" in application': 'Plugin "%s" in applicatie',
'plugin not specified': 'plugin niet gespecificeerd',
'plugins': 'plugins',
'Plugins': 'Plugins',
'Plural Form #%s': 'Meervoudsvorm #%s',
'Plural-Forms:': 'Meervoudsvormen:',
'Powered by': 'Powered by',
'previous 100 rows': 'vorige 100 rijen',
'Previous Edit Point': 'Vorige Bewerkpunt',
'Private files': 'Privébestanden',
'private files': 'privébestanden',
'Project Progress': 'Projectvoortgang',
'Pull': 'Pull',
'Push': 'Push',
'Query:': 'Query:',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'record': 'record',
'record does not exist': 'record bestaat niet',
'record id': 'record id',
'refresh': 'ververs',
'Reload routes': 'Herlaad routes',
'Remove compiled': 'Verwijder gecompileerde',
'Removed Breakpoint on %s at line %s': 'Breakpoint verwijderd op %s op regel %s',
'request': 'request',
'requires python-git, but not installed': 'vereist python-git, maar niet geïnstalleerd',
'resolve': 'oplossen',
'Resolve Conflict file': 'Los Conflictbestand op',
'response': 'antwoord',
'restart': 'herstart',
'restore': 'herstel',
'return': 'keer terug',
'revert': 'herstel',
'Rows in table': 'Rijen in tabel',
'Rows selected': 'Rijen geselecteerd',
'rules are not defined': 'regels zijn niet gedefinieerd',
'rules parsed with errors': 'regels geparsed met errors',
'rules:': 'regels:',
'Run tests': 'Draai testen',
'Run tests in this file': 'Draai testen in dit bestand',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Draai testen in dit bestand (om alle bestanden te draaien, kun je ook de button genaamd 'test' gebruiken)",
'Running on %s': 'Draait op %s',
'runonce': 'runonce',
'Save': 'Bewaar',
'Save via Ajax': 'Bewaar via Ajax',
'Saved file hash:': 'Opgeslagen bestandhash:',
'search': 'zoek',
'selected': 'geselecteerd',
'session': 'sessie',
'session expired': 'sessie verlopen',
'Set Breakpoint on %s at line %s: %s': 'Zet Breakpoint op %s op regel %s: %s',
'shell': 'shell',
'signup': 'signup',
'signup_requested': 'signup_requested',
'Singular Form': 'Enkelvoudsvorm',
'site': 'site',
'Site': 'Site',
'skip to generate': 'sla over om te genereren',
'some files could not be removed': 'sommige bestanden konden niet worden verwijderd',
'Sorry, could not find mercurial installed': 'Sorry, mercurial is niet geïnstalleerd',
'Start a new app': 'Start een nieuwe app',
'Start wizard': 'Start wizard',
'state': 'state',
'static': 'statisch',
'Static files': 'Statische bestanden',
'Step': 'Stap',
'step': 'stap',
'stop': 'stop',
'submit': 'submit',
'Submit': 'Submit',
'successful': 'succesvol',
'table': 'tabel',
'tags': 'tags',
'Temporary': 'Tijdelijk',
'test': 'test',
'Testing application': 'Applicatie Testen',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'De "query" is een conditie zoals "db.tabel1.veld1==\'waarde\'". Zoiets als "db.tabel1.veld1==db.tabel2.veld2" resulteert in een SQL JOIN. ',
'The app exists, was created by wizard, continue to overwrite!': 'De app bestaat, was gemaakt door de wizard, ga door om te overschrijven!',
'The app exists, was NOT created by wizard, continue to overwrite!': 'De app bestaat, was NIET gemaakt door de wizard, ga door om te overschrijven!',
'The application logic, each URL path is mapped in one exposed function in the controller': 'De applicatielogica; elk URL-pad wordt gemapt naar een blootgestelde functie in de controller',
'The data representation, define database tables and sets': 'De datarepresentatie, definieer databasetabellen en sets',
'The presentations layer, views are also known as templates': 'De presentatielaag, views zijn ook bekend als templates',
'There are no controllers': 'Er zijn geen controllers',
'There are no models': 'Er zijn geen modellen',
'There are no modules': 'Er zijn geen modules',
'There are no plugins': 'Er zijn geen plugins',
'There are no static files': 'Er zijn geen statische bestanden',
'There are no translators': 'Er zijn geen vertalingen',
'There are no translators, only default language is supported': 'Er zijn geen vertalingen, alleen de standaardtaal wordt ondersteund.',
'There are no views': 'Er zijn geen views',
'These files are not served, they are only available from within your app': 'Deze bestanden worden niet geserveerd, ze zijn alleen beschikbaar van binnen je app',
'These files are served without processing, your images go here': 'Deze bestanden worden geserveerd zonder verwerkingen, je afbeeldingen horen hier',
"This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.": 'Deze debugger werkt misschien niet goed, of je hebt geen threaded webserver, of je gebruikt multiple daemon processen.',
'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk': 'Dit is een experimentele feature en heeft meer tests nodig. Downgraden op eigen risico.',
'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'Dit is een experimentele feature en heeft meer tests nodig. Upgraden op eigen risico.',
'This is the %(filename)s template': 'Dit is de %(filename)s template',
"This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.": 'Op deze pagina kan je veranderingen naar een openshift app repo committen en pushen naar je cloud-instantie. Dit gaat er vanuit dat je de applicatie-instantie al gemaakt hebt met de web2py skeleton en de repo ergens op een bestandssysteem hebt waar de web2py-instantie toegang tot heeft. Deze functionaliteit vereist ook een geïnstalleerde GitPython welke beschikbaar is in het python pad van de runtime waar web2py ook in opereert.',
'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.': 'Op deze pagina kan je je applicatie uploaden naar Google App Engine. Let op dat je eerst je indexes lokaal aanmaakt. Dit kun je doen door de Google appserver te installeren en de app daarmee lokaal eenmaal te draaien, anders krijg je errors wanneer je records selecteert. Attentie: deployment kan een lange tijd duren, afhankelijk van de netwerksnelheid. Attentie: het overschrijft je app.yaml. SUBMIT NIET TWEE KEER!',
'this page to see if a breakpoint was hit and debug interaction is required.': 'deze pagina om te zien of een breakpoint geraakt is en debug-interactie nodig is.',
'This will pull changes from the remote repo for application "%s"?': 'Dit zal veranderingen van de remote repo voor applicatie "%s" pullen.',
'This will push changes to the remote repo for application "%s".': 'Dit zal veranderingen naar de remote repo voor applicatie "%s" pushen.',
'ticket': 'ticket',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'tickets': 'tickets',
'Time in Cache (h:m:s)': 'Tijd in Cache (u:m:s)',
'to previous version.': 'naar vorige versie.',
'To create a plugin, name a file/folder plugin_[name]': 'Om een plugin te maken, noem een bestand/directory plugin_[naam]',
'To emulate a breakpoint programatically, write:': 'Om een breakpoint programmatisch te emuleren, schrijf:',
'to use the debugger!': 'om de debugger te gebruiken!',
'toggle breakpoint': 'toggle breakpoint',
'Traceback': 'Traceback',
'Translation strings for the application': 'Vertaalstrings van de applicatie',
'try something like': 'probeer zoiets als',
'try view': 'try view',
'Type PDB debugger command in here and hit Return (Enter) to execute it.': 'Type PDB debugger commando hier en druk op Return (Enter) om het uit te voeren.',
'Type python statement in here and hit Return (Enter) to execute it.': 'Type python statement hier en druk op Return (Enter) om het uit te voeren.',
'Unable to check for upgrades': 'Onmogelijk om voor upgrades te checken',
'unable to create application "%s"': 'onmogelijk om applicatie "%s" te maken',
'unable to create application "%s" (it may exist already)': 'onmogelijk om applicatie "%s" te maken (mogelijk bestaat deze al)',
'unable to delete file "%(filename)s"': 'onmogelijk om bestand "%(filename)s" te verwijderen',
'unable to delete file plugin "%(plugin)s"': 'onmogelijk om pluginbestand "%(plugin)s" te verwijderen',
'Unable to determine the line number!': 'Onmogelijk om regelnummer te bepalen!',
'Unable to download app because:': 'Onmogelijk om app te downloaden omdat:',
'Unable to download because:': 'Onmogelijk om te downloaden omdat:',
'unable to download layout': 'onmogelijk om layout te downloaden',
'unable to download plugin: %s': 'onmogelijk om plugin te downloaden: %s',
'unable to install application "%(appname)s"': 'onmogelijk om applicatie "%(appname)s" te installeren',
'unable to parse csv file': 'onmogelijk om csv-bestand te parsen',
'unable to uninstall "%s"': 'onmogelijk om te deïnstalleren "%s"',
'unable to upgrade because "%s"': 'onmogelijk om te upgraden omdat "%s"',
'unauthorized': 'niet geautoriseerd',
'uncheck all': 'vink alles uit',
'uninstall': 'deïnstalleer',
'Uninstall': 'Deïnstalleer',
'Unsupported webserver working mode: %s': 'Niet ondersteunde webserver werkmodus: %s',
'update': 'update',
'update all languages': 'update alle talen',
'Update:': 'Update:',
'Upgrade': 'Upgrade',
'upgrade now': 'upgrade now',
'upgrade_web2py': 'upgrade_web2py',
'upload': 'upload',
'Upload a package:': 'Upload een package:',
'Upload and install packed application': 'Upload en installeer packed applicatie',
'upload file:': 'upload bestand:',
'upload plugin file:': 'upload pluginbestand:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Gebruik (...)&(...) voor AND, (...)|(...) voor OR, en ~(...) voor NOT om meer complexe queries te maken.',
'user': 'gebruiker',
'Using the shell may lock the database to other users of this app.': 'Het gebruik van de shell kan de database locken voor andere gebruikers van deze app.',
'value not allowed': 'waarde is niet toegestaan',
'variables': 'variabelen',
'Version': 'Versie',
'Version %s.%s.%s (%s) %s': 'Versie %s.%s.%s (%s) %s',
'Versioning': 'Versionering',
'view': 'view',
'Views': 'Views',
'views': 'views',
'WARNING:': 'WAARSCHUWING:',
'Web Framework': 'Web Framework',
'web2py apps to deploy': 'web2py apps om te deployen',
'web2py Debugger': 'web2py Debugger',
'web2py downgrade': 'web2py downgrade',
'web2py is up to date': 'web2py is up to date',
'web2py online debugger': 'web2py online debugger',
'web2py Recent Tweets': 'web2py Recente Tweets',
'web2py upgrade': 'web2py upgrade',
'web2py upgraded; please restart it': 'web2py geupgrade; herstart alstublieft',
'Wrap with Abbreviation': 'Wrap met Afkorting',
'WSGI reference name': 'WSGI reference name',
'YES': 'JA',
'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'Je kan ook een breakpoint zetten of verwijderen in het bewerkscherm met de Toggle Breakpoint-knop',
'You have one more login attempt before you are locked out': 'Je hebt nog één poging om in te loggen voordat je buitengesloten wordt',
'you must specify a name for the uploaded application': 'je moet een naam specificeren voor de geuploade applicatie',
'You need to set up and reach a': 'Je moet het volgende opzetten en bereiken:',
'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Je applicatie zal geblokkeerd zijn tot je een actie button aanklikt (volgende, step, ga door, etc.)',
'Your can inspect variables using the console bellow': 'Je kan variabelen inspecteren in de console hieronder',
}
|
ccpgames/eve-metrics
|
web2py/applications/admin/languages/nl.py
|
Python
|
mit
| 28,530
|
[
"Elk"
] |
57d8dffad294d9744d41ceef5a0077367c1fc7f234451cd5fb50d97b8f3dd357
|
"""
Performance test created using multi-mechanize to analyze time
for update processing with MySQL.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import random
import string
import time
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
def random_generator(size=6, chars=string.ascii_letters):
return ''.join(random.choice(chars) for x in range(size))
class Transaction(object):
def __init__(self):
self.JobDB = JobDB()
self.custom_timers = {}
def run(self):
start_time = time.time()
for i in range(0, random.randint(1000, 3000)):
key = random_generator()
value = random_generator(size=12)
self.JobDB.setJobParameter(2, key, value)
end_time = time.time()
self.custom_timers['Execution_Time'] = end_time - start_time
if __name__ == '__main__':
trans = Transaction()
trans.run()
print(trans.custom_timers)
|
yujikato/DIRAC
|
tests/Performance/MySQLJobMonitoring/test_scripts/update.py
|
Python
|
gpl-3.0
| 950
|
[
"DIRAC"
] |
0569ee544e90799e85b4529f17c71064faadc31b853721c2b99a5f0f69aaca71
|
#!/usr/bin/env python3
'''Manual DDNS testing'''
from dnstest.utils import *
from dnstest.test import Test
t = Test()
def check_soa(master, prev_soa):
soa_resp = master.dig("ddns.", "SOA")
compare(prev_soa, soa_resp.resp.answer, "SOA changed when it shouldn't")
def verify(master, zone, dnssec):
if not dnssec:
return
master.flush()
t.sleep(1)
master.zone_verify(zone)
def do_normal_tests(master, zone, dnssec=False):
# add node
check_log("Node addition")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
verify(master, zone, dnssec)
# add record to existing rrset
check_log("Node update - new record")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.5")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
verify(master, zone, dnssec)
# add records to existing rrset
check_log("Node update - new records")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "A", "1.2.3.7")
up.add("rrtest.ddns.", 3600, "A", "1.2.3.0")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# add rrset to existing node
check_log("Node update - new rrset")
up = master.update(zone)
up.add("rrtest.ddns.", 3600, "TXT", "abcedf")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "TXT")
resp.check(rcode="NOERROR", rdata="abcedf")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove rrset
check_log("Node update - rrset removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "TXT")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "TXT")
resp.check(rcode="NOERROR")
compare(resp.count(section="answer"), 0, "TXT rrset removal")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove record
check_log("Node update - record removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "A", "1.2.3.5")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", nordata="1.2.3.5")
resp.check(rcode="NOERROR", rdata="1.2.3.0")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
resp.check(rcode="NOERROR", rdata="1.2.3.7")
verify(master, zone, dnssec)
# remove records
check_log("Node update - records removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "A", "1.2.3.0")
up.delete("rrtest.ddns.", "A", "1.2.3.7")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NOERROR", nordata="1.2.3.0")
resp.check(rcode="NOERROR", nordata="1.2.3.7")
resp.check(rcode="NOERROR", rdata="1.2.3.4")
verify(master, zone, dnssec)
# remove node
check_log("Node removal")
up = master.update(zone)
up.delete("rrtest.ddns.", "ANY")
up.send("NOERROR")
resp = master.dig("rrtest.ddns.", "A")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# add delegation
check_log("Delegation addition")
up = master.update(zone)
up.add("deleg.ddns.", 3600, "NS", "a.deleg.ddns.")
up.add("a.deleg.ddns.", 3600, "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("deleg.ddns.", "NS")
resp.check_record(section="authority", rtype="NS", rdata="a.deleg.ddns.")
resp.check_record(section="additional", rtype="A", rdata="1.2.3.4")
verify(master, zone, dnssec)
# add CNAME to node with A records, should be ignored
check_log("Add CNAME to A node")
up = master.update(zone)
up.add("dns1.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("dns1.ddns.", "CNAME")
compare(resp.count(), 0, "Added CNAME when it shouldn't")
verify(master, zone, dnssec)
# create new node by adding RR + try to add CNAME
# the update should ignore the CNAME
check_log("Add new node + add CNAME to it")
up = master.update(zone)
up.add("rrtest2.ddns.", "3600", "MX", "10 something.ddns.")
up.add("rrtest2.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("rrtest2.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="MX", rdata="10 something.ddns.")
resp = master.dig("rrtest2.ddns.", "CNAME")
compare(resp.count(section="answer"), 0, "Added CNAME when it shouldn't")
verify(master, zone, dnssec)
# add A to CNAME node, should be ignored
check_log("Add A to CNAME node")
up = master.update(zone)
up.add("cname.ddns.", "3600", "A", "1.2.3.4")
up.send("NOERROR")
resp = master.dig("cname.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="A", nordata="1.2.3.4")
resp.check_record(rtype="CNAME", rdata="mail.ddns.")
verify(master, zone, dnssec)
# add new node with CNAME + add A to the same node, A should be ignored
check_log("Add new CNAME node + add A to it")
up = master.update(zone)
up.add("rrtest3.ddns.", "3600", "CNAME", "dont.ignore.me.ddns.")
up.add("rrtest3.ddns.", "3600", "TXT", "ignore")
up.send("NOERROR")
resp = master.dig("rrtest3.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="TXT", nordata="ignore")
resp.check_record(rtype="CNAME", rdata="dont.ignore.me.ddns.")
verify(master, zone, dnssec)
# add CNAME to CNAME node, should be replaced
check_log("CNAME to CNAME addition")
up = master.update(zone)
up.add("cname.ddns.", 3600, "CNAME", "new-cname.ddns.")
up.send("NOERROR")
resp = master.dig("cname.ddns.", "CNAME")
resp.check(rcode="NOERROR", rdata="new-cname.ddns.")
resp.check(rcode="NOERROR", nordata="mail.ddns.")
verify(master, zone, dnssec)
# add new CNAME node + another CNAME to it; last CNAME should stay in zone
check_log("Add two CNAMEs to a new node")
up = master.update(zone)
up.add("rrtest4.ddns.", "3600", "CNAME", "ignore.me.ddns.")
up.add("rrtest4.ddns.", "3600", "CNAME", "dont.ignore.me.ddns.")
up.send("NOERROR")
resp = master.dig("rrtest3.ddns.", "ANY")
resp.check(rcode="NOERROR")
resp.check_record(rtype="CNAME", rdata="dont.ignore.me.ddns.")
resp.check_record(rtype="CNAME", nordata="ignore.me.ddns")
verify(master, zone, dnssec)
# add SOA with higher than current serial, serial starting from 2010111213
check_log("Newer SOA addition")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with higher serial + remove it in the same UPDATE
# should result in replacing the SOA (i.e. the remove should be ignored)
check_log("Newer SOA addition + removal")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
up.delete("ddns.", "SOA",
"dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2012111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with higher serial + remove all SOA in the same UPDATE
# the removal should be ignored, only replacing the SOA
check_log("Newer SOA addition + removal of all SOA")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
up.delete("ddns.", "SOA")
up.send("NOERROR")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR")
resp.check_record(rtype="SOA", rdata="dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add SOA with lower serial, should be ignored
check_log("Older SOA addition")
up = master.update(zone)
up.add("ddns.", 3600, "SOA",
"dns1.ddns. hostmaster.ddns. 2010111213 10800 3600 1209600 7200")
resp = master.dig("ddns.", "SOA")
resp.check(rcode="NOERROR",
rdata="dns1.ddns. hostmaster.ddns. 2013111213 10800 3600 1209600 7200")
verify(master, zone, dnssec)
# add and remove the same record
check_log("Add and remove same record")
up = master.update(zone)
up.add("testaddrem.ddns.", 3600, "TXT", "record")
up.delete("testaddrem.ddns.", "TXT", "record")
up.send("NOERROR")
resp = master.dig("testaddrem.ddns.", "TXT")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# add and remove the same record, delete whole RRSet
check_log("Add and remove same record, delete whole")
up = master.update(zone)
up.add("testaddrem.ddns.", 3600, "TXT", "record")
up.delete("testaddrem.ddns.", "TXT")
up.send("NOERROR")
resp = master.dig("testaddrem.ddns.", "TXT")
resp.check(rcode="NXDOMAIN")
verify(master, zone, dnssec)
# remove non-existent record
check_log("Remove non-existent record")
up = master.update(zone)
up.delete("testaddrem.ddns.", "TXT", "record")
up.send("NOERROR")
verify(master, zone, dnssec)
# remove NS from APEX (NS should stay)
check_log("Remove NS")
up = master.update(zone)
up.delete("ddns.", "NS")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# remove all from APEX (NS should stay)
check_log("Remove all NS")
up = master.update(zone)
up.delete("ddns.", "ANY")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
resp = master.dig("ddns.", "MX")
resp.check(rcode="NOERROR")
compare(resp.count(section="answer"), 0, "MX rrset removal")
verify(master, zone, dnssec)
# remove all NS + add 1 new; result: 3 RRs
check_log("Remove all NS + add 1 new")
up = master.update(zone)
up.delete("ddns.", "NS")
up.add("ddns.", 3600, "NS", "dns3.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
resp.check_record(rtype="NS", rdata="dns3.ddns.")
verify(master, zone, dnssec)
# remove NSs one at a time + add one new
# the last one + the new one should remain in the zone
check_log("Remove NSs one at a time + add 1 new")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns2.ddns.")
up.delete("ddns.", "NS", "dns3.ddns.")
up.add("ddns.", 3600, "NS", "dns4.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns1.ddns.")
resp.check(nordata="dns2.ddns.")
resp.check_record(rtype="NS", rdata="dns3.ddns.")
resp.check_record(rtype="NS", rdata="dns4.ddns.")
verify(master, zone, dnssec)
# add new NS + remove all one at a time
# only the new NS should remain in the zone
check_log("Add 1 NS + remove all NSs one at a time")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns5.ddns.")
up.delete("ddns.", "NS", "dns3.ddns.")
up.delete("ddns.", "NS", "dns4.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns3.ddns.")
resp.check(nordata="dns4.ddns.")
resp.check_record(rtype="NS", rdata="dns5.ddns.")
verify(master, zone, dnssec)
# add new NS + remove the old one; only the new one should remain
check_log("Add 1 NS + remove old NS")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns5.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns5.ddns.")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
verify(master, zone, dnssec)
# remove old NS + add new NS; both should remain in the zone
check_log("Remove old NS + add 1 NS")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.add("ddns.", 3600, "NS", "dns2.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# remove NSs one at a time; the last one should remain in the zone
check_log("Remove NSs one at a time")
up = master.update(zone)
up.delete("ddns.", "NS", "dns1.ddns.")
up.delete("ddns.", "NS", "dns2.ddns.")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check(rcode="NOERROR", nordata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# add new NS + remove ALL NS; should ignore the remove and add the NS
check_log("Add new NS + remove ALL NSs at once")
up = master.update(zone)
up.add("ddns.", 3600, "NS", "dns1.ddns.")
up.delete("ddns.", "NS")
up.send("NOERROR")
resp = master.dig("ddns.", "NS")
resp.check_record(rtype="NS", rdata="dns1.ddns.")
resp.check_record(rtype="NS", rdata="dns2.ddns.")
verify(master, zone, dnssec)
# add empty generic record
check_log("Add empty generic record")
up = master.update(zone)
up.add("empty.ddns.", 300, "TYPE999", "\# 0")
up.send("NOERROR")
resp = master.dig("empty.ddns.", "TYPE999")
resp.check_record(rtype="TYPE999", rdata="\# 0")
verify(master, zone, dnssec)
# add NAPTR record (NAPTR has special processing)
check_log("Add NAPTR record")
up = master.update(zone)
up.add("3.1.1.1.1.1.1.1.1.2.7.9.9.ddns.", 172800, "NAPTR", "1 1 \"u\" \"E2U+sip\" \"!^.*$!sip:123@freeswitch.org!\" .")
up.send("NOERROR")
resp = master.dig("3.1.1.1.1.1.1.1.1.2.7.9.9.ddns.", "NAPTR")
resp.check_record(rtype="NAPTR", rdata="1 1 \"u\" \"E2U+sip\" \"!^.*$!sip:123@freeswitch.org!\" .")
verify(master, zone, dnssec)
# modify zone apex
check_log("Add TXT into apex")
up = master.update(zone)
up.add("ddns.", 300, "TXT", "This is apeeex!")
up.send("NOERROR")
resp = master.dig("ddns.", "TXT")
resp.check_record(rtype="TXT", rdata="This is apeeex!")
verify(master, zone, dnssec)
if dnssec:
# add DS for existing delegation
check_log("DS addition")
up = master.update(zone)
up.add("deleg.ddns.", 3600, "DS",
"54576 10 2 397E50C85EDE9CDE33F363A9E66FD1B216D788F8DD438A57A423A386869C8F06")
up.send("NOERROR")
resp = master.dig("deleg.ddns.", "NS", dnssec=True)
resp.check(rcode="NOERROR")
resp.check_record(section="authority", rtype="DS",
rdata="54576 10 2 397E50C85EDE9CDE33F363A9E66FD1B216D788F8DD438A57A423A386869C8F06")
resp.check_record(section="authority", rtype="NS", rdata="a.deleg.ddns.")
resp.check_record(section="authority", rtype="RRSIG")
verify(master, zone, dnssec)
# add extra DNSKEY
check_log("DNSKEY addition")
up = master.update(zone)
up.add("ddns.", "3600", "DNSKEY",
"256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
up.send("NOERROR")
resp = master.dig("ddns.", "DNSKEY")
resp.check(rcode="NOERROR",
rdata="256 3 5 AwEAAbs0AlA6xWQn/lECfGt3S6TaeEmgJfEVVEMh06iNMNWMRHOfbqLF h3N52Ob7trmzlrzGlGLPnAZJvMB8lsFGC5CtaLUBD+4xCh5tl5QifZ+y o+MJvPGlVQI2cs7aMWV9CyFrRmuRcJaSZU2uBz9KFJ955UCq/WIy5KqS 7qaKLzzN")
verify(master, zone, dnssec)
def do_refusal_tests(master, zone, dnssec=False):
forbidden = [{'type':"RRSIG", 'data':"A 5 2 1800 20140331062706 20140317095503 132 nic.cz. rc7TwX4GnExDQBNDCdbgf0PS7zabtymSKQ0VhmbFJAcYZxN+yFF9PXAo SpsDVR5H0PIuUM4oqoe7gsKfqqpTdOuB9M6cN/Mni99u7XfKHkopDjYc qTJXKn3x2TER4WkGtG5uthuSEc9lseCr6XqAqkDnJlUa6pB2a3mEHwu/ Elk="},
{'type':"NSEC", 'data':"0-0.se. NS SOA TXT RRSIG NSEC DNSKEY"},
{'type':"NSEC3", 'data':"1 0 10 B8399FF56C1C0C7E D0RS5MTK2AT5SVG2S9LRMM4L2J63V6GL NS"}]
# Store initial SOA
soa_resp = master.dig("ddns.", "SOA")
prev_soa = soa_resp.resp.answer
# Add DDNS forbidden records
check_log("Adding forbidden records")
for f in forbidden:
up = master.update(zone)
up.add("forbidden.ddns.", 3600, f['type'], f['data'])
up.send("REFUSED")
resp = master.dig("forbidden.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Remove DDNS forbidden records
check_log("Removing forbidden records")
for f in forbidden:
up = master.update(zone)
up.delete("forbidden.ddns.", f['type'])
up.send("REFUSED")
check_soa(master, prev_soa)
# Add normal records and then forbidden one
check_log("Refusal rollback")
up = master.update(zone)
up.add("rollback.ddns.", 3600, "TXT", "do not add me")
up.add("forbidden.ddns.", 3600, forbidden[0]['type'], forbidden[0]['data'])
up.send("REFUSED")
resp = master.dig("rollback.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
resp = master.dig("forbidden.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Add DNAME children
check_log("Add DNAME children rollback")
up = master.update(zone)
up.add("rollback.ddns.", 3600, "TXT", "do not add me")
up.add("under.dname.ddns.", 3600, "DNAME", "ddns.")
up.send("REFUSED")
resp = master.dig("rollback.ddns", "ANY")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
# Out-of-zone data
check_log("Out-of-zone data")
up = master.update(zone)
up.add("what.the.hell.am.i.doing.here.", "3600", "TXT", "I don't belong here")
up.send("NOTZONE")
check_soa(master, prev_soa)
# Remove 'all' SOA, ignore
check_log("Remove all SOA")
up = master.update(zone)
up.delete("ddns.", "SOA")
up.send("NOERROR")
check_soa(master, prev_soa)
# Remove specific SOA, ignore
check_log("Remove specific SOA")
up = master.update(zone)
up.delete("ddns.", "SOA", "dns1.ddns. hostmaster.ddns. 2011111213 10800 3600 1209600 7200")
up.send("NOERROR")
check_soa(master, prev_soa)
if dnssec:
# NSEC3PARAM for non-apex node
check_log("Non-apex NSEC3PARAM")
up = master.update(zone)
up.add("not.apex.ddns.", "0", "NSEC3PARAM", "1 0 10 B8399FF56C1C0C7E")
up.send("REFUSED")
resp = master.dig("not.apex.ddns", "NSEC3PARAM")
resp.check(rcode="NXDOMAIN")
check_soa(master, prev_soa)
def do_nsec3param_tests(master, zone):
assert(master.dig("ddns", "NSEC3PARAM").count() == 0)
# Add NSEC3PARAM
check_log("Add NSEC3PARAM")
up = master.update(zone)
up.add("ddns.", "0", "NSEC3PARAM", "1 0 10 CAFEBABE")
up.send("NOERROR")
resp = master.dig("ddns", "NSEC3PARAM")
resp.check(rcode="NOERROR", rdata="1 0 10 CAFEBABE")
verify(master, zone, dnssec=True)
# Change NSEC3PARAM - silently ignore
check_log("Change NSEC3PARAM")
up = master.update(zone)
up.add("ddns.", "0", "NSEC3PARAM", "1 0 10 BADDCAFE")
up.send("NOERROR")
resp = master.dig("ddns", "NSEC3PARAM")
resp.check(rcode="NOERROR", rdata="1 0 10 CAFEBABE")
resp.check(rcode="NOERROR", nordata="1 0 10 BADDCAFE")
verify(master, zone, dnssec=True)
# Delete and add NSEC3PARAM
check_log("Delete and add NSEC3PARAM")
up = master.update(zone)
up.delete("ddns.", "NSEC3PARAM", "1 0 10 CAFEBABE")
up.add("ddns.", "0", "NSEC3PARAM", "1 0 10 BADDCAFE")
up.send("NOERROR")
resp = master.dig("ddns", "NSEC3PARAM")
resp.check(rcode="NOERROR", nordata="1 0 10 CAFEBABE")
resp.check(rcode="NOERROR", rdata="1 0 10 BADDCAFE")
verify(master, zone, dnssec=True)
# Normal deletion tested in DNSSEC tests
zone = t.zone("ddns.", storage=".")
master_plain = t.server("knot")
t.link(zone, master_plain, ddns=True)
master_nsec = t.server("knot")
t.link(zone, master_nsec, ddns=True)
master_nsec.dnssec_enable = True
master_nsec.gen_key(zone, ksk=True, alg="RSASHA256")
master_nsec.gen_key(zone, alg="RSASHA256")
master_nsec3 = t.server("knot")
t.link(zone, master_nsec3, ddns=True)
master_nsec3.dnssec_enable = True
master_nsec3.enable_nsec3(zone)
master_nsec3.gen_key(zone, ksk=True, alg="RSASHA256")
master_nsec3.gen_key(zone, alg="RSASHA256")
t.start()
# DNSSEC-less test
check_log("============ Plain test ===========")
do_normal_tests(master_plain, zone)
do_refusal_tests(master_plain, zone)
# DNSSEC with NSEC test
check_log("============ NSEC test ============")
do_normal_tests(master_nsec, zone, dnssec=True)
do_refusal_tests(master_nsec, zone, dnssec=True)
do_nsec3param_tests(master_nsec, zone)
# DNSSEC with NSEC3 test
check_log("============ NSEC3 test ===========")
do_normal_tests(master_nsec3, zone, dnssec=True)
do_refusal_tests(master_nsec3, zone, dnssec=True)
t.end()
|
jkadlec/knot-dns-zoneapi
|
tests-extra/tests/ddns/basic/test.py
|
Python
|
gpl-3.0
| 22,405
|
[
"Elk"
] |
eaf7cff5a67d96a3481d6c4f0ed01f07d30f57a20494e664e8c12c0e92e2ebc7
|
from .NeuroML import NeuroML, loadNeuroML_L123
from .NetworkML import NetworkML
from .MorphML import MorphML
from .ChannelML import ChannelML
import tempfile
import logging
debug_ = False
if debug_:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='__moose.nml__.log'
)
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('moose.nml').addHandler(console)
_logger = logging.getLogger('moose.nml')
_logger.debug("Loading NML library")
|
subhacom/moose-core
|
python/moose/neuroml/__init__.py
|
Python
|
gpl-3.0
| 931
|
[
"MOOSE"
] |
437c55a9ac82fc5d78aac8d4628c6b252300777bdc1d349286795ccd272aafed
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
from optparse import OptionParser
logging.basicConfig( level = logging.INFO )
LOGGER = logging.getLogger( 'camelot.bin.camelot_admin' )
#
# Description of the application, out of which the help text as well as the
# __doc__ strings can be generated
#
description = """camelot_admin is a tool to assist in the creation and development of Camelot
projects. Use this application without any options to start a GUI to create
a new Camelot project.
"""
usage = "usage: %prog [options] command"
command_description = [
('startproject', """Starts a new project, use startproject project_name.
"""),
('apidoc', """Extract API documentation from source code, to be used
with sphinx.
"""),
('license_update', """Change the license header of a project,
use license_update project_directory license_file"""),
('to_pyside', """Takes a folder with PyQt4 source code and translates it to
PySide source code. Usage ::
to_pyside source destination"""),
]
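#
# Illustrative example invocations of the commands listed above (project and
# path names here are hypothetical, not from the original source):
#
#   camelot_admin startproject mymovies
#   camelot_admin apidoc camelot doc/api
#   camelot_admin license_update mymovies license.txt
#   camelot_admin to_pyside src_pyqt4 src_pyside
#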
#
# Generate a docstring in restructured text format
#
__doc__ = description
for command, desc in command_description:
__doc__ += "\n.. cmdoption:: %s\n\n" % command
for line in desc.split('\n'):
__doc__ += " %s\n" % line
#
# A custom OptionParser that generates help information on the commands
#
class CommandOptionParser(OptionParser):
def format_help(self, formatter=None):
command_help = """
The available commands are :
"""
command_help += '\n\n'.join(['%s\n%s\n%s'%(command,'-'*len(command), desc) for command,desc in command_description])
command_help += """
For the management of deployed Camelot applications, see camelot_manage
"""
return OptionParser.format_help(self) + ''.join(command_help)
def apidoc(source, destination):
import os
def is_module_directory( dirname ):
""":return: True if the directory is a python module, False otherwise"""
if not os.path.isdir( dirname ):
return False
if os.path.basename( dirname ).startswith( '.' ):
return False
return os.path.exists( os.path.join( dirname, '__init__.py' ) )
def document_directory(_arg, dirname, names):
"""create .rst files for a directory of source files"""
if is_module_directory( dirname ):
targetdir = os.path.join( destination, dirname[len(source)+1:] )
if not os.path.exists( targetdir ):
os.makedirs( targetdir )
srcs = [n for n in names if n.endswith('.py') and not n.startswith('__')]
dirs = [n for n in names if is_module_directory( os.path.join( dirname, n ) )]
title = os.path.basename( dirname )
if dirname == source:
title = '%s API'%(dirname.capitalize())
ifn = os.path.join( targetdir, 'index.rst' )
module_name = dirname.replace('/', '.')
with open( ifn, 'w' ) as index:
lines = [ '=' * len(title),
title,
'=' * len(title),
'',
'',
'.. automodule:: %s'%module_name,
' :members:'
]
toclines = []
for sn in srcs:
sname = sn[:-3]
sfn = sname + '.rst'
toclines.append( ' %s'%sfn )
with open( os.path.join( targetdir, sfn ), 'w' ) as sf:
slines = ['-' * len(sname),
sname,
'-' * len(sname),
'',
'',
'.. automodule:: %s'%(module_name + '.' + sname),
' :members:', ]
sf.writelines( '%s\n'%line for line in slines )
toclines.extend( ' %s/index.rst'%dn for dn in dirs )
if toclines:
toclines.sort()
lines.extend( ['',
'.. toctree::',
''] )
lines.extend( toclines )
index.writelines( '%s\n'%line for line in lines )
LOGGER.info( '%s : %s -> %s'%(dirname, destination, targetdir) )
os.path.walk(source, document_directory, None)
def license_update(project, license_file):
import os
new_license = open(license_file).read()
def translate_file(dirname, name):
"""translate a single file"""
filename = os.path.join(dirname, name)
LOGGER.info( 'converting %s'%filename )
source = open(filename).read()
output = open(filename, 'w')
output.write(new_license)
old_license_line = True
for line in source.split('\n'):
if not len(line) or line[0]!='#':
old_license_line = False
if not old_license_line:
output.write(line)
output.write('\n')
def translate_directory(_arg, dirname, names):
"""recursively translate a directory"""
for name in names:
if name.endswith('.py'):
translate_file(dirname, name)
os.path.walk(project, translate_directory, None)
def to_pyside( source, destination ):
import os.path
import shutil
# first take a copy
if os.path.exists( destination ):
shutil.rmtree( destination )
shutil.copytree( source, destination )
def replace_word(original_str, old_word, new_word):
return new_word.join((t for t in original_str.split(old_word)))
def translate_file( dirname, name ):
"""translate a single file"""
filename = os.path.join(dirname, name)
LOGGER.info( 'converting %s'%filename )
source = open(filename).read()
output = open(filename, 'w')
source = replace_word( source, 'PyQt4', 'PySide' )
source = replace_word( source, 'pyqtSlot', 'Slot' )
source = replace_word( source, 'pyqtSignal', 'Signal' )
source = replace_word( source, 'pyqtProperty', 'Property' )
source = replace_word( source, 'QtCore.QString', 'str' )
source = replace_word( source, 'QtCore.QVariant.', 'QtCore.Q')
source = replace_word( source, 'QtCore.QVariant(', '(' )
source = replace_word( source, 'QVariant', '()' )
source = replace_word( source, '.toByteArray()', '' )
source = replace_word( source, '.toString()', '' )
source = replace_word( source, '.toBool()', '' )
source = replace_word( source, '.toSize()', '' )
source = replace_word( source, '.toLongLong()', ', True' )
source = replace_word( source, ').isValid()', ')' )
output.write( source )
def translate_directory( dirname, names ):
"""recursively translate a directory"""
for name in names:
if name.endswith('.py'):
translate_file(dirname, name)
for ( dirpath, _dirnames, filenames ) in os.walk( destination ):
translate_directory( dirpath, filenames )
def startproject(module):
import os
from camelot.bin.meta import CreateNewProject, NewProjectOptions
if os.path.exists(module):
raise Exception('Directory %s already exists, cannot start a project in it'%module)
options = NewProjectOptions()
options.module = module
action = CreateNewProject()
action.start_project( options )
def meta():
"""launch meta camelot, in a separate function to make sure camelot_admin
does not depend on PyQt, otherwise it is impossible to run to_pyside without
having PyQt installed"""
from camelot.bin.meta import launch_meta_camelot
launch_meta_camelot()
commands = locals()
def main():
import camelot
parser = CommandOptionParser( description = description,
usage = usage,
version = camelot.__version__, )
( _options, args ) = parser.parse_args()
if not len( args ):
meta()
elif not len( args )>=2:
parser.print_help()
else:
command, command_args = args[0], args[1:]
commands[command]( *command_args )
if __name__ == '__main__':
main()
|
jeroendierckx/Camelot
|
camelot/bin/camelot_admin.py
|
Python
|
gpl-2.0
| 9,511
|
[
"VisIt"
] |
a284dffbf35fc1641241cc53601f84544545cc510346a06454c37841f06d39f7
|
# (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: redis
short_description: Use Redis DB for cache
description:
- This cache uses JSON formatted, per host records saved in Redis.
version_added: "1.9"
requirements:
- redis (python lib)
options:
_uri:
description:
- A colon separated string of connection information for Redis.
required: True
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
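# Illustrative configuration sketch (added; not part of the plugin itself):
# the options documented above map onto an ansible.cfg roughly like this,
# where the connection string is host:port:db as consumed by __init__ below;
# the concrete values are assumptions.
#
#   [defaults]
#   fact_caching = redis
#   fact_caching_connection = localhost:6379:0
#   fact_caching_prefix = ansible_facts
#   fact_caching_timeout = 86400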
import time
import json
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")
class CacheModule(BaseCacheModule):
"""
A caching module backed by redis.
Keys are maintained in a zset with their score being the timestamp
when they are inserted. This allows for the usage of 'zremrangebyscore'
to expire keys. This mechanism is used instead of a pattern-matched 'scan' for
performance.
"""
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(':')
else:
connection = []
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = {}
self._db = StrictRedis(*connection)
self._keys_set = 'ansible_cache_keys'
def _make_key(self, key):
return self._prefix + key
def get(self, key):
if key not in self._cache:
value = self._db.get(self._make_key(key))
# guard against the key not being removed from the zset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
self._cache[key] = json.loads(value)
return self._cache.get(key)
def set(self, key, value):
value2 = json.dumps(value)
if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
self._db.setex(self._make_key(key), int(self._timeout), value2)
else:
self._db.set(self._make_key(key), value2)
self._db.zadd(self._keys_set, time.time(), key)
self._cache[key] = value
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
def keys(self):
self._expire_keys()
return self._db.zrange(self._keys_set, 0, -1)
def contains(self, key):
self._expire_keys()
return (self._db.zrank(self._keys_set, key) is not None)
def delete(self, key):
del self._cache[key]
self._db.delete(self._make_key(key))
self._db.zrem(self._keys_set, key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
# TODO: there is probably a better way to do this in redis
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
|
maartenq/ansible
|
lib/ansible/plugins/cache/redis.py
|
Python
|
gpl-3.0
| 4,208
|
[
"Brian"
] |
db16fb36b000a8151d318e621c595881118a6f2912d860b019d35d41bb691673
|
import logging
import os
from pyjade import Compiler as _Compiler, Parser, register_filter
from pyjade.runtime import attrs
from pyjade.exceptions import CurrentlyNotSupported
from pyjade.utils import process
from django.conf import settings
class Compiler(_Compiler):
autocloseCode = 'if,ifchanged,ifequal,ifnotequal,for,block,filter,autoescape,with,trans,blocktrans,spaceless,comment,cache,localize,compress,verbatim'.split(',')
useRuntime = True
def __init__(self, node, **options):
if settings.configured:
options.update(getattr(settings,'PYJADE',{}))
super(Compiler, self).__init__(node, **options)
def visitCodeBlock(self,block):
self.buffer('{%% block %s %%}'%block.name)
if block.mode=='append': self.buffer('{{block.super}}')
self.visitBlock(block)
if block.mode=='prepend': self.buffer('{{block.super}}')
self.buffer('{% endblock %}')
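# Illustrative note (comment added, not in the original source): for a jade
# block named 'content' in 'append' mode, visitCodeBlock above emits
# '{% block content %}{{block.super}}<body>{% endblock %}' in Django template
# syntax; 'prepend' mode places {{block.super}} after the block body instead.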
def visitAssignment(self,assignment):
self.buffer('{%% __pyjade_set %s = %s %%}'%(assignment.name,assignment.val))
def visitMixin(self,mixin):
self.mixing += 1
if not mixin.call:
self.buffer('{%% __pyjade_kwacro %s %s %%}'%(mixin.name,mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% end__pyjade_kwacro %}')
elif mixin.block:
raise CurrentlyNotSupported("The mixin blocks are not supported yet.")
else:
self.buffer('{%% __pyjade_usekwacro %s %s %%}'%(mixin.name,mixin.args))
self.mixing -= 1
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
val = self.var_processor(val)
self.buf.append('{{%s%s}}'%(val,'|force_escape' if code.escape else ''))
else:
self.buf.append('{%% %s %%}'%code.val)
if code.block:
self.visit(code.block)
if not code.buffer:
codeTag = code.val.strip().split(' ',1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}'%codeTag)
def attributes(self,attrs):
return "{%% __pyjade_attrs %s %%}"%attrs
try:
try:
from django.template.base import add_to_builtins
except ImportError: # Django < 1.8
from django.template import add_to_builtins
add_to_builtins('pyjade.ext.django.templatetags')
except ImportError:
# Django 1.9 removed add_to_builtins and instead
# provides a setting to specify builtins:
# TEMPLATES['OPTIONS']['builtins'] = ['pyjade.ext.django.templatetags']
pass
from django.utils.translation import trans_real
try:
from django.utils.encoding import force_text as to_text
except ImportError:
from django.utils.encoding import force_unicode as to_text
def decorate_templatize(func):
def templatize(src, origin=None):
src = to_text(src, settings.FILE_CHARSET)
if origin and origin.endswith(".jade"):
html = process(src,compiler=Compiler)
else:
html = src
return func(html, origin)
return templatize
trans_real.templatize = decorate_templatize(trans_real.templatize)
try:
from django.contrib.markup.templatetags.markup import markdown
@register_filter('markdown')
def markdown_filter(x,y):
return markdown(x)
except ImportError:
pass
|
char101/pyjade
|
pyjade/ext/django/compiler.py
|
Python
|
mit
| 3,357
|
[
"VisIt"
] |
b457795cae054bb9d17573836a02fa1fd9ed79f2251eb4fec53a2569f57a2a91
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for nucleobase pairs.
| Geometries and reference interaction energies from Jurecka et al. PCCP 8 1985 (2006).
| Corrections implemented from footnote 92 of Burns et al., JCP 134 084107 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'HB'`` hydrogen-bonded systems (coplanar base-pairs)
- ``'MX'`` interstrand systems (adjacent base-pairs on different strands)
- ``'DD'`` stacked systems (adjacent base-pairs on same strand)
"""
import qcdb
# <<< JSCH Database Module >>>
dbse = 'JSCH'
# <<< Database Members >>>
HRXN = range(1, 125)
HRXN_SM = [9, 97]
HRXN_LG = [63]
HB = range(1, 39)
MX = range(39, 71)
DD = range(71, 125)
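# The indices partition the 124 complexes by motif: 1-38 hydrogen bonded (HB),
# 39-70 interstrand (MX), 71-124 stacked (DD).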
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
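# e.g. ACTV_CP['JSCH-1'] == ['JSCH-1-dimer', 'JSCH-1-monoA-CP', 'JSCH-1-monoB-CP'],
# with the +1/-1 weights kept in RXNM, so the counterpoise-corrected interaction
# energy is E(dimer) - E(monoA-CP) - E(monoB-CP).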
# <<< Reference Values >>>
BIND = {}
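# Reference interaction energies (kcal/mol, the usual qcdb convention) from the
# sources cited in the module docstring; negative values indicate binding.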
BIND['%s-%s' % (dbse, 1)] = -32.06
BIND['%s-%s' % (dbse, 2)] = -31.59
BIND['%s-%s' % (dbse, 3)] = -16.86
BIND['%s-%s' % (dbse, 4)] = -18.16
BIND['%s-%s' % (dbse, 5)] = -33.30
BIND['%s-%s' % (dbse, 6)] = -24.90
BIND['%s-%s' % (dbse, 7)] = -19.10
BIND['%s-%s' % (dbse, 8)] = -51.40
BIND['%s-%s' % (dbse, 9)] = -10.30
BIND['%s-%s' % (dbse, 10)] = -13.70
BIND['%s-%s' % (dbse, 11)] = -29.50
BIND['%s-%s' % (dbse, 12)] = -14.20
BIND['%s-%s' % (dbse, 13)] = -19.50
BIND['%s-%s' % (dbse, 14)] = -19.70
BIND['%s-%s' % (dbse, 15)] = -5.20
BIND['%s-%s' % (dbse, 16)] = -17.80
BIND['%s-%s' % (dbse, 17)] = -16.60
BIND['%s-%s' % (dbse, 18)] = -17.60
BIND['%s-%s' % (dbse, 19)] = -21.30
BIND['%s-%s' % (dbse, 20)] = -21.80
BIND['%s-%s' % (dbse, 21)] = -22.70
BIND['%s-%s' % (dbse, 22)] = -19.40
BIND['%s-%s' % (dbse, 23)] = -18.90
BIND['%s-%s' % (dbse, 24)] = -14.40
BIND['%s-%s' % (dbse, 25)] = -12.80
BIND['%s-%s' % (dbse, 26)] = -18.80
BIND['%s-%s' % (dbse, 27)] = -13.50
BIND['%s-%s' % (dbse, 28)] = -14.50
BIND['%s-%s' % (dbse, 29)] = -13.70
BIND['%s-%s' % (dbse, 30)] = -12.20
BIND['%s-%s' % (dbse, 31)] = -22.80
BIND['%s-%s' % (dbse, 32)] = -12.60
BIND['%s-%s' % (dbse, 33)] = -16.40
BIND['%s-%s' % (dbse, 34)] = -35.80
BIND['%s-%s' % (dbse, 35)] = -18.40
BIND['%s-%s' % (dbse, 36)] = -11.30
BIND['%s-%s' % (dbse, 37)] = -30.70
BIND['%s-%s' % (dbse, 38)] = -31.40
BIND['%s-%s' % (dbse, 39)] = -3.68
BIND['%s-%s' % (dbse, 40)] = -4.82
BIND['%s-%s' % (dbse, 41)] = -2.34
BIND['%s-%s' % (dbse, 42)] = -2.16
BIND['%s-%s' % (dbse, 43)] = 3.09
BIND['%s-%s' % (dbse, 44)] = 1.93
BIND['%s-%s' % (dbse, 45)] = -3.91
BIND['%s-%s' % (dbse, 46)] = 1.24
BIND['%s-%s' % (dbse, 47)] = -0.31
BIND['%s-%s' % (dbse, 48)] = 0.58
BIND['%s-%s' % (dbse, 49)] = -0.47
BIND['%s-%s' % (dbse, 50)] = -0.18
BIND['%s-%s' % (dbse, 51)] = -4.22
BIND['%s-%s' % (dbse, 52)] = -1.15
BIND['%s-%s' % (dbse, 53)] = 0.30
BIND['%s-%s' % (dbse, 54)] = -4.06
BIND['%s-%s' % (dbse, 55)] = 0.88
BIND['%s-%s' % (dbse, 56)] = -0.92
BIND['%s-%s' % (dbse, 57)] = -1.55
BIND['%s-%s' % (dbse, 58)] = 0.70
BIND['%s-%s' % (dbse, 59)] = -1.71
BIND['%s-%s' % (dbse, 60)] = -1.30
BIND['%s-%s' % (dbse, 61)] = -0.70
BIND['%s-%s' % (dbse, 62)] = 1.00
BIND['%s-%s' % (dbse, 63)] = -4.50
BIND['%s-%s' % (dbse, 64)] = 1.40
BIND['%s-%s' % (dbse, 65)] = -4.80
BIND['%s-%s' % (dbse, 66)] = -0.10
BIND['%s-%s' % (dbse, 67)] = -3.00
BIND['%s-%s' % (dbse, 68)] = -5.20
BIND['%s-%s' % (dbse, 69)] = 0.80
BIND['%s-%s' % (dbse, 70)] = 3.10
BIND['%s-%s' % (dbse, 71)] = -19.02
BIND['%s-%s' % (dbse, 72)] = -20.35
BIND['%s-%s' % (dbse, 73)] = -12.30
BIND['%s-%s' % (dbse, 74)] = -14.57
BIND['%s-%s' % (dbse, 75)] = 2.45
BIND['%s-%s' % (dbse, 76)] = -3.85
BIND['%s-%s' % (dbse, 77)] = -8.88
BIND['%s-%s' % (dbse, 78)] = -9.92
BIND['%s-%s' % (dbse, 79)] = 0.32
BIND['%s-%s' % (dbse, 80)] = 0.64
BIND['%s-%s' % (dbse, 81)] = -0.98
BIND['%s-%s' % (dbse, 82)] = -9.10
BIND['%s-%s' % (dbse, 83)] = -9.11
BIND['%s-%s' % (dbse, 84)] = -8.27
BIND['%s-%s' % (dbse, 85)] = -9.43
BIND['%s-%s' % (dbse, 86)] = -7.43
BIND['%s-%s' % (dbse, 87)] = -8.80
BIND['%s-%s' % (dbse, 88)] = -9.11
BIND['%s-%s' % (dbse, 89)] = -8.58
BIND['%s-%s' % (dbse, 90)] = -12.67
BIND['%s-%s' % (dbse, 91)] = -10.22
BIND['%s-%s' % (dbse, 92)] = -11.38
BIND['%s-%s' % (dbse, 93)] = -10.02
BIND['%s-%s' % (dbse, 94)] = -9.79
BIND['%s-%s' % (dbse, 95)] = -10.60
BIND['%s-%s' % (dbse, 96)] = -10.42
BIND['%s-%s' % (dbse, 97)] = -7.46
BIND['%s-%s' % (dbse, 98)] = -12.09
BIND['%s-%s' % (dbse, 99)] = -3.54
BIND['%s-%s' % (dbse, 100)] = -1.62
BIND['%s-%s' % (dbse, 101)] = -6.06
BIND['%s-%s' % (dbse, 102)] = -4.18
BIND['%s-%s' % (dbse, 103)] = -10.80
BIND['%s-%s' % (dbse, 104)] = -7.88
BIND['%s-%s' % (dbse, 105)] = -9.14
BIND['%s-%s' % (dbse, 106)] = -4.69
BIND['%s-%s' % (dbse, 107)] = -7.58
BIND['%s-%s' % (dbse, 108)] = -6.07
BIND['%s-%s' % (dbse, 109)] = -5.67
BIND['%s-%s' % (dbse, 110)] = -4.96
BIND['%s-%s' % (dbse, 111)] = -4.96
BIND['%s-%s' % (dbse, 112)] = -5.44
BIND['%s-%s' % (dbse, 113)] = -6.64
BIND['%s-%s' % (dbse, 114)] = -6.07
BIND['%s-%s' % (dbse, 115)] = -6.25
BIND['%s-%s' % (dbse, 116)] = -3.86
BIND['%s-%s' % (dbse, 117)] = -8.10
BIND['%s-%s' % (dbse, 118)] = -7.90
BIND['%s-%s' % (dbse, 119)] = -6.70
BIND['%s-%s' % (dbse, 120)] = -6.20
BIND['%s-%s' % (dbse, 121)] = -7.70
BIND['%s-%s' % (dbse, 122)] = -6.50
BIND['%s-%s' % (dbse, 123)] = -12.40
BIND['%s-%s' % (dbse, 124)] = -11.60
# <<< Comment Lines >>>
TAGL = {}
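# One human-readable label per reaction and per reagent; the HB-/IS-/ST-
# prefixes mirror the hydrogen-bonded, interstrand, and stacked subsets.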
TAGL['%s-%s' % (dbse, 1)] = 'HB-01 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 1)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = 'Guanine from G...C WC'
TAGL['%s-%s' % (dbse, 2)] = 'HB-02 mG...mC WC'
TAGL['%s-%s-dimer' % (dbse, 2)] = 'mG...mC WC'
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = 'methyl-Cytosine from mG...mC WC'
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = 'methyl-Guanine from mG...mC WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = 'methyl-Cytosine from mG...mC WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = 'methyl-Guanine from mG...mC WC'
TAGL['%s-%s' % (dbse, 3)] = 'HB-03 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 3)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = 'Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = 'Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = 'Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = 'Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 4)] = 'HB-04 mA...mT H'
TAGL['%s-%s-dimer' % (dbse, 4)] = 'mA...mT H'
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = 'methyl-Adenine from mA...mT H'
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = 'methyl-Thymine from mA...mT H'
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = 'methyl-Adenine from mA...mT H'
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = 'methyl-Thymine from mA...mT H'
TAGL['%s-%s' % (dbse, 5)] = 'HB-05 8oG...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 5)] = '8oG...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = '8-oxo-Guanine from 8oG...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = 'Cytosine from 8oG...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = '8-oxo-Guanine from 8oG...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = 'Cytosine from 8oG...C WC pl'
TAGL['%s-%s' % (dbse, 6)] = 'HB-06 I...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 6)] = 'I...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = 'Cytosine from I...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = 'Inosine from I...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = 'Cytosine from I...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = 'Inosine from I...C WC pl'
TAGL['%s-%s' % (dbse, 7)] = 'HB-07 G...U wobble'
TAGL['%s-%s-dimer' % (dbse, 7)] = 'G...U wobble'
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = 'Guanine from G...U wobble'
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = 'Uracil from G...U wobble'
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = 'Guanine from G...U wobble'
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = 'Uracil from G...U wobble'
TAGL['%s-%s' % (dbse, 8)] = 'HB-08 CCH+'
TAGL['%s-%s-dimer' % (dbse, 8)] = 'CCH+'
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = 'Cytosine from CCH+'
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = 'protonated-Cytosine from CCH+'
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = 'Cytosine from CCH+'
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = 'protonated-Cytosine from CCH+'
TAGL['%s-%s' % (dbse, 9)] = 'HB-09 U...U Calcutta pl'
TAGL['%s-%s-dimer' % (dbse, 9)] = 'U...U Calcutta pl'
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = 'Uracil from U...U Calcutta pl'
TAGL['%s-%s' % (dbse, 10)] = 'HB-10 U...U pl'
TAGL['%s-%s-dimer' % (dbse, 10)] = 'U...U pl'
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = 'Uracil from U...U pl'
TAGL['%s-%s' % (dbse, 11)] = 'HB-11 6tG...C WC pl'
TAGL['%s-%s-dimer' % (dbse, 11)] = '6tG...C WC pl'
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = 'Cytosine from 6tG...C WC pl'
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = '6-thio-Guanine from 6tG...C WC pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = 'Cytosine from 6tG...C WC pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = '6-thio-Guanine from 6tG...C WC pl'
TAGL['%s-%s' % (dbse, 12)] = 'HB-12 A...4tU WC'
TAGL['%s-%s-dimer' % (dbse, 12)] = 'A...4tU WC'
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = 'Adenine from A...4tU WC'
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = '4-thio-Uracil from A...4tU WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = 'Adenine from A...4tU WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = '4-thio-Uracil from A...4tU WC'
TAGL['%s-%s' % (dbse, 13)] = 'HB-13 2-aminoA...T'
TAGL['%s-%s-dimer' % (dbse, 13)] = '2-aminoA...T'
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = '2-amino-Adenine from 2-aminoA...T'
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = 'Thymine from 2-aminoA...T'
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = '2-amino-Adenine from 2-aminoA...T'
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = 'Thymine from 2-aminoA...T'
TAGL['%s-%s' % (dbse, 14)] = 'HB-14 2-aminoA...T pl'
TAGL['%s-%s-dimer' % (dbse, 14)] = '2-aminoA...T pl'
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = '2-amino-Adenine from 2-aminoA...T pl'
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = 'Thymine from 2-aminoA...T pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = '2-amino-Adenine from 2-aminoA...T pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = 'Thymine from 2-aminoA...T pl'
TAGL['%s-%s' % (dbse, 15)] = 'HB-15 A...F'
TAGL['%s-%s-dimer' % (dbse, 15)] = 'A...F'
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = 'Adenine from A...F'
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = 'difluorotoluene from A...F'
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = 'Adenine from A...F'
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = 'difluorotoluene from A...F'
TAGL['%s-%s' % (dbse, 16)] = 'HB-16 G...4tU'
TAGL['%s-%s-dimer' % (dbse, 16)] = 'G...4tU'
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = 'Guanine from G...4tU'
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = '4-thio-Uracil from G...4tU'
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = 'Guanine from G...4tU'
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = '4-thio-Uracil from G...4tU'
TAGL['%s-%s' % (dbse, 17)] = 'HB-17 G...2tU'
TAGL['%s-%s-dimer' % (dbse, 17)] = 'G...2tU'
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = 'Guanine from G...2tU'
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = '2-thio-Uracil from G...2tU'
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = 'Guanine from G...2tU'
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = '2-thio-Uracil from G...2tU'
TAGL['%s-%s' % (dbse, 18)] = 'HB-18 A...C pl'
TAGL['%s-%s-dimer' % (dbse, 18)] = 'A...C pl'
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = 'Cytosine from A...C pl'
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = 'Adenine from A...C pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = 'Cytosine from A...C pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = 'Adenine from A...C pl'
TAGL['%s-%s' % (dbse, 19)] = 'HB-19 G...G pl'
TAGL['%s-%s-dimer' % (dbse, 19)] = 'G...G pl'
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = 'Guanine from G...G pl'
TAGL['%s-%s' % (dbse, 20)] = 'HB-20 G...6tG pl'
TAGL['%s-%s-dimer' % (dbse, 20)] = 'G...6tG pl'
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = 'Guanine from G...6tG pl'
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = '6-thio-Guanine from G...6tG pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = 'Guanine from G...6tG pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = '6-thio-Guanine from G...6tG pl'
TAGL['%s-%s' % (dbse, 21)] = 'HB-21 6tG...G pl'
TAGL['%s-%s-dimer' % (dbse, 21)] = '6tG...G pl'
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = '6-thio-Guanine from 6tG...G pl'
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = 'Guanine from 6tG...G pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = '6-thio-Guanine from 6tG...G pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = 'Guanine from 6tG...G pl'
TAGL['%s-%s' % (dbse, 22)] = 'HB-22 G...A 1'
TAGL['%s-%s-dimer' % (dbse, 22)] = 'G...A 1'
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = 'Guanine from G...A 1'
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = 'Adenine from G...A 1'
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = 'Guanine from G...A 1'
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = 'Adenine from G...A 1'
TAGL['%s-%s' % (dbse, 23)] = 'HB-23 G...A 1 pl'
TAGL['%s-%s-dimer' % (dbse, 23)] = 'G...A 1 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = 'Adenine from G...A 1 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = 'Guanine from G...A 1 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = 'Adenine from G...A 1 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = 'Guanine from G...A 1 pl'
TAGL['%s-%s' % (dbse, 24)] = 'HB-24 G...A 2'
TAGL['%s-%s-dimer' % (dbse, 24)] = 'G...A 2'
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = 'Guanine from G...A 2'
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = 'Adenine from G...A 2'
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = 'Guanine from G...A 2'
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = 'Adenine from G...A 2'
TAGL['%s-%s' % (dbse, 25)] = 'HB-25 G...A 2 pl'
TAGL['%s-%s-dimer' % (dbse, 25)] = 'G...A 2 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 25)] = 'Guanine from G...A 2 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 25)] = 'Adenine from G...A 2 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 25)] = 'Guanine from G...A 2 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 25)] = 'Adenine from G...A 2 pl'
TAGL['%s-%s' % (dbse, 26)] = 'HB-26 G...A 3'
TAGL['%s-%s-dimer' % (dbse, 26)] = 'G...A 3'
TAGL['%s-%s-monoA-CP' % (dbse, 26)] = 'Guanine from G...A 3'
TAGL['%s-%s-monoB-CP' % (dbse, 26)] = 'Adenine from G...A 3'
TAGL['%s-%s-monoA-unCP' % (dbse, 26)] = 'Guanine from G...A 3'
TAGL['%s-%s-monoB-unCP' % (dbse, 26)] = 'Adenine from G...A 3'
TAGL['%s-%s' % (dbse, 27)] = 'HB-27 G...A 4'
TAGL['%s-%s-dimer' % (dbse, 27)] = 'G...A 4'
TAGL['%s-%s-monoA-CP' % (dbse, 27)] = 'Guanine from G...A 4'
TAGL['%s-%s-monoB-CP' % (dbse, 27)] = 'Adenine from G...A 4'
TAGL['%s-%s-monoA-unCP' % (dbse, 27)] = 'Guanine from G...A 4'
TAGL['%s-%s-monoB-unCP' % (dbse, 27)] = 'Adenine from G...A 4'
TAGL['%s-%s' % (dbse, 28)] = 'HB-28 A...A 1 pl'
TAGL['%s-%s-dimer' % (dbse, 28)] = 'A...A 1 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 28)] = 'Adenine from A...A 1 pl'
TAGL['%s-%s' % (dbse, 29)] = 'HB-29 A...A 2 pl'
TAGL['%s-%s-dimer' % (dbse, 29)] = 'A...A 2 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 29)] = 'Adenine from A...A 2 pl'
TAGL['%s-%s' % (dbse, 30)] = 'HB-30 A...A 3 pl'
TAGL['%s-%s-dimer' % (dbse, 30)] = 'A...A 3 pl'
TAGL['%s-%s-monoA-CP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoB-CP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 30)] = 'Adenine from A...A 3 pl'
TAGL['%s-%s' % (dbse, 31)] = 'HB-31 8oG...G'
TAGL['%s-%s-dimer' % (dbse, 31)] = '8oG...G'
TAGL['%s-%s-monoA-CP' % (dbse, 31)] = 'Guanine from 8oG...G'
TAGL['%s-%s-monoB-CP' % (dbse, 31)] = '8-oxo-Guanine from 8oG...G'
TAGL['%s-%s-monoA-unCP' % (dbse, 31)] = 'Guanine from 8oG...G'
TAGL['%s-%s-monoB-unCP' % (dbse, 31)] = '8-oxo-Guanine from 8oG...G'
TAGL['%s-%s' % (dbse, 32)] = 'HB-32 2tU....2tU pl'
TAGL['%s-%s-dimer' % (dbse, 32)] = '2tU....2tU pl'
TAGL['%s-%s-monoA-CP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoB-CP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoA-unCP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s-monoB-unCP' % (dbse, 32)] = '2-thio-Uracil from 2tU....2tU pl'
TAGL['%s-%s' % (dbse, 33)] = 'HB-33 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 33)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 33)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 33)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 33)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 33)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 34)] = 'HB-34 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 34)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 34)] = 'methyl-Cytosine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 34)] = 'methyl-Guanine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 34)] = 'methyl-Cytosine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 34)] = 'methyl-Guanine from G...C WC'
TAGL['%s-%s' % (dbse, 35)] = 'HB-35 A...T WC'
TAGL['%s-%s-dimer' % (dbse, 35)] = 'A...T WC'
TAGL['%s-%s-monoA-CP' % (dbse, 35)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-CP' % (dbse, 35)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 35)] = 'methyl-Adenine from A...T WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 35)] = 'methyl-Thymine from A...T WC'
TAGL['%s-%s' % (dbse, 36)] = 'HB-36 G...A HB'
TAGL['%s-%s-dimer' % (dbse, 36)] = 'G...A HB'
TAGL['%s-%s-monoA-CP' % (dbse, 36)] = 'Guanine from G...A HB'
TAGL['%s-%s-monoB-CP' % (dbse, 36)] = 'Adenine from G...A HB'
TAGL['%s-%s-monoA-unCP' % (dbse, 36)] = 'Guanine from G...A HB'
TAGL['%s-%s-monoB-unCP' % (dbse, 36)] = 'Adenine from G...A HB'
TAGL['%s-%s' % (dbse, 37)] = 'HB-37 C...G WC'
TAGL['%s-%s-dimer' % (dbse, 37)] = 'C...G WC'
TAGL['%s-%s-monoA-CP' % (dbse, 37)] = 'Cytosine from C...G WC'
TAGL['%s-%s-monoB-CP' % (dbse, 37)] = 'Guanine from C...G WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 37)] = 'Cytosine from C...G WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 37)] = 'Guanine from C...G WC'
TAGL['%s-%s' % (dbse, 38)] = 'HB-38 G...C WC'
TAGL['%s-%s-dimer' % (dbse, 38)] = 'G...C WC'
TAGL['%s-%s-monoA-CP' % (dbse, 38)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoB-CP' % (dbse, 38)] = 'Cytosine from G...C WC'
TAGL['%s-%s-monoA-unCP' % (dbse, 38)] = 'Guanine from G...C WC'
TAGL['%s-%s-monoB-unCP' % (dbse, 38)] = 'Cytosine from G...C WC'
TAGL['%s-%s' % (dbse, 39)] = 'IS-01 GG0/3.36 CGis036'
TAGL['%s-%s-dimer' % (dbse, 39)] = 'GG0/3.36 CGis036'
TAGL['%s-%s-monoA-CP' % (dbse, 39)] = 'Guanine from GG0/3.36 CGis036'
TAGL['%s-%s-monoB-CP' % (dbse, 39)] = 'Cytosine from GG0/3.36 CGis036'
TAGL['%s-%s-monoA-unCP' % (dbse, 39)] = 'Guanine from GG0/3.36 CGis036'
TAGL['%s-%s-monoB-unCP' % (dbse, 39)] = 'Cytosine from GG0/3.36 CGis036'
TAGL['%s-%s' % (dbse, 40)] = 'IS-02 GG0/3.36 GCis036'
TAGL['%s-%s-dimer' % (dbse, 40)] = 'GG0/3.36 GCis036'
TAGL['%s-%s-monoA-CP' % (dbse, 40)] = 'Cytosine from GG0/3.36 GCis036'
TAGL['%s-%s-monoB-CP' % (dbse, 40)] = 'Guanine from GG0/3.36 GCis036'
TAGL['%s-%s-monoA-unCP' % (dbse, 40)] = 'Cytosine from GG0/3.36 GCis036'
TAGL['%s-%s-monoB-unCP' % (dbse, 40)] = 'Guanine from GG0/3.36 GCis036'
TAGL['%s-%s' % (dbse, 41)] = 'IS-03 AA20/3.05 ATis2005'
TAGL['%s-%s-dimer' % (dbse, 41)] = 'AA20/3.05 ATis2005'
TAGL['%s-%s-monoA-CP' % (dbse, 41)] = 'Adenine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoB-CP' % (dbse, 41)] = 'Thymine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 41)] = 'Adenine from AA20/3.05 ATis2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 41)] = 'Thymine from AA20/3.05 ATis2005'
TAGL['%s-%s' % (dbse, 42)] = 'IS-04 AA20/3.05 TAis2005'
TAGL['%s-%s-dimer' % (dbse, 42)] = 'AA20/3.05 TAis2005'
TAGL['%s-%s-monoA-CP' % (dbse, 42)] = 'Thymine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoB-CP' % (dbse, 42)] = 'Adenine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 42)] = 'Thymine from AA20/3.05 TAis2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 42)] = 'Adenine from AA20/3.05 TAis2005'
TAGL['%s-%s' % (dbse, 43)] = 'IS-05 GC0/3.25 C//Cis'
TAGL['%s-%s-dimer' % (dbse, 43)] = 'GC0/3.25 C//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 43)] = 'Cytosine from GC0/3.25 C//Cis'
TAGL['%s-%s' % (dbse, 44)] = 'IS-06 GC0/3.25 G//Gis'
TAGL['%s-%s-dimer' % (dbse, 44)] = 'GC0/3.25 G//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 44)] = 'Guanine from GC0/3.25 G//Gis'
TAGL['%s-%s' % (dbse, 45)] = 'IS-07 CG0/3.19 G//Gis'
TAGL['%s-%s-dimer' % (dbse, 45)] = 'CG0/3.19 G//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 45)] = 'Guanine from CG0/3.19 G//Gis'
TAGL['%s-%s' % (dbse, 46)] = 'IS-08 CG0/3.19 C//Cis'
TAGL['%s-%s-dimer' % (dbse, 46)] = 'CG0/3.19 C//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 46)] = 'Cytosine from CG0/3.19 C//Cis'
TAGL['%s-%s' % (dbse, 47)] = 'IS-09 GA10/3.15 A//Cis'
TAGL['%s-%s-dimer' % (dbse, 47)] = 'GA10/3.15 A//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 47)] = 'Adenine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 47)] = 'Cytosine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 47)] = 'Adenine from GA10/3.15 A//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 47)] = 'Cytosine from GA10/3.15 A//Cis'
TAGL['%s-%s' % (dbse, 48)] = 'IS-10 GA10/3.15 T//Gis'
TAGL['%s-%s-dimer' % (dbse, 48)] = 'GA10/3.15 T//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 48)] = 'Thymine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 48)] = 'Guanine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 48)] = 'Thymine from GA10/3.15 T//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 48)] = 'Guanine from GA10/3.15 T//Gis'
TAGL['%s-%s' % (dbse, 49)] = 'IS-11 AG08/3.19 T//Gis'
TAGL['%s-%s-dimer' % (dbse, 49)] = 'AG08/3.19 T//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 49)] = 'Guanine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 49)] = 'Thymine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 49)] = 'Guanine from AG08/3.19 T//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 49)] = 'Thymine from AG08/3.19 T//Gis'
TAGL['%s-%s' % (dbse, 50)] = 'IS-12 AG08/3.19 A//Cis'
TAGL['%s-%s-dimer' % (dbse, 50)] = 'AG08/3.19 A//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 50)] = 'Adenine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 50)] = 'Cytosine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 50)] = 'Adenine from AG08/3.19 A//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 50)] = 'Cytosine from AG08/3.19 A//Cis'
TAGL['%s-%s' % (dbse, 51)] = 'IS-13 TG03.19 A//Gis'
TAGL['%s-%s-dimer' % (dbse, 51)] = 'TG03.19 A//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 51)] = 'Adenine from TG03.19 A//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 51)] = 'Guanine from TG03.19 A//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 51)] = 'Adenine from TG03.19 A//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 51)] = 'Guanine from TG03.19 A//Gis'
TAGL['%s-%s' % (dbse, 52)] = 'IS-14 TG03.19 T//Cis'
TAGL['%s-%s-dimer' % (dbse, 52)] = 'TG03.19 T//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 52)] = 'Thymine from TG03.19 T//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 52)] = 'Cytosine from TG03.19 T//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 52)] = 'Thymine from TG03.19 T//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 52)] = 'Cytosine from TG03.19 T//Cis'
TAGL['%s-%s' % (dbse, 53)] = 'IS-15 GT10/3.15 T//Cis'
TAGL['%s-%s-dimer' % (dbse, 53)] = 'GT10/3.15 T//Cis'
TAGL['%s-%s-monoA-CP' % (dbse, 53)] = 'Thymine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoB-CP' % (dbse, 53)] = 'Cytosine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoA-unCP' % (dbse, 53)] = 'Thymine from GT10/3.15 T//Cis'
TAGL['%s-%s-monoB-unCP' % (dbse, 53)] = 'Cytosine from GT10/3.15 T//Cis'
TAGL['%s-%s' % (dbse, 54)] = 'IS-16 GT10/3.15 A//Gis'
TAGL['%s-%s-dimer' % (dbse, 54)] = 'GT10/3.15 A//Gis'
TAGL['%s-%s-monoA-CP' % (dbse, 54)] = 'Adenine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoB-CP' % (dbse, 54)] = 'Guanine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoA-unCP' % (dbse, 54)] = 'Adenine from GT10/3.15 A//Gis'
TAGL['%s-%s-monoB-unCP' % (dbse, 54)] = 'Guanine from GT10/3.15 A//Gis'
TAGL['%s-%s' % (dbse, 55)] = 'IS-17 AT10/3.26 T//Tis'
TAGL['%s-%s-dimer' % (dbse, 55)] = 'AT10/3.26 T//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 55)] = 'Thymine from AT10/3.26 T//Tis'
TAGL['%s-%s' % (dbse, 56)] = 'IS-18 AT10/3.26 A//Ais'
TAGL['%s-%s-dimer' % (dbse, 56)] = 'AT10/3.26 A//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 56)] = 'Adenine from AT10/3.26 A//Ais'
TAGL['%s-%s' % (dbse, 57)] = 'IS-19 TA08/3.16 A//Ais'
TAGL['%s-%s-dimer' % (dbse, 57)] = 'TA08/3.16 A//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 57)] = 'Adenine from TA08/3.16 A//Ais'
TAGL['%s-%s' % (dbse, 58)] = 'IS-20 TA08/3.16 T//Tis'
TAGL['%s-%s-dimer' % (dbse, 58)] = 'TA08/3.16 T//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 58)] = 'Thymine from TA08/3.16 T//Tis'
TAGL['%s-%s' % (dbse, 59)] = 'IS-21 AA0/3.24 A//Tis'
TAGL['%s-%s-dimer' % (dbse, 59)] = 'AA0/3.24 A//Tis'
TAGL['%s-%s-monoA-CP' % (dbse, 59)] = 'Adenine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoB-CP' % (dbse, 59)] = 'Thymine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoA-unCP' % (dbse, 59)] = 'Adenine from AA0/3.24 A//Tis'
TAGL['%s-%s-monoB-unCP' % (dbse, 59)] = 'Thymine from AA0/3.24 A//Tis'
TAGL['%s-%s' % (dbse, 60)] = 'IS-22 AA0/3.24 T//Ais'
TAGL['%s-%s-dimer' % (dbse, 60)] = 'AA0/3.24 T//Ais'
TAGL['%s-%s-monoA-CP' % (dbse, 60)] = 'Adenine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoB-CP' % (dbse, 60)] = 'Thymine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoA-unCP' % (dbse, 60)] = 'Adenine from AA0/3.24 T//Ais'
TAGL['%s-%s-monoB-unCP' % (dbse, 60)] = 'Thymine from AA0/3.24 T//Ais'
TAGL['%s-%s' % (dbse, 61)] = 'IS-23 A...A IS'
TAGL['%s-%s-dimer' % (dbse, 61)] = 'A...A IS'
TAGL['%s-%s-monoA-CP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoB-CP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 61)] = 'methyl-Adenine from A...A IS'
TAGL['%s-%s' % (dbse, 62)] = 'IS-24 T...T IS'
TAGL['%s-%s-dimer' % (dbse, 62)] = 'T...T IS'
TAGL['%s-%s-monoA-CP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoB-CP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 62)] = 'methyl-Thymine from T...T IS'
TAGL['%s-%s' % (dbse, 63)] = 'IS-25 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 63)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 63)] = 'methyl-Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 64)] = 'IS-26 C...C IS'
TAGL['%s-%s-dimer' % (dbse, 64)] = 'C...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 64)] = 'methyl-Cytosine from C...C IS'
TAGL['%s-%s' % (dbse, 65)] = 'IS-27 A...G IS'
TAGL['%s-%s-dimer' % (dbse, 65)] = 'A...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 65)] = 'methyl-Adenine from A...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 65)] = 'methyl-Guanine from A...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 65)] = 'methyl-Adenine from A...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 65)] = 'methyl-Guanine from A...G IS'
TAGL['%s-%s' % (dbse, 66)] = 'IS-28 T...C IS'
TAGL['%s-%s-dimer' % (dbse, 66)] = 'T...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 66)] = 'methyl-Cytosine from T...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 66)] = 'methyl-Thymine from T...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 66)] = 'methyl-Cytosine from T...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 66)] = 'methyl-Thymine from T...C IS'
TAGL['%s-%s' % (dbse, 67)] = 'IS-29 C...A IS'
TAGL['%s-%s-dimer' % (dbse, 67)] = 'C...A IS'
TAGL['%s-%s-monoA-CP' % (dbse, 67)] = 'Cytosine from C...A IS'
TAGL['%s-%s-monoB-CP' % (dbse, 67)] = 'Adenine from C...A IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 67)] = 'Cytosine from C...A IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 67)] = 'Adenine from C...A IS'
TAGL['%s-%s' % (dbse, 68)] = 'IS-30 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 68)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 68)] = 'Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 69)] = 'IS-31 G...G IS'
TAGL['%s-%s-dimer' % (dbse, 69)] = 'G...G IS'
TAGL['%s-%s-monoA-CP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-CP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 69)] = 'Guanine from G...G IS'
TAGL['%s-%s' % (dbse, 70)] = 'IS-32 C...C IS'
TAGL['%s-%s-dimer' % (dbse, 70)] = 'C...C IS'
TAGL['%s-%s-monoA-CP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoB-CP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoA-unCP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s-monoB-unCP' % (dbse, 70)] = 'Cytosine from C...C IS'
TAGL['%s-%s' % (dbse, 71)] = 'ST-01 G...C S'
TAGL['%s-%s-dimer' % (dbse, 71)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 71)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 71)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 71)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 71)] = 'Cytosine from G...C S'
TAGL['%s-%s' % (dbse, 72)] = 'ST-02 mG...mC S'
TAGL['%s-%s-dimer' % (dbse, 72)] = 'mG...mC S'
TAGL['%s-%s-monoA-CP' % (dbse, 72)] = 'methyl-Guanine from mG...mC S'
TAGL['%s-%s-monoB-CP' % (dbse, 72)] = 'methyl-Cytosine from mG...mC S'
TAGL['%s-%s-monoA-unCP' % (dbse, 72)] = 'methyl-Guanine from mG...mC S'
TAGL['%s-%s-monoB-unCP' % (dbse, 72)] = 'methyl-Cytosine from mG...mC S'
TAGL['%s-%s' % (dbse, 73)] = 'ST-03 A...T S'
TAGL['%s-%s-dimer' % (dbse, 73)] = 'A...T S'
TAGL['%s-%s-monoA-CP' % (dbse, 73)] = 'Adenine from A...T S'
TAGL['%s-%s-monoB-CP' % (dbse, 73)] = 'Thymine from A...T S'
TAGL['%s-%s-monoA-unCP' % (dbse, 73)] = 'Adenine from A...T S'
TAGL['%s-%s-monoB-unCP' % (dbse, 73)] = 'Thymine from A...T S'
TAGL['%s-%s' % (dbse, 74)] = 'ST-04 mA...mT S'
TAGL['%s-%s-dimer' % (dbse, 74)] = 'mA...mT S'
TAGL['%s-%s-monoA-CP' % (dbse, 74)] = 'methyl-Adenine from mA...mT S'
TAGL['%s-%s-monoB-CP' % (dbse, 74)] = 'methyl-Thymine from mA...mT S'
TAGL['%s-%s-monoA-unCP' % (dbse, 74)] = 'methyl-Adenine from mA...mT S'
TAGL['%s-%s-monoB-unCP' % (dbse, 74)] = 'methyl-Thymine from mA...mT S'
TAGL['%s-%s' % (dbse, 75)] = 'ST-05 CC1'
TAGL['%s-%s-dimer' % (dbse, 75)] = 'CC1'
TAGL['%s-%s-monoA-CP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoB-CP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoA-unCP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s-monoB-unCP' % (dbse, 75)] = 'Cytosine from CC1'
TAGL['%s-%s' % (dbse, 76)] = 'ST-06 CC2'
TAGL['%s-%s-dimer' % (dbse, 76)] = 'CC2'
TAGL['%s-%s-monoA-CP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoB-CP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoA-unCP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s-monoB-unCP' % (dbse, 76)] = 'Cytosine from CC2'
TAGL['%s-%s' % (dbse, 77)] = 'ST-07 CC3'
TAGL['%s-%s-dimer' % (dbse, 77)] = 'CC3'
TAGL['%s-%s-monoA-CP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoB-CP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoA-unCP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s-monoB-unCP' % (dbse, 77)] = 'Cytosine from CC3'
TAGL['%s-%s' % (dbse, 78)] = 'ST-08 CC4'
TAGL['%s-%s-dimer' % (dbse, 78)] = 'CC4'
TAGL['%s-%s-monoA-CP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoB-CP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoA-unCP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s-monoB-unCP' % (dbse, 78)] = 'Cytosine from CC4'
TAGL['%s-%s' % (dbse, 79)] = 'ST-09 CC5'
TAGL['%s-%s-dimer' % (dbse, 79)] = 'CC5'
TAGL['%s-%s-monoA-CP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoB-CP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoA-unCP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s-monoB-unCP' % (dbse, 79)] = 'Cytosine from CC5'
TAGL['%s-%s' % (dbse, 80)] = 'ST-10 CC6'
TAGL['%s-%s-dimer' % (dbse, 80)] = 'CC6'
TAGL['%s-%s-monoA-CP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoB-CP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoA-unCP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s-monoB-unCP' % (dbse, 80)] = 'Cytosine from CC6'
TAGL['%s-%s' % (dbse, 81)] = 'ST-11 CC7'
TAGL['%s-%s-dimer' % (dbse, 81)] = 'CC7'
TAGL['%s-%s-monoA-CP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoB-CP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoA-unCP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s-monoB-unCP' % (dbse, 81)] = 'Cytosine from CC7'
TAGL['%s-%s' % (dbse, 82)] = 'ST-12 CC8'
TAGL['%s-%s-dimer' % (dbse, 82)] = 'CC8'
TAGL['%s-%s-monoA-CP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoB-CP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoA-unCP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s-monoB-unCP' % (dbse, 82)] = 'Cytosine from CC8'
TAGL['%s-%s' % (dbse, 83)] = 'ST-13 CC9'
TAGL['%s-%s-dimer' % (dbse, 83)] = 'CC9'
TAGL['%s-%s-monoA-CP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoB-CP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoA-unCP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s-monoB-unCP' % (dbse, 83)] = 'Cytosine from CC9'
TAGL['%s-%s' % (dbse, 84)] = 'ST-14 CC10'
TAGL['%s-%s-dimer' % (dbse, 84)] = 'CC10'
TAGL['%s-%s-monoA-CP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoB-CP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoA-unCP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s-monoB-unCP' % (dbse, 84)] = 'Cytosine from CC10'
TAGL['%s-%s' % (dbse, 85)] = 'ST-15 CC11'
TAGL['%s-%s-dimer' % (dbse, 85)] = 'CC11'
TAGL['%s-%s-monoA-CP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoB-CP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoA-unCP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s-monoB-unCP' % (dbse, 85)] = 'Cytosine from CC11'
TAGL['%s-%s' % (dbse, 86)] = 'ST-16 CC12'
TAGL['%s-%s-dimer' % (dbse, 86)] = 'CC12'
TAGL['%s-%s-monoA-CP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoB-CP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoA-unCP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s-monoB-unCP' % (dbse, 86)] = 'Cytosine from CC12'
TAGL['%s-%s' % (dbse, 87)] = 'ST-17 CC13'
TAGL['%s-%s-dimer' % (dbse, 87)] = 'CC13'
TAGL['%s-%s-monoA-CP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoB-CP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoA-unCP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s-monoB-unCP' % (dbse, 87)] = 'Cytosine from CC13'
TAGL['%s-%s' % (dbse, 88)] = 'ST-18 CC14'
TAGL['%s-%s-dimer' % (dbse, 88)] = 'CC14'
TAGL['%s-%s-monoA-CP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoB-CP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoA-unCP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s-monoB-unCP' % (dbse, 88)] = 'Cytosine from CC14'
TAGL['%s-%s' % (dbse, 89)] = 'ST-19 AAst'
TAGL['%s-%s-dimer' % (dbse, 89)] = 'AAst'
TAGL['%s-%s-monoA-CP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoB-CP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoA-unCP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s-monoB-unCP' % (dbse, 89)] = 'Adenine from AAst'
TAGL['%s-%s' % (dbse, 90)] = 'ST-20 GGst'
TAGL['%s-%s-dimer' % (dbse, 90)] = 'GGst'
TAGL['%s-%s-monoA-CP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoB-CP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoA-unCP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s-monoB-unCP' % (dbse, 90)] = 'Guanine from GGst'
TAGL['%s-%s' % (dbse, 91)] = 'ST-21 ACst'
TAGL['%s-%s-dimer' % (dbse, 91)] = 'ACst'
TAGL['%s-%s-monoA-CP' % (dbse, 91)] = 'Adenine from ACst'
TAGL['%s-%s-monoB-CP' % (dbse, 91)] = 'Cytosine from ACst'
TAGL['%s-%s-monoA-unCP' % (dbse, 91)] = 'Adenine from ACst'
TAGL['%s-%s-monoB-unCP' % (dbse, 91)] = 'Cytosine from ACst'
TAGL['%s-%s' % (dbse, 92)] = 'ST-22 GAst'
TAGL['%s-%s-dimer' % (dbse, 92)] = 'GAst'
TAGL['%s-%s-monoA-CP' % (dbse, 92)] = 'Guanine from GAst'
TAGL['%s-%s-monoB-CP' % (dbse, 92)] = 'Adenine from GAst'
TAGL['%s-%s-monoA-unCP' % (dbse, 92)] = 'Guanine from GAst'
TAGL['%s-%s-monoB-unCP' % (dbse, 92)] = 'Adenine from GAst'
TAGL['%s-%s' % (dbse, 93)] = 'ST-23 CCst'
TAGL['%s-%s-dimer' % (dbse, 93)] = 'CCst'
TAGL['%s-%s-monoA-CP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoB-CP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoA-unCP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s-monoB-unCP' % (dbse, 93)] = 'Cytosine from CCst'
TAGL['%s-%s' % (dbse, 94)] = 'ST-24 AUst'
TAGL['%s-%s-dimer' % (dbse, 94)] = 'AUst'
TAGL['%s-%s-monoA-CP' % (dbse, 94)] = 'Adenine from AUst'
TAGL['%s-%s-monoB-CP' % (dbse, 94)] = 'Uracil from AUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 94)] = 'Adenine from AUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 94)] = 'Uracil from AUst'
TAGL['%s-%s' % (dbse, 95)] = 'ST-25 GCst'
TAGL['%s-%s-dimer' % (dbse, 95)] = 'GCst'
TAGL['%s-%s-monoA-CP' % (dbse, 95)] = 'Guanine from GCst'
TAGL['%s-%s-monoB-CP' % (dbse, 95)] = 'Cytosine from GCst'
TAGL['%s-%s-monoA-unCP' % (dbse, 95)] = 'Guanine from GCst'
TAGL['%s-%s-monoB-unCP' % (dbse, 95)] = 'Cytosine from GCst'
TAGL['%s-%s' % (dbse, 96)] = 'ST-26 CUst'
TAGL['%s-%s-dimer' % (dbse, 96)] = 'CUst'
TAGL['%s-%s-monoA-CP' % (dbse, 96)] = 'Cytosine from CUst'
TAGL['%s-%s-monoB-CP' % (dbse, 96)] = 'Uracil from CUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 96)] = 'Cytosine from CUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 96)] = 'Uracil from CUst'
TAGL['%s-%s' % (dbse, 97)] = 'ST-27 UUst'
TAGL['%s-%s-dimer' % (dbse, 97)] = 'UUst'
TAGL['%s-%s-monoA-CP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoB-CP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 97)] = 'Uracil from UUst'
TAGL['%s-%s' % (dbse, 98)] = 'ST-28 GUst'
TAGL['%s-%s-dimer' % (dbse, 98)] = 'GUst'
TAGL['%s-%s-monoA-CP' % (dbse, 98)] = 'Guanine from GUst'
TAGL['%s-%s-monoB-CP' % (dbse, 98)] = 'Uracil from GUst'
TAGL['%s-%s-monoA-unCP' % (dbse, 98)] = 'Guanine from GUst'
TAGL['%s-%s-monoB-unCP' % (dbse, 98)] = 'Uracil from GUst'
TAGL['%s-%s' % (dbse, 99)] = 'ST-29 GG0/3.36 GGs036'
TAGL['%s-%s-dimer' % (dbse, 99)] = 'GGs036'
TAGL['%s-%s-monoA-CP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoB-CP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoA-unCP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s-monoB-unCP' % (dbse, 99)] = 'Guanine from GGs036'
TAGL['%s-%s' % (dbse, 100)] = 'ST-30 GG0/3.36 CCs036'
TAGL['%s-%s-dimer' % (dbse, 100)] = 'CCs036'
TAGL['%s-%s-monoA-CP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoB-CP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoA-unCP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s-monoB-unCP' % (dbse, 100)] = 'Cytosine from CCs036'
TAGL['%s-%s' % (dbse, 101)] = 'ST-31 AA20/3.05 AAs2005'
TAGL['%s-%s-dimer' % (dbse, 101)] = 'AAs2005'
TAGL['%s-%s-monoA-CP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoB-CP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 101)] = 'Adenine from AAs2005'
TAGL['%s-%s' % (dbse, 102)] = 'ST-32 AA20/3.05 TTs2005'
TAGL['%s-%s-dimer' % (dbse, 102)] = 'TTs2005'
TAGL['%s-%s-monoA-CP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoB-CP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoA-unCP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s-monoB-unCP' % (dbse, 102)] = 'Thymine from TTs2005'
TAGL['%s-%s' % (dbse, 103)] = 'ST-33 GC0/3.25 G//Cs'
TAGL['%s-%s-dimer' % (dbse, 103)] = 'GC0/3.25 G//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 103)] = 'Cytosine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 103)] = 'Guanine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 103)] = 'Cytosine from GC0/3.25 G//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 103)] = 'Guanine from GC0/3.25 G//Cs'
TAGL['%s-%s' % (dbse, 104)] = 'ST-34 CG0/3.19 G//Cs'
TAGL['%s-%s-dimer' % (dbse, 104)] = 'CG0/3.19 G//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 104)] = 'Cytosine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 104)] = 'Guanine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 104)] = 'Cytosine from CG0/3.19 G//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 104)] = 'Guanine from CG0/3.19 G//Cs'
TAGL['%s-%s' % (dbse, 105)] = 'ST-35 GA10/3.15 A//Gs'
TAGL['%s-%s-dimer' % (dbse, 105)] = 'GA10/3.15 A//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 105)] = 'Adenine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 105)] = 'Guanine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 105)] = 'Adenine from GA10/3.15 A//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 105)] = 'Guanine from GA10/3.15 A//Gs'
TAGL['%s-%s' % (dbse, 106)] = 'ST-36 GA10/3.15 T//Cs'
TAGL['%s-%s-dimer' % (dbse, 106)] = 'GA10/3.15 T//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 106)] = 'Thymine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 106)] = 'Cytosine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 106)] = 'Thymine from GA10/3.15 T//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 106)] = 'Cytosine from GA10/3.15 T//Cs'
TAGL['%s-%s' % (dbse, 107)] = 'ST-37 AG08/3.19 A//Gs'
TAGL['%s-%s-dimer' % (dbse, 107)] = 'AG08/3.19 A//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 107)] = 'Adenine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 107)] = 'Guanine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 107)] = 'Adenine from AG08/3.19 A//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 107)] = 'Guanine from AG08/3.19 A//Gs'
TAGL['%s-%s' % (dbse, 108)] = 'ST-38 AG08/3.19 T//Cs'
TAGL['%s-%s-dimer' % (dbse, 108)] = 'AG08/3.19 T//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 108)] = 'Thymine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 108)] = 'Cytosine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 108)] = 'Thymine from AG08/3.19 T//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 108)] = 'Cytosine from AG08/3.19 T//Cs'
TAGL['%s-%s' % (dbse, 109)] = 'ST-39 TG03.19 T//Gs'
TAGL['%s-%s-dimer' % (dbse, 109)] = 'TG03.19 T//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 109)] = 'Thymine from TG03.19 T//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 109)] = 'Guanine from TG03.19 T//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 109)] = 'Thymine from TG03.19 T//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 109)] = 'Guanine from TG03.19 T//Gs'
TAGL['%s-%s' % (dbse, 110)] = 'ST-40 TG03.19 A//Cs'
TAGL['%s-%s-dimer' % (dbse, 110)] = 'TG03.19 A//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 110)] = 'Adenine from TG03.19 A//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 110)] = 'Cytosine from TG03.19 A//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 110)] = 'Adenine from TG03.19 A//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 110)] = 'Cytosine from TG03.19 A//Cs'
TAGL['%s-%s' % (dbse, 111)] = 'ST-41 GT10/3.15 T//Gs'
TAGL['%s-%s-dimer' % (dbse, 111)] = 'GT10/3.15 T//Gs'
TAGL['%s-%s-monoA-CP' % (dbse, 111)] = 'Thymine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoB-CP' % (dbse, 111)] = 'Guanine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoA-unCP' % (dbse, 111)] = 'Thymine from GT10/3.15 T//Gs'
TAGL['%s-%s-monoB-unCP' % (dbse, 111)] = 'Guanine from GT10/3.15 T//Gs'
TAGL['%s-%s' % (dbse, 112)] = 'ST-42 GT10/3.15 A//Cs'
TAGL['%s-%s-dimer' % (dbse, 112)] = 'GT10/3.15 A//Cs'
TAGL['%s-%s-monoA-CP' % (dbse, 112)] = 'Adenine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoB-CP' % (dbse, 112)] = 'Cytosine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoA-unCP' % (dbse, 112)] = 'Adenine from GT10/3.15 A//Cs'
TAGL['%s-%s-monoB-unCP' % (dbse, 112)] = 'Cytosine from GT10/3.15 A//Cs'
TAGL['%s-%s' % (dbse, 113)] = 'ST-43 AT10/3.26 A//Ts'
TAGL['%s-%s-dimer' % (dbse, 113)] = 'AT10/3.26 A//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 113)] = 'Adenine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 113)] = 'Thymine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 113)] = 'Adenine from AT10/3.26 A//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 113)] = 'Thymine from AT10/3.26 A//Ts'
TAGL['%s-%s' % (dbse, 114)] = 'ST-44 TA08/3.16 A//Ts'
TAGL['%s-%s-dimer' % (dbse, 114)] = 'TA08/3.16 A//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 114)] = 'Adenine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 114)] = 'Thymine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 114)] = 'Adenine from TA08/3.16 A//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 114)] = 'Thymine from TA08/3.16 A//Ts'
TAGL['%s-%s' % (dbse, 115)] = 'ST-45 AA0/3.24 A//As'
TAGL['%s-%s-dimer' % (dbse, 115)] = 'AA0/3.24 A//As'
TAGL['%s-%s-monoA-CP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoB-CP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoA-unCP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s-monoB-unCP' % (dbse, 115)] = 'Adenine from AA0/3.24 A//As'
TAGL['%s-%s' % (dbse, 116)] = 'ST-46 AA0/3.24 T//Ts'
TAGL['%s-%s-dimer' % (dbse, 116)] = 'AA0/3.24 T//Ts'
TAGL['%s-%s-monoA-CP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoB-CP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoA-unCP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s-monoB-unCP' % (dbse, 116)] = 'Thymine from AA0/3.24 T//Ts'
TAGL['%s-%s' % (dbse, 117)] = 'ST-47 A...T S'
TAGL['%s-%s-dimer' % (dbse, 117)] = 'A...T S'
TAGL['%s-%s-monoA-CP' % (dbse, 117)] = 'methyl-Adenine from A...T S'
TAGL['%s-%s-monoB-CP' % (dbse, 117)] = 'methyl-Thymine from A...T S'
TAGL['%s-%s-monoA-unCP' % (dbse, 117)] = 'methyl-Adenine from A...T S'
TAGL['%s-%s-monoB-unCP' % (dbse, 117)] = 'methyl-Thymine from A...T S'
TAGL['%s-%s' % (dbse, 118)] = 'ST-48 G...C S'
TAGL['%s-%s-dimer' % (dbse, 118)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 118)] = 'methyl-Cytosine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 118)] = 'methyl-Guanine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 118)] = 'methyl-Cytosine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 118)] = 'methyl-Guanine from G...C S'
TAGL['%s-%s' % (dbse, 119)] = 'ST-49 A...C S'
TAGL['%s-%s-dimer' % (dbse, 119)] = 'A...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 119)] = 'methyl-Adenine from A...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 119)] = 'methyl-Cytosine from A...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 119)] = 'methyl-Adenine from A...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 119)] = 'methyl-Cytosine from A...C S'
TAGL['%s-%s' % (dbse, 120)] = 'ST-50 T...G S'
TAGL['%s-%s-dimer' % (dbse, 120)] = 'T...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 120)] = 'methyl-Thymine from T...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 120)] = 'methyl-Guanine from T...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 120)] = 'methyl-Thymine from T...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 120)] = 'methyl-Guanine from T...G S'
TAGL['%s-%s' % (dbse, 121)] = 'ST-51 G...C S'
TAGL['%s-%s-dimer' % (dbse, 121)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 121)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 121)] = 'Guanine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 121)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 121)] = 'Guanine from G...C S'
TAGL['%s-%s' % (dbse, 122)] = 'ST-52 A...G S'
TAGL['%s-%s-dimer' % (dbse, 122)] = 'A...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 122)] = 'Adenine from A...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 122)] = 'Guanine from A...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 122)] = 'Adenine from A...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 122)] = 'Guanine from A...G S'
TAGL['%s-%s' % (dbse, 123)] = 'ST-53 C...G S'
TAGL['%s-%s-dimer' % (dbse, 123)] = 'C...G S'
TAGL['%s-%s-monoA-CP' % (dbse, 123)] = 'Guanine from C...G S'
TAGL['%s-%s-monoB-CP' % (dbse, 123)] = 'Cytosine from C...G S'
TAGL['%s-%s-monoA-unCP' % (dbse, 123)] = 'Guanine from C...G S'
TAGL['%s-%s-monoB-unCP' % (dbse, 123)] = 'Cytosine from C...G S'
TAGL['%s-%s' % (dbse, 124)] = 'ST-54 G...C S'
TAGL['%s-%s-dimer' % (dbse, 124)] = 'G...C S'
TAGL['%s-%s-monoA-CP' % (dbse, 124)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-CP' % (dbse, 124)] = 'Cytosine from G...C S'
TAGL['%s-%s-monoA-unCP' % (dbse, 124)] = 'Guanine from G...C S'
TAGL['%s-%s-monoB-unCP' % (dbse, 124)] = 'Cytosine from G...C S'
# <<< Geometry Specification Strings >>>
GEOS = {}
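# Each entry is a qcdb.Molecule built from an XYZ-style block: charge and
# multiplicity on the first line of each fragment, Cartesian coordinates in
# angstroms, and '--' separating monomer A from monomer B.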
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
C -1.0398599 -0.0950435 2.9628987
N -0.8760506 -0.1198953 4.3522101
C 0.3372729 -0.0573522 4.9526643
C 1.4603152 0.0294729 4.2021231
C 1.2876371 0.0522766 2.7771415
N 0.0866353 -0.0006919 2.2061593
O -2.1779850 -0.1592983 2.4996990
N 2.3517978 0.1313296 1.9777210
H -1.7254816 -0.1869061 4.8897274
H 0.3482118 -0.0833071 6.0321432
H 2.4345221 0.0778275 4.6597911
H 3.2714721 0.1534551 2.3764404
H 2.2350290 0.1077513 0.9551229
--
0 1
O 2.0171439 0.0263963 -0.7905108
C 0.9445057 0.0313388 -1.4013109
N -0.2671137 0.0963439 -0.7051367
C -1.5207327 0.1136461 -1.2552546
N -1.7528129 0.0544172 -2.5494108
C -0.6040129 -0.0113445 -3.2574879
C 0.7161247 -0.0244271 -2.8113172
N 1.6041685 -0.0981114 -3.8601422
C 0.8295480 -0.1292217 -4.9265187
N -0.5075993 -0.0802063 -4.6198760
N -2.5513427 0.2447649 -0.3850923
H -0.1820496 0.1041077 0.3219703
H 1.1760819 -0.1871623 -5.9443460
H -1.2844954 -0.0872596 -5.2590531
H -3.4573855 0.0691895 -0.7801319
H -2.4169221 0.0545062 0.6045745
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
C -0.8133331 -0.0866715 2.9789275
N -0.6596824 -0.0799211 4.3738426
C 0.5690989 0.0065483 4.9361002
C 1.6895413 0.0878038 4.1758957
C 1.5121802 0.0788250 2.7574552
N 0.3010370 0.0009434 2.2093349
O -1.9566795 -0.1721648 2.5197622
N 2.5630702 0.1536301 1.9395171
C -1.8703132 -0.1697062 5.1759469
H 0.5992246 0.0046040 6.0165438
H 2.6640151 0.1533452 4.6318609
H 3.4878053 0.1855364 2.3258249
H 2.4345725 0.0922431 0.9177005
H -2.4003892 -1.0888680 4.9431219
H -2.5250547 0.6677091 4.9519253
H -1.5896839 -0.1549564 6.2247983
--
0 1
O 2.2125806 -0.0495287 -0.7919145
C 1.1295844 -0.0091268 -1.3874888
N -0.0672022 0.0855128 -0.6713507
C -1.3285435 0.1453481 -1.2005422
N -1.5812601 0.1027260 -2.4911624
C -0.4460576 0.0088365 -3.2186883
C 0.8814078 -0.0466784 -2.7933188
N 1.7434051 -0.1366731 -3.8585399
C 0.9419745 -0.1354113 -4.9081560
N -0.3886567 -0.0498363 -4.5825778
N -2.3394192 0.3029765 -0.3108597
H 0.0331014 0.0898348 0.3565931
H 1.2604461 -0.1939844 -5.9363793
C -1.5258946 -0.0225882 -5.4753265
H -3.2566158 0.1569838 -0.6925288
H -2.1947491 0.0836423 0.6730029
H -2.1741480 -0.8737721 -5.2827842
H -2.0939825 0.8923400 -5.3277349
H -1.1628640 -0.0654072 -6.4980769
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
N 0.9350155 -0.0279801 -0.3788916
C 1.6739638 -0.0357766 0.7424316
C 3.0747955 -0.0094480 0.5994562
C 3.5646109 0.0195446 -0.7059872
N 2.8531510 0.0258031 -1.8409596
C 1.5490760 0.0012569 -1.5808009
N 4.0885824 -0.0054429 1.5289786
C 5.1829921 0.0253971 0.7872176
N 4.9294871 0.0412404 -0.5567274
N 1.0716177 -0.0765366 1.9391390
H 0.8794435 0.0050260 -2.4315709
H 6.1882591 0.0375542 1.1738824
H 5.6035368 0.0648755 -1.3036811
H 0.0586915 -0.0423765 2.0039181
H 1.6443796 -0.0347395 2.7619159
--
0 1
N -3.9211729 -0.0009646 -1.5163659
C -4.6136833 0.0169051 -0.3336520
C -3.9917387 0.0219348 0.8663338
C -2.5361367 0.0074651 0.8766724
N -1.9256484 -0.0110593 -0.3638948
C -2.5395897 -0.0149474 -1.5962357
C -4.7106131 0.0413373 2.1738637
O -1.8674730 0.0112093 1.9120833
O -1.9416783 -0.0291878 -2.6573783
H -4.4017172 -0.0036078 -2.4004924
H -0.8838255 -0.0216168 -0.3784269
H -5.6909220 0.0269347 -0.4227183
H -4.4439282 -0.8302573 2.7695655
H -4.4267056 0.9186178 2.7530256
H -5.7883971 0.0505530 2.0247280
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
N 1.4233678 -2.5755572 -0.0177928
C 2.4164068 -1.6737862 -0.0069340
N 3.6894208 -2.0970069 0.0011813
C 4.6820785 -1.1903949 0.0099205
N 4.6008957 0.1417597 0.0124822
C 3.3223927 0.5333463 0.0042869
C 2.1894368 -0.2816944 -0.0057960
N 1.0473244 0.4790858 -0.0114405
C 1.4850348 1.7325334 -0.0051890
N 2.8426160 1.8225456 0.0046153
H 5.6822446 -1.6040945 0.0161209
H 0.8406886 2.5978945 -0.0067705
C 3.6543123 3.0217765 0.0114721
H 1.6817907 -3.5456829 -0.0088365
H 0.4430037 -2.3144424 -0.0119382
H 4.2913482 3.0300392 0.8917344
H 4.2797691 3.0498799 -0.8767515
H 2.9957930 3.8849549 0.0253861
--
0 1
N -1.7137952 0.0896100 -0.0160334
C -2.4110219 1.2713542 -0.0102985
N -3.7875706 1.1256389 -0.0070257
C -4.3748768 -0.1132637 0.0070053
C -3.6761626 -1.2738156 0.0090266
C -2.2277642 -1.1903377 -0.0021447
O -1.8742013 2.3720388 -0.0087627
O -1.4812933 -2.1730005 -0.0012821
C -4.3178351 -2.6216898 0.0206209
C -4.5749913 2.3488054 0.0099294
H -0.6740657 0.1873910 -0.0185734
H -5.4567167 -0.1063625 0.0145634
H -4.0169219 -3.1954743 -0.8545578
H -5.4025389 -2.5351168 0.0301255
H -4.0008440 -3.1876298 0.8951907
H -4.2676272 2.9948502 -0.8071476
H -4.4269734 2.8838422 0.9449871
H -5.6215286 2.0832557 -0.1007218
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.7878100 0.0020606 -4.2304138
C -0.8092899 0.0015891 -2.8583291
C 0.5077793 -0.0002173 -2.4445540
N 1.3042354 -0.0007671 -3.5776764
C 0.5251204 0.0003646 -4.7183358
C 0.8109948 -0.0022184 -1.0723889
N -0.3541156 -0.0012118 -0.3048371
C -1.6266470 0.0012022 -0.7959383
N -1.9085135 0.0021412 -2.0882229
O 1.9375199 -0.0047453 -0.5373376
N -2.6234345 0.0038170 0.1093233
O 0.8790764 -0.0004039 -5.8828291
H -1.5908258 0.0034199 -4.8351827
H -0.2139163 -0.0018761 0.7197062
H -3.5584869 0.0004853 -0.2509512
H -2.4522376 -0.0015362 1.1120222
H 2.3082381 -0.0028321 -3.6013832
--
0 1
N 2.3837158 0.0024084 2.1880115
C 1.3635083 0.0013168 3.0441674
C 1.6066986 0.0037303 4.4583838
C 0.5209494 0.0030055 5.2667859
N -0.7245613 -0.0002446 4.7321333
C -0.9573175 -0.0028235 3.3551883
N 0.1301240 -0.0019248 2.5406741
O -2.1208999 -0.0056022 2.9504908
H -1.5467235 -0.0008598 5.3145350
H 0.5868645 0.0048893 6.3446020
H 2.6043649 0.0062410 4.8655851
H 3.3229417 0.0045233 2.5396202
H 2.2148236 0.0000552 1.1689293
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C -0.0399020 0.0000000 -0.0353727
N -0.0114814 0.0000000 1.3676751
C 1.1387066 0.0000000 2.0831816
C 2.3367544 0.0000000 1.4511865
C 2.3093819 0.0000000 0.0167889
N 1.1708246 0.0000000 -0.6653863
O -1.1150036 0.0000000 -0.6203815
N 3.4490015 0.0000000 -0.6790500
H -0.9108314 0.0000000 1.8214404
H 1.0410884 0.0000000 3.1587270
H 3.2607830 0.0000000 2.0056829
H 4.3278873 0.0000000 -0.1979709
H 3.4179786 0.0000000 -1.7058513
--
0 1
O 3.2403090 0.0000000 -3.4870523
C 2.1818608 0.0000000 -4.1224126
N 0.9469308 0.0000000 -3.4715093
C -0.2749703 0.0000000 -4.0613039
N -0.4833351 0.0000000 -5.3532242
C 0.6850532 0.0000000 -6.0363493
C 1.9918547 0.0000000 -5.5441185
N 2.9083577 0.0000000 -6.5651625
C 2.1671699 0.0000000 -7.6581930
N 0.8245995 0.0000000 -7.3970665
H -1.1112956 0.0000000 -3.3753120
H 0.9890303 0.0000000 -2.4307715
H 2.5507838 0.0000000 -8.6645924
H 0.0674277 0.0000000 -8.0602299
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
O -1.3445145 -0.0017812 0.2109785
C -0.5827723 -0.0011739 1.1802676
N 0.8024859 -0.0013909 0.9807253
C 1.7623754 -0.0004370 1.9497163
N 1.5138727 0.0005531 3.2372784
C 0.1802572 0.0006138 3.4773368
C -0.8864943 -0.0001193 2.5787139
N -2.0960454 0.0003342 3.2323438
C -1.7654415 0.0013112 4.5084219
N -0.4076744 0.0015312 4.7105883
N 3.0436067 -0.0006786 1.5010046
H 1.1059736 -0.0017584 -0.0014454
H -2.4611653 0.0018946 5.3301928
H 0.0813329 0.0022447 5.5900062
H 3.7807912 0.2008871 2.1781729
H 3.2486220 -0.0002735 0.5197891
--
0 1
O 1.7201107 -0.0009041 -1.6341114
C 0.9160823 -0.0002542 -2.5726633
N -0.4364135 -0.0009531 -2.4309422
C -1.3901935 -0.0001633 -3.4689726
C -0.8112768 0.0014209 -4.8053842
C 0.5255837 0.0020253 -4.9588359
N 1.3652416 0.0012542 -3.8724327
O -2.5802179 -0.0008229 -3.2195788
H -1.4757079 0.0020523 -5.6523281
H -0.8069002 -0.0016737 -1.4589893
H 1.0090483 0.0031967 -5.9240562
H 2.3651020 0.0016683 -3.9825177
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C -0.0546262 -0.0000666 -0.0449366
N -0.0199900 -0.0000391 1.3370041
C 1.1383570 0.0000076 2.0419758
C 2.3258577 0.0000418 1.3914589
C 2.2850892 0.0000244 -0.0362838
N 1.1291861 -0.0000259 -0.7116098
O -1.1572546 -0.0001260 -0.6121938
N 3.4163218 0.0000650 -0.7389180
H -0.9156158 -0.0000626 1.8021973
H 1.0513420 0.0000150 3.1177694
H 3.2570460 0.0000679 1.9330954
H 4.3021685 0.0000746 -0.2670886
H 3.3885124 0.0000089 -1.7541744
--
1 1
N 0.8209043 0.0000137 -3.4723115
C -0.4088139 0.0000345 -4.0385840
C -0.4948596 0.0000126 -5.4617196
C 0.6604919 -0.0000154 -6.1675372
N 1.8676936 -0.0000144 -5.5424117
C 2.0133448 -0.0000297 -4.1696950
O 3.1022731 -0.0000760 -3.6244111
N -1.4596996 0.0000728 -3.2533202
H 2.7287536 -0.0000519 -6.0699661
H 0.6811915 -0.0000377 -7.2470448
H -1.4500232 0.0000004 -5.9585035
H -2.3755461 0.0000545 -3.6707993
H -1.3585129 0.0000190 -2.2095314
H 0.9279995 0.0000010 -2.4136765
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
O 3.5986069 0.3187715 -0.0000425
C 3.1043656 -0.7907721 0.0000494
N 3.8766606 -1.9463148 0.0000974
C 3.3523333 -3.2081585 0.0000314
C 2.0204180 -3.4182033 0.0000005
C 1.1157326 -2.2823273 -0.0000024
N 1.7481024 -1.0416244 0.0001189
O -0.1074745 -2.3680037 -0.0001125
H 1.6059877 -4.4112397 -0.0000642
H 4.8710378 -1.7927175 -0.0000238
H 1.1448708 -0.2099855 0.0000657
H 4.0702664 -4.0151053 -0.0000042
--
0 1
O 0.0832848 1.3018469 -0.0000275
C -1.1439944 1.3419495 0.0000322
N -1.7902509 2.5769847 0.0001252
C -3.1492468 2.8315090 0.0000032
N -3.9059521 1.6745104 0.0001278
C -3.3673000 0.4141979 0.0000554
C -2.0348331 0.2020916 0.0000036
O -3.6310511 3.9463823 -0.0001798
H -1.6060027 -0.7883399 -0.0000828
H -4.9024679 1.8155815 -0.0000087
H -1.1951712 3.3944157 0.0000479
H -4.0815013 -0.3955360 0.0000308
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
O 3.0139530 -0.0000663 -2.3714607
C 3.0149686 -0.0000275 -1.1572038
N 4.1880062 -0.0001806 -0.4139005
C 4.2197761 0.0000190 0.9517028
C 3.0868691 0.0000352 1.6837875
C 1.8067676 -0.0000168 1.0072050
N 1.8766809 0.0001650 -0.3776725
O 0.7209062 0.0000058 1.5860958
H 3.1056982 -0.0000395 2.7597452
H 5.0361993 0.0000319 -0.9554725
H 0.9797808 -0.0001787 -0.8806223
H 5.2026538 0.0000504 1.3991588
--
0 1
O -0.6997337 0.0001361 -1.5583592
C -1.7610439 0.0000592 -0.9401093
N -2.9691203 0.0001496 -1.6022795
C -4.1792891 0.0000396 -0.9575726
C -4.2572226 -0.0001289 0.3871173
C -3.0393068 -0.0001349 1.1840453
N -1.8606585 -0.0000544 0.4209370
O -2.9932145 -0.0002449 2.4009987
H -5.2057276 0.0004029 0.8960881
H -2.9068168 0.0002610 -2.6064637
H -0.9648574 -0.0000753 0.9279392
H -5.0495438 0.0000916 -1.5966575
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C -0.0268479 0.0001243 -0.0484048
N 0.0107678 -0.0000464 1.3475182
C 1.1662769 -0.0000863 2.0541365
C 2.3502411 -0.0000220 1.4003156
C 2.3025646 0.0000460 -0.0345481
N 1.1568381 0.0001109 -0.7170898
O -1.1319705 0.0002807 -0.5958040
N 3.4410032 0.0000681 -0.7244676
H -0.8870732 -0.0000629 1.8046190
H 1.0808205 -0.0001565 3.1305463
H 3.2850793 -0.0000335 1.9362836
H 4.3196967 -0.0000384 -0.2408021
H 3.4310344 0.0000688 -1.7533114
--
0 1
S 3.6261481 0.0000331 -3.9489529
C 2.0961273 -0.0000288 -4.6037680
N 0.9509789 -0.0001104 -3.8229476
C -0.3400955 -0.0001574 -4.2939721
N -0.6487077 -0.0000023 -5.5786621
C 0.4418258 0.0000453 -6.3611558
C 1.7866043 -0.0000027 -5.9848867
N 2.6124570 0.0000014 -7.0854825
C 1.7805999 0.0000520 -8.1059933
N 0.4593064 0.0000986 -7.7272032
N -1.3218629 -0.0005782 -3.3778714
H 1.0832701 -0.0001308 -2.8047028
H 2.0715160 0.0000504 -9.1429108
H -0.3508790 0.0000799 -8.3240783
H -2.2569895 0.0000670 -3.7397613
H -1.1666925 0.0000488 -2.3691590
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
N -2.3081868 0.7091068 0.0000285
C -1.2051249 1.4749768 -0.0000074
N 0.0068404 0.8955116 -0.0000239
C 1.1080909 1.6831490 -0.0000001
N 1.1718597 3.0108449 0.0000248
C -0.0467899 3.5587068 0.0000144
C -1.2685607 2.8863732 -0.0000096
N -2.3312449 3.7631581 -0.0000201
C -1.7532251 4.9525322 0.0000002
N -0.3875112 4.8900390 0.0000301
H 2.0473735 1.1474715 -0.0000027
H -2.2800394 5.8934739 -0.0000005
H 0.2491479 5.6682121 -0.0000014
H -2.2351122 -0.2997556 -0.0000060
H -3.2108389 1.1496550 -0.0000063
--
0 1
S -1.7524155 -2.8468749 -0.0000015
C -0.1157899 -3.0959877 0.0000038
N 0.7757259 -2.0472741 0.0000119
C 2.1570215 -2.1260472 0.0000127
N 2.6399748 -3.4231595 0.0000214
C 1.8308010 -4.5235818 0.0000026
C 0.4852439 -4.4036601 -0.0000120
O 2.8928976 -1.1571548 -0.0000022
H -0.1424437 -5.2790356 -0.0000032
H 3.6448728 -3.5071844 -0.0000023
H 0.4048510 -1.0816361 -0.0000008
H 2.3325010 -5.4794294 -0.0000001
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
N -0.9044942 0.3053428 -1.9849463
C -1.5722006 0.1028596 -0.8342896
N -0.8984868 -0.0828082 0.3086249
C -1.5939985 -0.2540305 1.4676013
N -2.9198181 -0.2548010 1.6283568
C -3.5512123 -0.0734507 0.4647187
C -2.9785368 0.1103594 -0.7891549
N -3.9279869 0.2613244 -1.7761223
C -5.0691734 0.1716827 -1.1179520
N -4.9024796 -0.0280544 0.2301645
N -0.8371284 -0.4982415 2.5786136
H -6.0473812 0.2435326 -1.5626408
H -5.6242357 -0.1304918 0.9234162
H 0.0816120 0.0641263 -2.0404453
H -1.4560458 0.2977892 -2.8251132
H -1.3448761 -0.4008730 3.4406879
H 0.1151602 -0.1525559 2.5800463
--
0 1
O 1.9075808 -0.3384204 -1.9978152
C 2.5680509 -0.1510537 -0.9753123
N 1.9481026 0.0211081 0.2519027
C 2.5671978 0.2530499 1.4535237
N 3.9422564 0.3000243 1.3759119
C 4.6421414 0.1305464 0.2085191
C 4.0227571 -0.0934717 -0.9708747
O 1.9735147 0.4072231 2.5136021
C 4.7417280 -0.2795724 -2.2650127
H 4.4155554 0.4660468 2.2485693
H 0.8994704 -0.0257608 0.2762592
H 5.7177824 0.1899609 0.2946839
H 4.4976921 -1.2470914 -2.7006324
H 4.4340213 0.4777637 -2.9842064
H 5.8186182 -0.2165735 -2.1235955
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
N 0.3803518 5.2710590 0.0000000
C -0.9827443 5.4382698 0.0000000
N -1.6445695 4.2964704 0.0000000
C -0.6462489 3.3468246 0.0000000
C 0.6196111 3.9194590 0.0000000
N 1.7959710 3.2884069 0.0000000
C 1.6381971 1.9602261 0.0000000
N 0.4661224 1.2614481 0.0000000
C -0.6897222 1.9395705 0.0000000
N 2.7687654 1.2167374 0.0000000
N -1.8599123 1.2852151 0.0000000
H 1.0814580 5.9922927 0.0000000
H -1.4336046 6.4162469 0.0000000
H -1.9018421 0.2719784 0.0000000
H -2.7003404 1.8337683 0.0000000
H 3.6406811 1.7091587 0.0000000
H 2.7459099 0.2072280 0.0000000
--
0 1
C 1.6184078 -2.2819447 0.0000000
N 0.4001260 -1.6517356 0.0000000
C -0.8434818 -2.2665916 0.0000000
C -0.8382446 -3.7219904 0.0000000
C 0.3574635 -4.3499319 0.0000000
N 1.5408193 -3.6563635 0.0000000
C -2.1496426 -4.4344119 0.0000000
O -1.8782774 -1.6012490 0.0000000
O 2.6941241 -1.6972351 0.0000000
H 0.4220912 -0.6062649 0.0000000
H 2.4263852 -4.1347427 0.0000000
H 0.4460260 -5.4270144 0.0000000
H -2.7348414 -4.1561542 -0.8748178
H -2.0041973 -5.5125757 0.0000000
H -2.7348414 -4.1561542 0.8748178
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
N -5.1985541 0.4936739 0.0318901
C -5.3820983 -0.8616291 0.0378780
H -6.3663575 -1.2989623 0.0563254
N -4.2515351 -1.5472672 0.0187374
C -3.2863426 -0.5669089 -0.0011454
C -1.8812597 -0.6327273 -0.0265724
N -1.2256807 -1.8085660 -0.0449899
H -1.7524474 -2.6605390 0.0028082
H -0.2208316 -1.8257421 -0.0311968
N -1.1915263 0.5144581 -0.0390690
C -1.8701238 1.6793288 -0.0286861
H -1.2534253 2.5689235 -0.0396068
N -3.1871042 1.8787826 -0.0064054
C -3.8427150 0.7125075 0.0068719
H -5.9109500 1.2044355 0.0439248
--
0 1
C 4.4082682 1.3958429 0.0182886
C 4.9187035 0.0992764 0.0212789
H 5.9905108 -0.0483384 0.0314539
C 4.0880886 -1.0223564 0.0114675
C 4.6130388 -2.4267390 0.0143734
H 4.2620014 -2.9754574 0.8873371
H 4.2783956 -2.9729351 -0.8665709
H 5.7002762 -2.4237810 0.0245680
C 2.7198280 -0.7726204 -0.0014366
F 1.8841246 -1.8434019 -0.0116101
C 2.1541812 0.4899806 -0.0052011
H 1.0780578 0.6238951 -0.0168588
C 3.0326742 1.5626996 0.0050555
F 2.5236686 2.8066583 0.0017348
H 5.0549009 2.2603064 0.0258240
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
O 0.3144345 -1.1442948 0.0144949
C 1.3535742 -0.4837792 0.0219615
N 1.2957095 0.9167643 0.0619980
C 2.3569978 1.7719209 0.0564464
N 3.6092442 1.3935257 0.0165258
C 3.7175450 0.0431271 -0.0038819
C 2.7154197 -0.9260868 -0.0053966
N 3.2450349 -2.1934744 -0.0364308
C 4.5477687 -1.9938501 -0.0526937
N 4.8854175 -0.6639760 -0.0358567
N 2.0456853 3.1046346 0.1569813
H 0.3457459 1.3051947 0.0833829
H 5.2946011 -2.7690789 -0.0763507
H 5.8090734 -0.2651897 -0.0402909
H 2.8020253 3.7137889 -0.1056218
H 1.1380549 3.3808140 -0.1794301
--
0 1
O -1.3169188 1.9540889 -0.0350694
C -2.3291669 1.2492795 -0.0287951
N -2.3117178 -0.1155253 -0.0025392
C -3.4195015 -0.9596499 0.0079063
C -4.6908528 -0.2770302 -0.0148404
C -4.7376559 1.0709532 -0.0414746
N -3.5816109 1.8128549 -0.0480085
S -3.2558282 -2.5902318 0.0425862
H -5.5930477 -0.8648219 -0.0095256
H -1.3692770 -0.5503586 0.0090372
H -5.6601199 1.6316629 -0.0586834
H -3.6021712 2.8189896 -0.0637914
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
O 0.2958263 -1.2112383 0.3424002
C 1.3078216 -0.5286166 0.1794526
N 1.2037967 0.8617133 0.0406924
C 2.2285795 1.7353917 -0.1726765
N 3.4850711 1.3842589 -0.2697628
C 3.6408598 0.0475045 -0.1126641
C 2.6766347 -0.9371114 0.0982060
N 3.2448896 -2.1840776 0.1912254
C 4.5343218 -1.9574061 0.0391817
N 4.8260700 -0.6298861 -0.1485794
N 1.8780889 3.0609691 -0.2292539
H 0.2531805 1.2355886 0.1433595
H 5.3034685 -2.7105809 0.0557498
H 5.7326070 -0.2147977 -0.2854525
H 2.6033664 3.6462550 -0.6086135
H 0.9511283 3.2665112 -0.5646975
--
0 1
S -1.8220636 2.0964300 0.3299922
C -2.7962507 0.7575832 0.0980480
N -2.3768437 -0.5243435 0.0586503
C -3.1865402 -1.6725337 -0.1260894
C -4.6014876 -1.3770894 -0.2724329
C -5.0258914 -0.0988346 -0.2322480
N -4.1419110 0.9324971 -0.0553359
O -2.6885486 -2.7802641 -0.1499990
H -5.2868250 -2.1961027 -0.4114699
H -1.3618003 -0.7128334 0.1812758
H -6.0623003 0.1861824 -0.3346801
H -4.4551541 1.8895240 -0.0237988
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -1.2382495 0.0003068 3.2761967
N -0.8377699 -0.0002822 4.6262520
C 0.4580599 -0.0008039 5.0168008
C 1.4462017 -0.0007290 4.0905459
C 1.0380467 -0.0000946 2.7139311
N -0.2347225 0.0003919 2.3461594
O -2.4294469 0.0006718 3.0053097
N 1.9638405 0.0000201 1.7458948
H -1.5882710 -0.0003035 5.2983303
H 0.6465072 -0.0012211 6.0804324
H 2.4837567 -0.0011342 4.3810193
H 2.9358093 -0.0004389 1.9899669
H 1.6736506 0.0004167 0.7589718
--
0 1
N -1.1590741 0.0004019 -0.4138632
C -0.2319446 0.0003452 -1.3716397
N 1.0782989 0.0006222 -1.0483779
C 1.9971055 0.0005759 -2.0347184
N 1.8153528 0.0002525 -3.3521684
C 0.5065246 -0.0000627 -3.6438316
C -0.5584383 -0.0000492 -2.7449616
N -1.7730910 -0.0004726 -3.3901944
C -1.4412382 -0.0006880 -4.6700413
N -0.0894486 -0.0004698 -4.8806889
H 3.0276349 0.0008204 -1.7005582
H -2.1424713 -0.0010329 -5.4876572
H 0.3894075 -0.0006068 -5.7657319
H -0.8924030 0.0005975 0.5753416
H -2.1264189 0.0000818 -0.6840313
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
O 1.7709955 2.3306811 0.0000007
C 0.5807567 2.6278573 -0.0000017
N -0.3963320 1.6134139 0.0000053
C -1.7496702 1.7814670 -0.0000008
N -2.3431748 2.9556940 0.0000044
C -1.4426427 3.9663046 -0.0000011
C -0.0499495 3.9187042 0.0000001
N 0.4958359 5.1819761 -0.0000084
C -0.5511865 5.9829108 0.0000000
N -1.7428313 5.3021768 0.0000079
N -2.4981258 0.6511289 -0.0000042
H -0.0110714 0.6591960 -0.0000028
H -0.5111672 7.0591526 0.0000000
H -2.6703203 5.6919780 -0.0000047
H -3.4931951 0.7676564 0.0000001
H -2.1057181 -0.2785664 0.0000054
--
0 1
O -1.7956163 -2.4184989 0.0000035
C -0.6750612 -2.9133012 -0.0000022
N -0.5075308 -4.3172987 0.0000000
C 0.6872157 -4.9931784 0.0000035
N 1.8564548 -4.4063354 0.0000002
C 1.7518235 -3.0547138 -0.0000036
C 0.6020117 -2.2761182 0.0000000
N 0.9066335 -0.9422663 -0.0000018
C 2.2267133 -0.8962763 0.0000026
N 2.7793888 -2.1508756 0.0000002
N 0.6219231 -6.3495411 -0.0000038
H -1.3767427 -4.8336984 0.0000000
H 2.7978322 0.0182460 0.0000000
H 3.7606420 -2.3771663 -0.0000001
H 1.4878769 -6.8540474 0.0000006
H -0.2426849 -6.8513669 0.0000027
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
O -2.1042101 2.1109877 -0.0000208
C -0.9861671 2.6187107 -0.0000392
N 0.1630602 1.8075078 0.0000102
C 1.4605433 2.2338425 0.0001555
N 1.8191828 3.4984777 -0.0000542
C 0.7428289 4.3179348 -0.0001135
C -0.6139590 4.0028703 -0.0000745
N -1.3931127 5.1362751 -0.0000141
C -0.5202985 6.1236314 -0.0000063
N 0.7801628 5.6851901 -0.0000822
N 2.4058015 1.2657395 0.0011086
H -0.0159870 0.7952237 0.0001524
H -0.7678388 7.1715226 0.0000486
H 1.6159456 6.2449111 -0.0000097
H 3.3631616 1.5602679 -0.0004150
H 2.1837654 0.2818725 -0.0005076
--
0 1
S 2.4306485 -2.2874888 -0.0000855
C 0.9168812 -2.9468359 -0.0000744
N 0.7467080 -4.3320487 -0.0000158
C -0.4440553 -5.0077630 0.0002108
N -1.6149750 -4.4178761 -0.0000758
C -1.5042335 -3.0717080 -0.0001193
C -0.3421815 -2.2986807 -0.0000424
N -0.6475784 -0.9636606 0.0000146
C -1.9693802 -0.9175554 -0.0000159
N -2.5269735 -2.1675435 -0.0001085
N -0.3741558 -6.3629805 0.0018230
H 1.6159815 -4.8501428 0.0002176
H -2.5316778 0.0048746 0.0000297
H -3.5101913 -2.3858700 -0.0000954
H -1.2351647 -6.8756289 -0.0006400
H 0.4955304 -6.8577215 -0.0011689
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
S -1.8166246 2.6821898 -0.0001323
C -0.1580956 2.6810732 -0.0000609
N 0.5668809 1.4952672 0.0000612
C 1.9289258 1.3802774 0.0002313
N 2.7529724 2.4104948 0.0000355
C 2.0872014 3.5809186 -0.0000521
C 0.7090604 3.8070941 -0.0000816
N 0.4280062 5.1539248 -0.0000845
C 1.6128743 5.7291564 -0.0000496
N 2.6486647 4.8271187 -0.0000590
N 2.4376947 0.1291202 0.0011300
H 0.0139020 0.6266213 0.0001583
H 1.7867621 6.7921840 -0.0000082
H 3.6347397 5.0250186 0.0000158
H 3.4373976 0.0573910 -0.0001455
H 1.8889030 -0.7194971 -0.0004252
--
0 1
O 1.5845436 -2.6967539 -0.0001530
C 0.4227905 -3.0880579 -0.0000036
N 0.1350039 -4.4687763 0.0000593
C -1.1124849 -5.0407117 -0.0002399
N -2.2264983 -4.3563305 -0.0000043
C -2.0034350 -3.0192765 0.0001096
C -0.7909434 -2.3397387 0.0000690
N -0.9816420 -0.9826389 0.0000103
C -2.2944101 -0.8306632 0.0000066
N -2.9493710 -2.0333946 0.0000657
N -1.1503939 -6.3976637 -0.0016596
H 0.9553753 -5.0587791 -0.0001303
H -2.7848264 0.1304484 -0.0000198
H -3.9468229 -2.1737881 0.0000098
H -2.0477574 -6.8436736 0.0002445
H -0.3206311 -6.9565723 0.0005285
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
O -1.3058058 0.3353432 -1.9024452
C -1.9900049 0.1709800 -0.8920389
N -1.3797575 0.0147348 0.3602195
C -2.0188483 -0.2185512 1.5398296
N -3.3172819 -0.2995486 1.6835149
C -3.9535777 -0.1111832 0.5011380
C -3.4163316 0.1070768 -0.7658722
N -4.4021933 0.2259169 -1.7159019
C -5.5203687 0.0828308 -1.0307622
N -5.3061034 -0.1241201 0.3081537
N -1.1984729 -0.3198503 2.6442461
H -0.3465494 0.1232217 0.3842921
H -6.5125630 0.1194762 -1.4473490
H -5.9980490 -0.2607961 1.0259833
H -1.6811020 -0.7086455 3.4387722
H -0.3023253 -0.7473222 2.4686403
--
0 1
N 1.4487686 -0.3061821 -1.8063482
C 2.1291340 -0.0639194 -0.6809352
N 1.4721887 0.2311616 0.4586152
C 2.1772393 0.4822275 1.5830193
N 3.4937302 0.4667260 1.7609342
C 4.1213725 0.1541959 0.6180841
C 3.5365208 -0.1183154 -0.6181769
N 4.4783743 -0.3859600 -1.5825801
C 5.6237470 -0.2779583 -0.9307987
N 5.4697684 0.0435735 0.3900585
H 1.5797459 0.7376023 2.4499547
H 6.5972124 -0.4228222 -1.3684582
H 6.1971892 0.1827155 1.0717315
H 0.4522459 -0.0785075 -1.8709034
H 1.9849455 -0.4365244 -2.6464500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
N 0.5317472 -1.5315785 0.0000000
C 1.6654518 -2.2639451 0.0000000
N 1.8178303 -3.5826276 0.0000000
C 0.6256673 -4.1952397 0.0000000
C -0.6270275 -3.5869658 0.0000000
C -0.6548527 -2.1760608 0.0000000
N 0.3553662 -5.5402802 0.0000000
C -1.0067488 -5.6700992 0.0000000
N -1.6430990 -4.5118780 0.0000000
H 1.0374873 -6.2804981 0.0000000
H -1.4834180 -6.6357628 0.0000000
H 2.5904811 -1.6988711 0.0000000
N -1.8018223 -1.4963325 0.0000000
H -2.6555758 -2.0258909 0.0000000
H -1.8291436 -0.4726173 0.0000000
--
0 1
C 1.5820983 2.1166821 0.0000000
N 0.3982589 1.4363592 0.0000000
C -0.8811915 2.0133126 0.0000000
C -0.7922275 3.4409053 0.0000000
C 0.4748735 4.0188928 0.0000000
N 1.6897724 3.4229374 0.0000000
N 0.2422898 5.3650391 0.0000000
C -1.1196961 5.5363613 0.0000000
N -1.7806879 4.3962812 0.0000000
N 2.7046701 1.3533161 0.0000000
H 2.6551620 0.3544770 0.0000000
H 3.5911246 1.8188331 0.0000000
H 0.4300601 0.4044282 0.0000000
O -1.8846384 1.3003960 0.0000000
H -1.5681188 6.5152131 0.0000000
H 0.9500064 6.0803260 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
O -1.3082180 -0.2837400 -5.2857830
C -1.2591040 -0.1735270 -4.0737910
N -0.0039450 -0.3880190 -3.4022040
C 0.2384190 -0.3204430 -2.0528570
N -0.7115810 -0.0321780 -1.1774330
C -1.9176870 0.2005520 -1.7572050
C -2.2714950 0.1569770 -3.1054550
N -3.6087050 0.4637830 -3.2773380
C -4.0524060 0.6893350 -2.0695760
N -3.0715720 0.5411370 -1.1003800
N 1.4926750 -0.5945950 -1.6152550
H 0.7445330 -0.6355620 -4.0377220
H -5.0658850 0.9619290 -1.8095140
H -3.1606740 0.6975300 -0.1076790
H 1.7292200 -0.3123020 -0.6555620
H 2.2464200 -0.6005410 -2.2841000
--
0 1
N -0.6357410 -0.7643850 1.7470590
C 0.2971930 -0.4605580 2.6687480
N -0.0180070 -0.6125000 3.9730480
C 0.8974790 -0.3069680 4.9024040
N 2.1418480 0.1550240 4.7356170
C 2.4316560 0.2831700 3.4365580
C 1.5994220 -0.0008880 2.3483350
N 2.2633010 0.2340730 1.1506720
C 3.4544220 0.6547710 1.5098070
N 3.6165670 0.7091770 2.8719470
H 0.5799100 -0.4518430 5.9329470
H 4.2477100 0.9349840 0.8305350
H 4.4400710 0.9945670 3.3802140
H -1.5203500 -1.0951610 2.1001930
H -0.5364890 -0.5388150 0.7550260
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '25')] = qcdb.Molecule("""
0 1
C 1.2592909 1.6400416 0.0000000
N -0.0329944 1.3939366 0.0000000
C -0.7724457 2.5351119 0.0000000
C -0.3568209 3.8625834 0.0000000
C 1.0524521 4.1391914 0.0000000
N 1.7707185 2.9096522 0.0000000
N 2.1434672 0.6224336 0.0000000
N -1.4258984 4.7253576 0.0000000
C -2.4770874 3.9323264 0.0000000
N -2.1383285 2.6014184 0.0000000
H -2.7704070 1.8193281 0.0000000
H -3.5031441 4.2568795 0.0000000
O 1.6606912 5.1923670 0.0000000
H 2.7730000 3.0373368 0.0000000
H 1.8138435 -0.3438647 0.0000000
H 3.1276914 0.8060391 0.0000000
--
0 1
C 2.2859985 -3.1747071 0.0000000
N 1.3685098 -2.2195054 0.0000000
C 0.1720555 -2.9042803 0.0000000
N 1.7524294 -4.4267217 0.0000000
C 0.3848788 -4.2845108 0.0000000
C -1.1754152 -2.4860287 0.0000000
N -2.1251928 -3.4313144 0.0000000
C -1.7646506 -4.7253100 0.0000000
N -0.5383069 -5.2487516 0.0000000
H 3.3496602 -3.0075287 0.0000000
H 2.2521180 -5.3008143 0.0000000
H -2.5835604 -5.4328271 0.0000000
N -1.5512019 -1.1969440 0.0000000
H -0.8988350 -0.4160731 0.0000000
H -2.5417242 -1.0304237 0.0000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '26')] = qcdb.Molecule("""
0 1
O 1.0272885 -1.7927509 -0.4061508
C 1.7699883 -0.8358415 -0.1967480
N 1.2319553 0.4331963 0.0618047
C 1.9364691 1.5620604 0.3460907
N 3.2411330 1.6380980 0.3913752
C 3.8110938 0.4396217 0.1071531
C 3.2032408 -0.7831141 -0.1709853
N 4.1349400 -1.7700995 -0.3829089
C 5.2907910 -1.1513051 -0.2354273
N 5.1516924 0.1802511 0.0617335
N 1.1709724 2.6978601 0.5477231
H 0.2039336 0.5207236 -0.0280886
H 6.2587500 -1.6129704 -0.3315009
H 5.8835828 0.8525276 0.2204394
H 1.7146455 3.4340823 0.9714033
H 0.3029369 2.5238305 1.0323857
--
0 1
N -1.6634540 -2.1503266 0.4844345
C -2.7337243 -1.3645844 0.2851342
N -3.9617362 -1.8916536 0.4117484
C -5.0361049 -1.1108105 0.2142857
N -5.0856134 0.1814610 -0.1169411
C -3.8521385 0.6813689 -0.2227871
C -2.6434417 0.0104508 -0.0286100
N -1.5757669 0.8650951 -0.2045833
C -2.1325255 2.0285854 -0.5143055
N -3.4907538 1.9716246 -0.5383952
H -5.9917639 -1.6042053 0.3341775
H -1.5835904 2.9296030 -0.7337374
H -4.1262380 2.7236162 -0.7489234
H -1.8639094 -3.1302901 0.5932195
H -0.7358064 -1.8946489 0.1475504
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '27')] = qcdb.Molecule("""
0 1
O 5.3545637 -1.5839084 0.1643820
C 4.3016967 -0.9781139 0.1123080
N 3.0713646 -1.6809667 0.2659334
C 1.8042754 -1.1676035 0.2494339
N 1.5592029 0.1129485 0.0745442
C 2.6978871 0.8367062 -0.0926982
C 4.0240233 0.4175244 -0.0912729
N 4.8841613 1.4681703 -0.3008504
C 4.0879153 2.5107452 -0.4295656
N 2.7596551 2.1849030 -0.3099959
N 0.7798744 -2.0309505 0.4761070
H 3.1965102 -2.6688068 0.4391670
H 4.4098825 3.5222321 -0.6091205
H 1.9722362 2.8053438 -0.3963660
H -0.1527293 -1.7022269 0.2002243
H 0.9606535 -3.0046040 0.3028696
--
0 1
N -1.2242031 1.0428672 0.4916050
C -2.2040220 0.1989638 0.1394948
N -1.9060140 -1.0547654 -0.2435106
C -2.9084418 -1.8851915 -0.5992935
N -4.2169079 -1.6502337 -0.6282927
C -4.4819036 -0.3976671 -0.2338853
C -3.5616520 0.5734472 0.1583798
N -4.1759166 1.7562758 0.4961983
C -5.4596333 1.4978989 0.3100336
N -5.7008222 0.2244483 -0.1272061
H -2.5970464 -2.8772539 -0.9009149
H -6.2592754 2.1999750 0.4769792
H -6.5949340 -0.1885904 -0.3349675
H -0.2461099 0.7667993 0.3993852
H -1.4852074 1.9635263 0.7951137
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '28')] = qcdb.Molecule("""
0 1
N -1.2744921 -0.0017953 -1.3782659
C -2.1152770 -0.0015179 -0.3381875
N -1.6375936 -0.0033248 0.9198720
C -2.5074052 -0.0030670 1.9505655
N -3.8371522 -0.0012513 1.9239415
C -4.2828181 0.0005849 0.6601536
C -3.5155952 0.0006434 -0.5034237
N -4.2984407 0.0028211 -1.6341051
C -5.5305121 0.0040931 -1.1536448
N -5.5811183 0.0027919 0.2133605
H -2.0528877 -0.0045852 2.9334633
H -6.4253358 0.0059818 -1.7533163
H -6.4045390 0.0033962 0.7920186
H -0.2596925 -0.0029765 -1.2406098
H -1.6728767 -0.0000588 -2.3000471
--
0 1
N 1.2734087 -0.0017991 1.3765409
C 2.1149782 -0.0015194 0.3371102
N 1.6382658 -0.0033244 -0.9213222
C 2.5089058 -0.0030642 -1.9513246
N 3.8386381 -0.0012477 -1.9236383
C 4.2833025 0.0005866 -0.6594937
C 3.5151554 0.0006426 0.5034653
N 4.2970793 0.0028188 1.6347785
C 5.5295364 0.0040924 1.1553161
N 5.5812428 0.0027936 -0.2116508
H 2.0551790 -0.0045813 -2.9345889
H 6.4238744 0.0059806 1.7557114
H 6.4051319 0.0033994 -0.7896412
H 0.2587211 -0.0029792 1.2389462
H 1.6714569 -0.0000645 2.2984633
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '29')] = qcdb.Molecule("""
0 1
N -1.1366363 -0.0666108 -1.4922977
C -1.9363007 -0.0408578 -0.4194776
N -1.4071800 -0.0593819 0.8158785
C -2.2352343 -0.0402428 1.8802964
N -3.5645999 -0.0046868 1.9074668
C -4.0608616 0.0128417 0.6631804
C -3.3407627 -0.0019209 -0.5302183
N -4.1677258 0.0274614 -1.6288352
C -5.3791670 0.0592559 -1.0991816
N -5.3753601 0.0520242 0.2686301
H -1.7428602 -0.0568025 2.8448773
H -6.2968580 0.0879838 -1.6624329
H -6.1747025 0.0724119 0.8798069
H -0.1211878 -0.0517077 -1.3879919
H -1.5677584 -0.0159058 -2.3974751
--
0 1
N 1.8123343 -0.0245408 -1.2588738
C 2.7152804 -0.0039456 -0.2161678
C 2.5630991 -0.0007999 1.1886196
N 3.6772242 0.0214724 1.9383141
C 4.8749026 0.0394397 1.3318824
N 5.1551234 0.0392981 0.0273056
C 4.0284465 0.0171443 -0.6906539
N 3.9051515 0.0090860 -2.0602851
C 2.5735625 -0.0163877 -2.3426881
N 1.3736145 -0.0168867 1.8043279
H 5.7275827 0.0569289 1.9982511
H 2.2043398 -0.0288835 -3.3545151
H 4.6669159 0.0187011 -2.7180078
H 1.3935693 -0.0168974 2.8088363
H 0.4799202 -0.0417563 1.3125324
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '30')] = qcdb.Molecule("""
0 1
N 1.9051383 -0.1221668 1.3018987
C 1.2306207 -0.0724496 2.4599521
N 1.9188011 -0.0911776 3.6114745
C 1.2468662 -0.0458415 4.7741848
N -0.0695564 0.0217460 4.9796590
C -0.7242768 0.0390915 3.8148784
C -0.1771642 -0.0054840 2.5309668
N -1.1622374 0.0280993 1.5676491
C -2.2864196 0.0935989 2.2653565
N -2.0826685 0.1041226 3.6120321
H 1.8637884 -0.0652578 5.6631315
H -3.2746103 0.1375165 1.8384429
H -2.7831685 0.1478311 4.3334811
H 2.9067312 -0.1495396 1.3694272
H 1.4544353 -0.0668897 0.3920587
--
0 1
N -1.9061558 -0.0641839 -1.3006378
C -1.2309020 -0.0492565 -2.4591003
N -1.9184496 -0.0947841 -3.6103362
C -1.2461054 -0.0720254 -4.7734664
N 0.0702056 -0.0055843 -4.9798973
C 0.7244044 0.0360165 -3.8154103
C 0.1769597 0.0162410 -2.5310350
N 1.1617828 0.0675370 -1.5681922
C 2.2861580 0.1193292 -2.2667167
N 2.0827966 0.1037780 -3.6134111
H -1.8625819 -0.1122096 -5.6620263
H 3.2742511 0.1708248 -1.8404275
H 2.7834317 0.1345490 -4.3353905
H -2.9057988 -0.1329805 -1.3671674
H -1.4533881 -0.0384046 -0.3902022
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '31')] = qcdb.Molecule("""
0 1
C -5.2998476 2.1696769 -0.1527418
N -4.0012332 2.3280352 0.0113365
C -3.5133825 1.0430020 0.0349501
N -5.6731934 0.8519995 -0.2385098
C -4.5348158 0.1058659 -0.1189924
N -2.1562547 -0.8394339 0.1532486
C -3.2316395 -1.6611847 -0.0119881
N -4.4661255 -1.2464671 -0.1599515
N -2.9527518 -3.0009490 0.0413727
H -6.0192845 2.9682219 -0.2165198
H -2.0059608 -3.2718138 -0.1741253
H -3.6766313 -3.5832294 -0.3425488
H -6.6016641 0.4841764 -0.3616632
H -1.2330572 -1.2973531 0.2284474
C -2.1744618 0.5575368 0.1742023
O -1.1240037 1.1950526 0.2956190
--
0 1
N 1.2638556 -0.1422038 0.1193086
C 1.2024891 -1.5065889 0.0798953
N 2.5150609 -1.9393418 -0.0155197
C 3.3711631 -0.8652191 -0.0318660
C 2.5823266 0.2608198 0.0542710
O 0.2004187 -2.2363595 0.1229392
C 3.1762467 1.5568980 0.0478905
N 4.5883289 1.4099390 -0.0474268
C 5.2845121 0.2404436 -0.1157151
N 4.7218815 -0.9391943 -0.1032764
O 2.6645381 2.6620253 0.1017547
N 6.6513076 0.3472133 -0.2677366
H 7.0896806 1.0869563 0.2575269
H 7.1055907 -0.5431980 -0.1372108
H 2.7741742 -2.9100741 -0.0564635
H 0.4151138 0.4528548 0.1972152
H 5.0828171 2.2880907 -0.1283728
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '32')] = qcdb.Molecule("""
0 1
S -0.2983354 -0.0000513 0.0606545
C -0.2090863 -0.0000888 1.7027085
N 0.9916329 -0.0001803 2.3727915
C 1.1063707 0.0000024 3.7325291
C 0.0163475 0.0001662 4.5304894
C -1.2953719 0.0000721 3.9249070
N -1.2941374 -0.0000268 2.5353745
O -2.3533146 0.0000820 4.5510728
H 0.0943097 0.0003348 5.6041112
H 1.8067829 -0.0002554 1.7802008
H -2.2194333 0.0000304 2.0853365
H 2.1154718 0.0000158 4.1172107
--
0 1
S -4.3480040 0.0005221 1.2455679
C -5.4129697 0.0002108 2.5234518
N -6.7626348 0.0001784 2.2970286
C -7.6987363 0.0000487 3.2957366
C -7.3354268 -0.0001000 4.5945383
C -5.9267360 -0.0001870 4.9466675
N -5.0628029 0.0000198 3.8318987
O -5.4752372 -0.0004103 6.0770807
H -8.0659143 -0.0001724 5.3856633
H -7.0337303 0.0003183 1.3267291
H -4.0549036 0.0000246 4.0487228
H -8.7287725 0.0000774 2.9713072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '33')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 3.0629991 16.2869954 -0.5529998
N 4.3679988 15.6949956 -0.7379998
C 5.4889985 16.5069954 -0.6549998
O 5.3979985 17.7169950 -0.4679999
N 6.6749981 15.8589956 -0.7949998
C 6.8699981 14.5069959 -0.9999997
O 8.0199978 14.0679961 -1.0789997
C 5.6559984 13.7139962 -1.1019997
C 5.7709984 12.2569966 -1.4029996
C 4.4739987 14.3319960 -0.9639997
H 7.5313379 16.4637704 -0.7443448
H 6.3741672 11.7424167 -0.6472968
H 4.7881707 11.7797217 -1.4448876
H 6.2751442 12.0930036 -2.3618343
H 3.5293140 13.8026561 -1.0289747
H 2.3790703 15.9479585 -1.3364316
H 2.6423583 16.0249025 0.4245489
H 3.1730521 17.3682771 -0.6086068
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '34')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
C 18.8919947 9.6579973 9.7709973
N 18.5279948 11.0699969 9.5879973
C 19.3769946 12.1419966 9.6129973
N 18.7759947 13.3089963 9.4319974
C 17.4529951 12.9639964 9.3169974
C 16.2779954 13.7529961 9.1209974
O 16.2219955 14.9839958 9.0219975
N 15.1359958 13.0409963 9.0449975
C 15.0849958 11.6719967 9.1349974
N 13.8449961 11.1639969 9.0359975
N 16.1359955 10.8809970 9.3169974
C 17.2759952 11.5909968 9.3939974
H 14.2561290 13.5779002 8.9264415
H 13.0353973 11.7259537 8.7509445
H 13.7773141 10.1594092 9.0213535
H 17.9866610 9.0649795 9.6385253
H 19.2909706 9.4904943 10.7753660
H 19.6360815 9.3587324 9.0282525
H 20.4431063 12.0114766 9.7460263
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '35')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '36')] = qcdb.Molecule("""
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
--
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '37')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '38')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '39')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
C -1.5982280 -2.9490360 3.3600000
N -2.8308990 -3.5868360 3.3600000
C -4.0005400 -2.9065270 3.3600000
C -4.0107280 -1.5698660 3.3600000
C -2.7192980 -0.9187180 3.3600000
N -1.5949260 -1.5998660 3.3600000
O -0.5980710 -3.6295230 3.3600000
N -2.6531990 0.4024280 3.3600000
H -2.8066410 -4.5810390 3.3600000
H -4.8972920 -3.4971900 3.3600000
H -4.9235800 -1.0089750 3.3600000
H -3.4794940 0.9500750 3.3600000
H -1.7581040 0.8646590 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '40')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
O -0.0130090 1.6513790 3.3600000
C 1.0692420 1.1086750 3.3600000
N 1.1423840 -0.2839060 3.3600000
C 2.2854260 -1.0221180 3.3600000
N 3.4793830 -0.5000700 3.3600000
C 3.4550460 0.8444100 3.3600000
C 2.3724120 1.6891490 3.3600000
N 2.7838090 3.0076580 3.3600000
C 4.0586690 2.9492050 3.3600000
N 4.5367780 1.6576590 3.3600000
N 2.1345620 -2.3493720 3.3600000
H 0.2550220 -0.7614500 3.3600000
H 4.7229940 3.7894010 3.3600000
H 5.4838790 1.3605800 3.3600000
H 2.9609760 -2.8966530 3.3600000
H 1.2381640 -2.7944260 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '41')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.5825730 -0.2790500
C -1.8533500 -0.3518640 -0.0620430
N -0.9943890 0.6521290 0.1149880
C -1.4604570 1.8814980 0.3317590
N -2.7070820 2.2763020 0.4013740
C -3.5527210 1.2640760 0.2228910
C -3.2236500 -0.0504790 -0.0089010
N -4.3580740 -0.8272780 -0.1458710
C -5.3247240 -0.0009840 -0.0001730
N -4.9130980 1.2870000 0.2269330
H -0.7040060 2.6348130 0.4645890
H -6.3651290 -0.2529400 -0.0446000
H -5.4840420 2.0871050 0.3680130
H -0.4093220 -1.7576030 -0.3099130
H -2.0356960 -2.3259680 -0.4101310
--
0 1
O 2.4555320 -0.5209070 3.3788050
C 2.5333330 0.6704300 3.2169230
N 1.4067200 1.4246400 2.9925690
C 1.3497150 2.7756270 2.7939400
N 2.5708460 3.3948650 2.8321660
C 3.7496420 2.7230470 3.0501750
C 3.8036770 1.4072660 3.2434740
O 0.3307920 3.3742620 2.6029400
C 5.0685490 0.6342580 3.4848390
H 2.5588020 4.3783590 2.6906210
H 0.5200190 0.9342720 2.9706210
H 4.6288470 3.3398640 3.0533080
H 5.0316430 0.1233820 4.4405330
H 5.2089370 -0.1230850 2.7216660
H 5.9296670 1.2931580 3.4791940
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '42')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8647480 0.3288050
C 2.4435780 -0.9466670 0.1669230
N 1.9754430 0.3257080 -0.0574310
C 2.7234150 1.4521870 -0.2560600
N 4.0753100 1.2353980 -0.2178340
C 4.6340910 -0.0009940 0.0001750
C 3.9044100 -1.0972430 0.1934740
O 2.2509580 2.5354000 -0.4470600
C 4.4733490 -2.4660930 0.4348390
H 4.6436490 2.0381400 -0.3593790
H 0.9698550 0.4501820 -0.0793790
H 5.7079390 -0.0187620 0.0033080
H 4.1432070 -2.8577080 1.3905330
H 4.1417710 -3.1613140 -0.3283340
H 5.5573000 -2.4391850 0.4291940
--
0 1
N -0.1962490 -2.0987510 2.7709500
C -1.2925710 -1.3740360 2.9879570
N -1.1877890 -0.0569040 3.1649880
C -2.2874510 0.6637280 3.3817590
N -3.5280520 0.2503840 3.4513740
C -3.6172170 -1.0655780 3.2728910
C -2.5783160 -1.9356530 3.0410990
N -3.0394940 -3.2308940 2.9041290
C -4.3072140 -3.1305910 3.0498260
N -4.7312590 -1.8466420 3.2769330
H -2.1182570 1.7178040 3.5145890
H -5.0008230 -3.9459620 3.0054000
H -5.6634530 -1.5349360 3.4180130
H 0.7019450 -1.6625240 2.7400870
H -0.2797430 -3.0783000 2.6398690
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '43')] = qcdb.Molecule("""
0 1
C 2.4313070 1.6249990 -1.4530130
N 3.8007370 1.6249990 -1.6786800
C 4.7029040 1.6249990 -0.6702290
C 4.2995430 1.6249990 0.6041590
C 2.8701050 1.6249990 0.8243640
N 2.0112500 1.6249990 -0.1708960
O 1.6903830 1.6249990 -2.4092590
N 2.3989850 1.6249990 2.0604220
H 4.0848920 1.6249990 -2.6317200
H 5.7382910 1.6249990 -0.9548720
H 4.9943920 1.6249990 1.4196840
H 3.0156050 1.6249990 2.8366040
H 1.4048620 1.6249990 2.2234300
--
0 1
C -2.4313070 -1.6249990 -1.4530130
N -3.8007370 -1.6249990 -1.6786800
C -4.7029040 -1.6249990 -0.6702290
C -4.2995430 -1.6249990 0.6041590
C -2.8701050 -1.6249990 0.8243640
N -2.0112500 -1.6249990 -0.1708960
O -1.6903830 -1.6249990 -2.4092590
N -2.3989850 -1.6249990 2.0604220
H -4.0848920 -1.6249990 -2.6317200
H -5.7382910 -1.6249990 -0.9548720
H -4.9943920 -1.6249990 1.4196840
H -3.0156050 -1.6249990 2.8366040
H -1.4048620 -1.6249990 2.2234300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '44')] = qcdb.Molecule("""
0 1
O -0.4979320 1.6249990 1.9422390
C -1.3595090 1.6249990 1.0916630
N -0.9987390 1.6249990 -0.2553610
C -1.8577170 1.6249990 -1.3106620
N -3.1545590 1.6249990 -1.1831180
C -3.5468800 1.6249990 0.1030790
C -2.7782730 1.6249990 1.2410250
N -3.5769760 1.6249990 2.3678730
C -4.7713760 1.6249990 1.9183280
N -4.8269760 1.6249990 0.5422510
N -1.3040920 1.6249990 -2.5263360
H -0.0072390 1.6249990 -0.4353230
H -5.6628210 1.6249990 2.5121130
H -5.6359190 1.6249990 -0.0329580
H -1.9209400 1.6249990 -3.3022070
H -0.3140390 1.6249990 -2.6726050
--
0 1
O 0.4979320 -1.6249990 1.9422390
C 1.3595090 -1.6249990 1.0916630
N 0.9987390 -1.6249990 -0.2553610
C 1.8577170 -1.6249990 -1.3106620
N 3.1545590 -1.6249990 -1.1831180
C 3.5468800 -1.6249990 0.1030790
C 2.7782730 -1.6249990 1.2410250
N 3.5769760 -1.6249990 2.3678730
C 4.7713760 -1.6249990 1.9183280
N 4.8269760 -1.6249990 0.5422510
N 1.3040920 -1.6249990 -2.5263360
H 0.0072390 -1.6249990 -0.4353230
H 5.6628210 -1.6249990 2.5121130
H 5.6359190 -1.6249990 -0.0329580
H 1.9209400 -1.6249990 -3.3022070
H 0.3140390 -1.6249990 -2.6726050
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '45')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
O -1.5665350 0.5226760 3.1900000
C -1.3848270 -0.6743110 3.1900000
N -0.0830050 -1.1742030 3.1900000
C 0.2658570 -2.4894210 3.1900000
N -0.5995930 -3.4636200 3.1900000
C -1.8707500 -3.0250070 3.1900000
C -2.3395920 -1.7343230 3.1900000
N -3.7206970 -1.7181430 3.1900000
C -4.0590580 -2.9486690 3.1900000
N -2.9784690 -3.8024880 3.1900000
N 1.5747700 -2.7560840 3.1900000
H 0.6453760 -0.4778410 3.1900000
H -5.0634190 -3.3208450 3.1900000
H -2.9886000 -4.7950370 3.1900000
H 1.8398890 -3.7111710 3.1900000
H 2.2750440 -2.0410890 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '46')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
C 3.2985790 0.6087040 3.1900000
N 4.2860790 1.5839520 3.1900000
C 4.0005050 2.9065740 3.1900000
C 2.7324140 3.3293140 3.1900000
C 1.7140620 2.3023070 3.1900000
N 2.0144220 1.0224800 3.1900000
O 3.6366950 -0.5527840 3.1900000
N 0.4371510 2.6477000 3.1900000
H 5.2241270 1.2536560 3.1900000
H 4.8393710 3.5769110 3.1900000
H 2.4810610 4.3708130 3.1900000
H 0.1716470 3.6027840 3.1900000
H -0.2790560 1.9392500 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '47')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6008720 0.1400580
C 1.5033500 -0.3559320 0.0311400
N 0.6443890 0.6596690 -0.0577140
C 1.1104570 1.9032530 -0.1665130
N 2.3570820 2.3026230 -0.2014530
C 3.2027210 1.2786930 -0.1118710
C 2.8736500 -0.0510630 0.0044670
N 4.0080740 -0.8368430 0.0732140
C 4.9747240 -0.0009950 0.0000870
N 4.5630980 1.3018810 -0.1139000
H 0.3540060 2.6652780 -0.2331820
H 6.0151290 -0.2558650 0.0223850
H 5.1340420 2.1112380 -0.1847090
H 0.0593220 -1.7779260 0.1555480
H 1.6856960 -2.3528630 0.2058490
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '48')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8863100 -0.1650310
C -2.7935780 -0.9576130 -0.0837800
N -2.3254430 0.3294740 0.0288250
C -3.0734150 1.4689780 0.1285190
N -4.4253100 1.2496820 0.1093330
C -4.9840910 -0.0010050 -0.0000880
C -4.2544100 -1.1099300 -0.0971060
O -2.6009580 2.5647160 0.2243840
C -4.8233490 -2.4946080 -0.2182500
H -4.9936490 2.0617070 0.1803760
H -1.3198550 0.4553880 0.0398410
H -6.0579390 -0.0189790 -0.0016600
H -4.4932070 -3.1202310 0.6035230
H -4.4917710 -2.9686160 -1.1353540
H -5.9073000 -2.4671550 -0.2167380
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '49')] = qcdb.Molecule("""
0 1
O 1.5241600 -0.5494170 3.3837280
C 1.3439910 0.6454500 3.3087260
N 0.0438450 1.1430380 3.2271380
C -0.3032010 2.4557550 3.1386110
N 0.5626500 3.4294030 3.1191180
C 1.8322280 2.9929620 3.1959900
C 2.2991810 1.7048790 3.2880520
N 3.6791050 1.6903250 3.3455930
C 4.0186060 2.9192810 3.2900230
N 2.9399160 3.7704860 3.1975320
N -1.6107030 2.7204770 3.0698940
H -0.6847300 0.4469420 3.2365720
H 5.0225530 3.2920270 3.3102000
H 2.9511880 4.7614640 3.1419340
H -1.8744930 3.6737330 3.0051240
H -2.3112160 2.0058100 3.0815320
--
0 1
O -2.0303850 -1.8889030 -0.1320850
C -2.7935780 -0.9589290 -0.0670550
N -2.3254430 0.3299270 0.0230710
C -3.0734150 1.4709970 0.1028620
N -4.4253100 1.2514000 0.0875060
C -4.9840910 -0.0010070 -0.0000700
C -4.2544100 -1.1114560 -0.0777210
O -2.6009580 2.5682420 0.1795890
C -4.8233490 -2.4980370 -0.1746800
H -4.9936490 2.0645410 0.1443670
H -1.3198550 0.4560130 0.0318880
H -6.0579390 -0.0190050 -0.0013290
H -4.4932070 -3.1092230 0.6578860
H -4.4917710 -2.9879780 -1.0833720
H -5.9073000 -2.4705620 -0.1736470
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '50')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6030730 0.1120980
C 1.5033500 -0.3564220 0.0249230
N 0.6443890 0.6605760 -0.0461920
C 1.1104570 1.9058690 -0.1332710
N 2.3570820 2.3057880 -0.1612360
C 3.2027210 1.2804500 -0.0895380
C 2.8736500 -0.0511330 0.0035760
N 4.0080740 -0.8379940 0.0585980
C 4.9747240 -0.0009970 0.0000700
N 4.5630980 1.3036710 -0.0911620
H 0.3540060 2.6689420 -0.1866310
H 6.0151290 -0.2562160 0.0179160
H 5.1340420 2.1141400 -0.1478350
H 0.0593220 -1.7803700 0.1244960
H 1.6856960 -2.3560970 0.1647540
--
0 1
C -3.3369590 -0.6409430 3.3908960
N -4.3247580 -1.6157810 3.3763480
C -4.0409560 -2.9359630 3.2899980
C -2.7744220 -3.3565600 3.2141470
C -1.7557370 -2.3300110 3.2303510
N -2.0543620 -1.0525720 3.3148920
O -3.6734450 0.5183010 3.4703070
N -0.4803010 -2.6733740 3.1585030
H -5.2616330 -1.2870980 3.4334500
H -4.8798930 -3.6062030 3.2865630
H -2.5244870 -4.3961080 3.1450650
H -0.2161270 -3.6266280 3.0937180
H 0.2361240 -1.9652240 3.1691180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '51')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6069870 0.0000000
C -1.5033500 -0.3572920 0.0000000
N -0.6443890 0.6621890 0.0000000
C -1.1104570 1.9105230 0.0000000
N -2.3570820 2.3114180 0.0000000
C -3.2027210 1.2835770 0.0000000
C -2.8736500 -0.0512580 0.0000000
N -4.0080740 -0.8400400 0.0000000
C -4.9747240 -0.0009990 0.0000000
N -4.5630980 1.3068540 0.0000000
H -0.3540060 2.6754590 0.0000000
H -6.0151290 -0.2568420 0.0000000
H -5.1340420 2.1193020 0.0000000
H -0.0593220 -1.7847170 0.0000000
H -1.6856960 -2.3618500 0.0000000
--
0 1
O 1.5260840 -0.5520650 3.1800000
C 1.3443760 0.6449210 3.1800000
N 0.0425540 1.1448140 3.1800000
C -0.3063080 2.4600320 3.1800000
N 0.5591430 3.4342300 3.1800000
C 1.8302990 2.9956180 3.1800000
C 2.2991410 1.7049340 3.1800000
N 3.6802460 1.6887540 3.1800000
C 4.0186070 2.9192800 3.1800000
N 2.9380180 3.7730980 3.1800000
N -1.6152210 2.7266950 3.1800000
H -0.6858270 0.4484520 3.1800000
H 5.0229680 3.2914560 3.1800000
H 2.9481490 4.7656470 3.1800000
H -1.8803400 3.6817820 3.1800000
H -2.3154950 2.0117000 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '52')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8935150 0.0000000
C 2.7935780 -0.9612710 0.0000000
N 2.3254430 0.3307330 0.0000000
C 3.0734150 1.4745890 0.0000000
N 4.4253100 1.2544560 0.0000000
C 4.9840910 -0.0010090 0.0000000
C 4.2544100 -1.1141700 0.0000000
O 2.6009580 2.5745130 0.0000000
C 4.8233490 -2.5041370 0.0000000
H 4.9936490 2.0695820 0.0000000
H 1.3198550 0.4571270 0.0000000
H 6.0579390 -0.0190510 0.0000000
H 4.4932070 -3.0557570 0.8731720
H 4.4917710 -3.0562720 -0.8723020
H 5.9073000 -2.4766570 -0.0008860
--
0 1
C -3.3390300 -0.6380930 3.1800000
N -4.3265300 -1.6133420 3.1800000
C -4.0409560 -2.9359630 3.1800000
C -2.7728650 -3.3587040 3.1800000
C -1.7545120 -2.3316960 3.1800000
N -2.0548730 -1.0518690 3.1800000
O -3.6771460 0.5233950 3.1800000
N -0.4776020 -2.6770890 3.1800000
H -5.2645780 -1.2830450 3.1800000
H -4.8798220 -3.6063000 3.1800000
H -2.5215120 -4.4002020 3.1800000
H -0.2120980 -3.6321740 3.1800000
H 0.2386050 -1.9686390 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '53')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8863100 0.1650310
C 2.7935780 -0.9576130 0.0837800
N 2.3254430 0.3294740 -0.0288250
C 3.0734150 1.4689780 -0.1285190
N 4.4253100 1.2496820 -0.1093330
C 4.9840910 -0.0010050 0.0000880
C 4.2544100 -1.1099300 0.0971060
O 2.6009580 2.5647160 -0.2243840
C 4.8233490 -2.4946080 0.2182500
H 4.9936490 2.0617070 -0.1803760
H 1.3198550 0.4553880 -0.0398410
H 6.0579390 -0.0189790 0.0016600
H 4.4932070 -2.9680270 1.1361760
H 4.4917710 -3.1206680 -0.6026110
H 5.9073000 -2.4673100 0.2149720
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '54')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6008720 -0.1400580
C -1.5033500 -0.3559320 -0.0311400
N -0.6443890 0.6596690 0.0577140
C -1.1104570 1.9032530 0.1665130
N -2.3570820 2.3026230 0.2014530
C -3.2027210 1.2786930 0.1118710
C -2.8736500 -0.0510630 -0.0044670
N -4.0080740 -0.8368430 -0.0732140
C -4.9747240 -0.0009950 -0.0000870
N -4.5630980 1.3018810 0.1139000
H -0.3540060 2.6652780 0.2331820
H -6.0151290 -0.2558650 -0.0223850
H -5.1340420 2.1112380 0.1847090
H -0.0593220 -1.7779260 -0.1555480
H -1.6856960 -2.3528630 -0.2058490
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '55')] = qcdb.Molecule("""
0 1
O -1.6803850 -1.8863100 -0.1650310
C -2.4435780 -0.9576130 -0.0837800
N -1.9754430 0.3294740 0.0288250
C -2.7234150 1.4689780 0.1285190
N -4.0753100 1.2496820 0.1093330
C -4.6340910 -0.0010050 -0.0000880
C -3.9044100 -1.1099300 -0.0971060
O -2.2509580 2.5647160 0.2243840
C -4.4733490 -2.4946080 -0.2182500
H -4.6436490 2.0617070 0.1803760
H -0.9698550 0.4553880 0.0398410
H -5.7079390 -0.0189790 -0.0016600
H -4.1432070 -3.1202310 0.6035230
H -4.1417710 -2.9686160 -1.1353540
H -5.5573000 -2.4671550 -0.2167380
--
0 1
O 2.4682050 -0.5383510 3.4250310
C 2.5397670 0.6615740 3.3437800
N 1.4045070 1.4276870 3.2311750
C 1.3398450 2.7892110 3.1314810
N 2.5624500 3.4064220 3.1506670
C 3.7496490 2.7230370 3.2600880
C 3.8111350 1.3970020 3.3571060
O 0.3135610 3.3979790 3.0356160
C 5.0853090 0.6111890 3.4782500
H 2.5449500 4.3974240 3.0796240
H 0.5169590 0.9384830 3.2201590
H 4.6289750 3.3396890 3.2616600
H 5.0964880 0.0341320 4.3961760
H 5.1850460 -0.0902010 2.6573890
H 5.9461980 1.2704040 3.4749720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '56')] = qcdb.Molecule("""
0 1
N 1.3923840 -1.6008720 0.1400580
C 1.8533500 -0.3559320 0.0311400
N 0.9943890 0.6596690 -0.0577140
C 1.4604570 1.9032530 -0.1665130
N 2.7070820 2.3026230 -0.2014530
C 3.5527210 1.2786930 -0.1118710
C 3.2236500 -0.0510630 0.0044670
N 4.3580740 -0.8368430 0.0732140
C 5.3247240 -0.0009950 0.0000870
N 4.9130980 1.3018810 -0.1139000
H 0.7040060 2.6652780 -0.2331820
H 6.3651290 -0.2558650 0.0223850
H 5.4840420 2.1112380 -0.1847090
H 0.4093220 -1.7779260 0.1555480
H 2.0356960 -2.3528630 0.2058490
--
0 1
N -0.1854930 -2.1135550 3.1199420
C -1.2901800 -1.3773270 3.2288600
N -1.1922210 -0.0508040 3.3177130
C -2.3002380 0.6813290 3.4265130
N -3.5435230 0.2716780 3.4614530
C -3.6258080 -1.0537530 3.3718710
C -2.5779730 -1.9361250 3.2555330
N -3.0338720 -3.2386320 3.1867860
C -4.3072070 -3.1306000 3.2599130
N -4.7400060 -1.8346030 3.3739000
H -2.1361640 1.7424510 3.4931820
H -4.9991040 -3.9483280 3.2376150
H -5.6776380 -1.5154120 3.4447090
H 0.7138900 -1.6789650 3.1044520
H -0.2639350 -3.1000580 3.0541510
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '57')] = qcdb.Molecule("""
0 1
N -1.4867430 1.6920980 -2.3336600
C -1.5399110 1.6049230 -1.0055780
N -0.4087210 1.5338080 -0.3037890
C -0.4671620 1.4467290 1.0245780
N -1.5291910 1.4187640 1.7901520
C -2.6502880 1.4904620 1.0763140
C -2.7488050 1.5835760 -0.2917850
N -4.0708590 1.6385980 -0.6895780
C -4.7315520 1.5800700 0.4051650
N -3.9369080 1.4888380 1.5187780
H 0.4880690 1.3933690 1.5165470
H -5.7999030 1.5979160 0.4839400
H -4.2294590 1.4321650 2.4660110
H -0.6065830 1.7044960 -2.8060620
H -2.3312660 1.7447540 -2.8510340
--
0 1
N 1.4867430 -1.6920980 -2.3336600
C 1.5399110 -1.6049230 -1.0055780
N 0.4087210 -1.5338080 -0.3037890
C 0.4671620 -1.4467290 1.0245780
N 1.5291910 -1.4187640 1.7901520
C 2.6502880 -1.4904620 1.0763140
C 2.7488050 -1.5835760 -0.2917850
N 4.0708590 -1.6385980 -0.6895780
C 4.7315520 -1.5800700 0.4051650
N 3.9369080 -1.4888380 1.5187780
H -0.4880690 -1.3933690 1.5165470
H 5.7999030 -1.5979160 0.4839400
H 4.2294590 -1.4321650 2.4660110
H 0.6065830 -1.7044960 -2.8060620
H 2.3312660 -1.7447540 -2.8510340
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '58')] = qcdb.Molecule("""
0 1
O 1.3473090 1.4479140 -0.7794320
C 2.3605260 1.5129430 -0.1308130
N 2.3135810 1.6030670 1.2396240
C 3.3775550 1.6828570 2.0937100
N 4.5954240 1.6675010 1.4671030
C 4.7398420 1.5799270 0.1033200
C 3.7027260 1.5022770 -0.7272960
O 3.2672880 1.7595820 3.2832490
C 3.8153430 1.4053200 -2.2218250
H 5.3872210 1.7243610 2.0648200
H 1.3961730 1.6118840 1.6702820
H 5.7555700 1.5786680 -0.2456340
H 3.3501360 0.4957970 -2.5852230
H 3.3109870 2.2369830 -2.7010640
H 4.8546930 1.4081210 -2.5307720
--
0 1
O -1.3473090 -1.4479140 -0.7794320
C -2.3605260 -1.5129430 -0.1308130
N -2.3135810 -1.6030670 1.2396240
C -3.3775550 -1.6828570 2.0937100
N -4.5954240 -1.6675010 1.4671030
C -4.7398420 -1.5799270 0.1033200
C -3.7027260 -1.5022770 -0.7272960
O -3.2672880 -1.7595820 3.2832490
C -3.8153430 -1.4053200 -2.2218250
H -5.3872210 -1.7243610 2.0648200
H -1.3961730 -1.6118840 1.6702820
H -5.7555700 -1.5786680 -0.2456340
H -3.3109870 -2.2369830 -2.7010640
H -3.3501360 -0.4957970 -2.5852230
H -4.8546930 -1.4081210 -2.5307720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '59')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.6069870 0.0000000
C -1.8533500 -0.3572920 0.0000000
N -0.9943890 0.6621890 0.0000000
C -1.4604570 1.9105230 0.0000000
N -2.7070820 2.3114180 0.0000000
C -3.5527210 1.2835770 0.0000000
C -3.2236500 -0.0512580 0.0000000
N -4.3580740 -0.8400400 0.0000000
C -5.3247240 -0.0009990 0.0000000
N -4.9130980 1.3068540 0.0000000
H -0.7040060 2.6754590 0.0000000
H -6.3651290 -0.2568420 0.0000000
H -5.4840420 2.1193020 0.0000000
H -0.4093220 -1.7847170 0.0000000
H -2.0356960 -2.3618500 0.0000000
--
0 1
O 2.4724400 -0.5441800 3.2400000
C 2.5419170 0.6586150 3.2400000
N 1.4037670 1.4287050 3.2400000
C 1.3365470 2.7937510 3.2400000
N 2.5596440 3.4102840 3.2400000
C 3.7496510 2.7230340 3.2400000
C 3.8136270 1.3935720 3.2400000
O 0.3078020 3.4059050 3.2400000
C 5.0909100 0.6034800 3.2400000
H 2.5403210 4.4037960 3.2400000
H 0.5159370 0.9398900 3.2400000
H 4.6290170 3.3396300 3.2400000
H 5.1480540 -0.0368430 4.1131720
H 5.1471940 -0.0381040 2.3676980
H 5.9516930 1.2628420 3.2391140
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '60')] = qcdb.Molecule("""
0 1
N -0.1818990 -2.1185030 3.2400000
C -1.2893810 -1.3784270 3.2400000
N -1.1937020 -0.0487650 3.2400000
C -2.3045120 0.6872100 3.2400000
N -3.5486930 0.2787930 3.2400000
C -3.6286790 -1.0498020 3.2400000
C -2.5778590 -1.9362830 3.2400000
N -3.0319930 -3.2412190 3.2400000
C -4.3072050 -3.1306030 3.2400000
N -4.7429290 -1.8305800 3.2400000
H -2.1421480 1.7506870 3.2400000
H -4.9985290 -3.9491190 3.2400000
H -5.6823780 -1.5088880 3.2400000
H 0.7178820 -1.6844600 3.2400000
H -0.2586520 -3.1073290 3.2400000
--
0 1
O 1.6803850 -1.8935150 0.0000000
C 2.4435780 -0.9612710 0.0000000
N 1.9754430 0.3307330 0.0000000
C 2.7234150 1.4745890 0.0000000
N 4.0753100 1.2544560 0.0000000
C 4.6340910 -0.0010090 0.0000000
C 3.9044100 -1.1141700 0.0000000
O 2.2509580 2.5745130 0.0000000
C 4.4733490 -2.5041370 0.0000000
H 4.6436490 2.0695820 0.0000000
H 0.9698550 0.4571270 0.0000000
H 5.7079390 -0.0190510 0.0000000
H 4.1432070 -3.0557570 0.8731720
H 4.1417710 -3.0562720 -0.8723020
H 5.5573000 -2.4766570 -0.0008860
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '61')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 3.5239990 12.7489964 2.4389993
N 4.9449986 12.8539964 2.2449994
C 5.8529984 11.8509967 2.0569994
N 7.1019980 12.2539966 2.0409994
C 6.9979980 13.6219962 2.2459994
C 7.9829978 14.6269959 2.3449993
N 9.3019974 14.3749960 2.2649994
N 7.5379979 15.8889955 2.5409993
C 6.2229983 16.1279955 2.6329993
N 5.2169985 15.2499957 2.5399993
C 5.6739984 14.0109961 2.3699993
H 9.6079353 13.4170922 2.2138804
H 9.9620862 15.1183578 2.4869203
H 5.5326604 10.8241690 1.9326585
H 5.9571083 17.1738952 2.7655592
H 3.0968081 13.7487911 2.3499173
H 3.0789261 12.1004316 1.6796125
H 3.2840151 12.3521085 3.4311880
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '62')] = qcdb.Molecule("""
0 1
C 3.0629991 16.2869954 -0.5529998
N 4.3679988 15.6949956 -0.7379998
C 5.4889985 16.5069954 -0.6549998
O 5.3979985 17.7169950 -0.4679999
N 6.6749981 15.8589956 -0.7949998
C 6.8699981 14.5069959 -0.9999997
O 8.0199978 14.0679961 -1.0789997
C 5.6559984 13.7139962 -1.1019997
C 5.7709984 12.2569966 -1.4029996
C 4.4739987 14.3319960 -0.9639997
H 7.5313379 16.4637704 -0.7443448
H 6.3741672 11.7424167 -0.6472968
H 4.7881707 11.7797217 -1.4448876
H 6.2751442 12.0930036 -2.3618343
H 3.5293140 13.8026561 -1.0289747
H 2.3790703 15.9479585 -1.3364316
H 2.6423583 16.0249025 0.4245489
H 3.1730521 17.3682771 -0.6086068
--
0 1
C 8.5479976 21.7979939 2.3959993
N 9.1919974 20.5259942 2.6589993
C 8.4229976 19.3799946 2.5429993
O 7.2269980 19.3959946 2.3429993
N 9.0979975 18.2049949 2.7069992
C 10.4579971 18.0869949 2.9379992
O 10.9519969 16.9699952 3.0289992
C 11.2079969 19.3189946 3.0599991
C 12.6759964 19.2659946 3.3619991
C 10.5419970 20.4719943 2.8979992
H 7.4741299 21.6651819 2.5133333
H 8.9049615 22.5495287 3.1049871
H 8.7503455 22.1445498 1.3760436
H 11.0339909 21.4374260 2.9618352
H 13.2133913 18.6878638 2.6029743
H 13.1061963 20.2701373 3.4050200
H 12.8619664 18.7673097 4.3193848
H 8.5371916 17.3217571 2.6353613
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '63')] = qcdb.Molecule("""
0 1
C 10.7049970 9.6579973 11.8009967
N 11.0689969 11.0699969 11.9839966
C 10.2199971 12.1419966 11.9589966
N 10.8209970 13.3089963 12.1399966
C 12.1439966 12.9639964 12.2549966
C 13.3189963 13.7529961 12.4509965
O 13.3749963 14.9839958 12.5499965
N 14.4609959 13.0409963 12.5269965
C 14.5119959 11.6719967 12.4369965
N 15.7519956 11.1639969 12.5359965
N 13.4609962 10.8809970 12.2549966
C 12.3209965 11.5909968 12.1779966
H 11.6087247 9.0642815 11.9411017
H 10.3130781 9.4887283 10.7941210
H 9.9552752 9.3611644 12.5389945
H 15.3408647 13.5779012 12.6455145
H 9.1538724 12.0114576 11.8260867
H 15.8197976 10.1594152 12.5501065
H 16.5616854 11.7259467 12.8207994
--
0 1
C 18.8919947 9.6579973 9.7709973
N 18.5279948 11.0699969 9.5879973
C 19.3769946 12.1419966 9.6129973
N 18.7759947 13.3089963 9.4319974
C 17.4529951 12.9639964 9.3169974
C 16.2779954 13.7529961 9.1209974
O 16.2219955 14.9839958 9.0219975
N 15.1359958 13.0409963 9.0449975
C 15.0849958 11.6719967 9.1349974
N 13.8449961 11.1639969 9.0359975
N 16.1359955 10.8809970 9.3169974
C 17.2759952 11.5909968 9.3939974
H 14.2561290 13.5779002 8.9264415
H 13.0353973 11.7259537 8.7509445
H 13.7773141 10.1594092 9.0213535
H 17.9866610 9.0649795 9.6385253
H 19.2909706 9.4904943 10.7753660
H 19.6360815 9.3587324 9.0282525
H 20.4431063 12.0114766 9.7460263
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '64')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
N 19.2499946 14.4959959 12.7549964
C 18.0179950 13.8469961 12.8649964
O 17.9949950 12.6419965 13.1599963
N 16.9029953 14.5549959 12.6909964
C 16.9229953 15.9259955 12.3859965
N 15.7659956 16.5099954 12.2369966
C 18.1749949 16.5639954 12.3049966
C 19.2759946 15.8539956 12.4789965
H 20.2269943 16.4009954 12.3929965
H 18.2949949 17.6379951 12.1019966
H 14.9229958 15.9769955 12.3109965
H 15.7219956 17.4909951 12.0479966
C 20.4910143 13.7460371 12.9439604
H 20.1968603 12.7260934 13.1854983
H 21.0918101 13.7537151 12.0291826
H 21.0763281 14.1698120 13.7655701
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '65')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
C 18.8920000 9.6580000 9.7710000
N 18.5280000 11.0700000 9.5880000
C 19.3770000 12.1420000 9.6130000
N 18.7760000 13.3090000 9.4320000
C 17.4530000 12.9640000 9.3170000
C 16.2780000 13.7530000 9.1210000
O 16.2220000 14.9840000 9.0220000
N 15.1360000 13.0410000 9.0450000
C 15.0850000 11.6720000 9.1350000
N 13.8450000 11.1640000 9.0360000
N 16.1360000 10.8810000 9.3170000
C 17.2760000 11.5910000 9.3940000
H 14.2561290 13.5779040 8.9264920
H 13.0354310 11.7259420 8.7508330
H 13.7773690 10.1594100 9.0211800
H 17.9880060 9.0643740 9.6322420
H 19.2851540 9.4890700 10.7774400
H 19.6407390 9.3607520 9.0321720
H 20.4431070 12.0114850 9.7460660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '66')] = qcdb.Molecule("""
0 1
C 9.1690000 13.6920000 8.6010000
N 10.3470000 14.4960000 8.8170000
C 11.5790000 13.8470000 8.7070000
O 11.6020000 12.6420000 8.4120000
N 12.6940000 14.5550000 8.8810000
C 12.6740000 15.9260000 9.1860000
N 13.8310000 16.5100000 9.3350000
C 11.4220000 16.5640000 9.2670000
C 10.3210000 15.8540000 9.0930000
H 9.1403680 12.8642760 9.3131620
H 8.2785600 14.3117950 8.7260530
H 9.1795130 13.2651190 7.5953140
H 11.3501160 17.6252970 9.4808030
H 9.3300790 16.2918180 9.1491660
H 14.7113690 15.9651740 9.2135180
H 13.8876420 17.4962710 9.5342540
--
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '67')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '68')] = qcdb.Molecule("""
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '69')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '70')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '71')] = qcdb.Molecule("""
0 1
O -1.2390176 -2.5490521 0.6548924
C -1.0284571 -1.3714583 0.9008651
N -0.0318511 -0.9949528 1.8248233
C 0.3841646 0.2706806 2.1182164
N -0.1910285 1.3513281 1.6527710
C -1.2092305 1.0513624 0.8089237
C -1.6565083 -0.1915101 0.3706051
N -2.6541580 -0.0639048 -0.5661534
C -2.8177333 1.2431899 -0.6803818
N -1.9753657 1.9574414 0.1290579
N 1.4525454 0.3558875 2.9872621
H 0.4866119 -1.7695272 2.2174674
H -3.5338415 1.7253425 -1.3240899
H -1.9138820 2.9580997 0.2181746
H 1.7298659 1.3225221 3.0797421
H 2.2376547 -0.1901480 2.6476325
--
0 1
C 2.2123373 -0.0590839 -0.4645529
N 2.1205577 1.1822577 -1.1169007
C 1.2003987 1.4553092 -2.0711004
C 0.3300220 0.4917324 -2.4615962
C 0.4626198 -0.7818118 -1.8195186
N 1.3658705 -1.0412675 -0.8919664
O 3.0203933 -0.1851683 0.4516286
N -0.3645719 -1.7922584 -2.1870353
H 2.7522574 1.8928231 -0.7832658
H 1.2077958 2.4531651 -2.4849122
H -0.4090987 0.6756472 -3.2236499
H -1.2619684 -1.5144746 -2.5505972
H -0.4171470 -2.5417743 -1.5096178
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '72')] = qcdb.Molecule("""
0 1
O -1.6144948 -2.7570519 -0.2060980
C -1.1842160 -1.7852952 0.3968248
N -0.0652525 -1.9057030 1.2481671
C 0.5967440 -0.9002665 1.8887926
N 0.1973220 0.3471640 1.8866901
C -0.9263931 0.5128083 1.1439041
C -1.6366545 -0.4213094 0.3921297
N -2.6684418 0.1720265 -0.2870095
C -2.5827038 1.4477282 0.0619406
N -1.5537529 1.7066351 0.9265244
N 1.7211367 -1.2836170 2.5904485
H 0.3310406 -2.8355291 1.2850314
H -3.2456406 2.2307336 -0.2693001
C -1.1552104 2.9806914 1.4836593
H 2.1791458 -0.4655292 2.9651066
H 2.3669253 -1.7840635 1.9899163
H -0.1579041 3.2445863 1.1378850
H -1.8652414 3.7371235 1.1620610
H -1.1470859 2.9258678 2.5690805
--
0 1
C 1.9196368 -0.2893692 -0.7963336
N 1.8412129 1.1192423 -0.8402021
C 0.8544988 1.7477483 -1.5184980
C -0.1003666 1.0437295 -2.1815479
C -0.0071603 -0.3764662 -2.1036673
N 0.9713285 -0.9972682 -1.4703627
O 2.8208711 -0.8014248 -0.1303355
N -0.9502154 -1.1428384 -2.7108142
C 2.8076706 1.8576425 -0.0455040
H 0.8766575 2.8292157 -1.5014613
H -0.8834995 1.5417865 -2.7295416
H -1.8580411 -0.7155517 -2.8051204
H -0.9899772 -2.0895195 -2.3567972
H 3.8087243 1.5028551 -0.2699237
H 2.6081184 1.6978177 1.0127736
H 2.7199672 2.9140199 -0.2854865
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '73')] = qcdb.Molecule("""
0 1
N 0.2793014 2.4068393 -0.6057517
C -1.0848570 2.4457461 -0.5511608
H -1.6594403 3.0230294 -1.2560905
N -1.5977117 1.7179877 0.4287543
C -0.4897255 1.1714358 1.0301910
C -0.3461366 0.2914710 2.1172343
N -1.4187090 -0.1677767 2.8101441
H -1.2388750 -0.9594802 3.4047578
H -2.2918734 -0.1788223 2.3073619
N 0.8857630 -0.0700763 2.4919494
C 1.9352348 0.4072878 1.7968022
H 2.9060330 0.0788414 2.1458181
N 1.9409775 1.2242019 0.7402202
C 0.6952186 1.5779858 0.4063984
H 0.8610073 2.8298045 -1.3104502
--
0 1
N 1.2754606 -0.6478993 -1.9779104
C 1.4130533 -1.5536850 -0.9550667
H 2.4258769 -1.8670780 -0.7468778
C 0.3575976 -2.0239499 -0.2530575
C 0.4821292 -3.0179494 0.8521221
H 0.1757705 -2.5756065 1.7986281
H -0.1601691 -3.8770412 0.6639498
H 1.5112443 -3.3572767 0.9513659
C -0.9684711 -1.5298112 -0.5939792
O -2.0029280 -1.8396957 -0.0199453
N -0.9956916 -0.6383870 -1.6720420
H -1.9014057 -0.2501720 -1.8985760
C 0.0684702 -0.1191762 -2.3763759
O -0.0397875 0.7227006 -3.2531083
H 2.0853289 -0.2760176 -2.4454577
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '74')] = qcdb.Molecule("""
0 1
N -0.3455004 1.7703632 1.4950792
C -1.6474050 1.3634505 1.5386766
H -2.4523693 2.0803127 1.5703490
N -1.8053639 0.0450392 1.5375118
C -0.5193842 -0.4240596 1.4834056
C 0.0152186 -1.7249725 1.4821754
N -0.7782381 -2.8218524 1.5417158
H -0.3281681 -3.6995564 1.3432557
H -1.7192874 -2.7111068 1.1983318
N 1.3452903 -1.8718583 1.4651757
C 2.1159101 -0.7701212 1.4213994
H 3.1830548 -0.9527061 1.4028830
N 1.7419114 0.5131994 1.4043323
C 0.4096081 0.6245403 1.4501833
C 0.1512980 3.1326941 1.4689984
H -0.0424219 3.5749692 0.4946347
H -0.3347704 3.7141185 2.2479916
H 1.2201020 3.0964449 1.6609900
--
0 1
N 0.8076098 1.0547322 -1.6591556
C 1.2548662 -0.2426109 -1.7103022
H 2.3275169 -0.3452707 -1.8079765
C 0.4450062 -1.3265062 -1.6516166
C 0.9521849 -2.7269269 -1.7314581
H 0.7400336 -3.2617591 -0.8079097
H 0.4633129 -3.2624261 -2.5442266
H 2.0282910 -2.7371647 -1.8922437
C -0.9813923 -1.1031917 -1.5070970
O -1.8286775 -1.9792278 -1.3834794
N -1.3482304 0.2425066 -1.5277277
H -2.3301840 0.4328271 -1.3774912
C -0.5338001 1.3531361 -1.5364698
O -0.9719491 2.4936714 -1.4469221
C 1.7769330 2.1404377 -1.6201082
H 2.2553696 2.1734198 -0.6418794
H 2.5269559 1.9765736 -2.3901046
H 1.2518503 3.0690500 -1.8139519
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '75')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.9181960 -0.9215090 3.4000000
N -0.3693690 -1.5141310 3.4000000
C -1.5252510 -0.8082010 3.4000000
C -1.4858600 0.5568310 3.4000000
C -0.1723650 1.1455600 3.4000000
N 0.9526960 0.4540270 3.4000000
O 1.9020460 -1.6508420 3.4000000
N -0.0596430 2.5018840 3.4000000
H -0.3693760 -2.5309310 3.4000000
H -2.4533460 -1.3813360 3.4000000
H -2.3977890 1.1506030 3.4000000
H -0.8684590 3.1052900 3.4000000
H 0.8696630 2.9040660 3.4000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '76')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.2571480 0.3344260 3.3000000
N 1.1265920 -1.0769480 3.3000000
C -0.0627020 -1.7250070 3.3000000
C -1.2251600 -1.0083780 3.3000000
C -1.0782670 0.4235070 3.3000000
N 0.0831490 1.0520730 3.3000000
O 2.3806940 0.8218000 3.3000000
N -2.1965170 1.1992900 3.3000000
H 2.0071630 -1.5853550 3.3000000
H -0.0304010 -2.8153280 3.3000000
H -2.1953460 -1.5012440 3.3000000
H -3.1234900 0.8005370 3.3000000
H -2.0801630 2.2051830 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '77')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.3389520 1.2559350 3.3000000
N 1.4959600 0.4371830 3.3000000
C 1.4625490 -0.9168050 3.3000000
C 0.2607010 -1.5652080 3.3000000
C -0.9059010 -0.7220530 3.3000000
N -0.8695470 0.5980460 3.3000000
O 0.4786470 2.4726420 3.3000000
N -2.1368730 -1.3025950 3.3000000
H 2.3765390 0.9455770 3.3000000
H 2.4229450 -1.4339920 3.3000000
H 0.2024430 -2.6518470 3.3000000
H -2.2550290 -2.3047530 3.3000000
H -2.9498260 -0.6988840 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '78')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 0.9215090 3.3000000
N 0.3693690 1.5141310 3.3000000
C 1.5252510 0.8082010 3.3000000
C 1.4858600 -0.5568310 3.3000000
C 0.1723650 -1.1455600 3.3000000
N -0.9526960 -0.4540270 3.3000000
O -1.9020460 1.6508420 3.3000000
N 0.0596430 -2.5018840 3.3000000
H 0.3693760 2.5309310 3.3000000
H 2.4533460 1.3813360 3.3000000
H 2.3977890 -1.1506030 3.3000000
H 0.8684590 -3.1052900 3.3000000
H -0.8696630 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '79')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.9181950 -0.9215090 3.3000000
N 0.6306310 -1.5141310 3.3000000
C -0.5252510 -0.8082010 3.3000000
C -0.4858600 0.5568310 3.3000000
C 0.8276350 1.1455600 3.3000000
N 1.9526960 0.4540270 3.3000000
O 2.9020460 -1.6508420 3.3000000
N 0.9403570 2.5018840 3.3000000
H 0.6306240 -2.5309310 3.3000000
H -1.4533460 -1.3813360 3.3000000
H -1.3977890 1.1506030 3.3000000
H 0.1315410 3.1052900 3.3000000
H 1.8696630 2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '80')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.9181960 0.0784910 3.3000000
N -0.3693690 -0.5141310 3.3000000
C -1.5252510 0.1917990 3.3000000
C -1.4858600 1.5568310 3.3000000
C -0.1723650 2.1455600 3.3000000
N 0.9526960 1.4540270 3.3000000
O 1.9020460 -0.6508420 3.3000000
N -0.0596430 3.5018840 3.3000000
H -0.3693760 -1.5309310 3.3000000
H -2.4533460 -0.3813360 3.3000000
H -2.3977890 2.1506030 3.3000000
H -0.8684590 4.1052890 3.3000000
H 0.8696630 3.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '81')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 2.9181950 -2.9215090 3.3000000
N 1.6306310 -3.5141310 3.3000000
C 0.4747490 -2.8082010 3.3000000
C 0.5141400 -1.4431690 3.3000000
C 1.8276350 -0.8544400 3.3000000
N 2.9526960 -1.5459730 3.3000000
O 3.9020460 -3.6508420 3.3000000
N 1.9403570 0.5018840 3.3000000
H 1.6306240 -4.5309310 3.3000000
H -0.4533460 -3.3813360 3.3000000
H -0.3977890 -0.8493970 3.3000000
H 1.1315410 1.1052900 3.3000000
H 2.8696630 0.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '82')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 0.0818040 0.9215090 3.3000000
N 1.3693690 1.5141310 3.3000000
C 2.5252510 0.8082010 3.3000000
C 2.4858600 -0.5568310 3.3000000
C 1.1723650 -1.1455600 3.3000000
N 0.0473040 -0.4540270 3.3000000
O -0.9020460 1.6508420 3.3000000
N 1.0596430 -2.5018840 3.3000000
H 1.3693760 2.5309310 3.3000000
H 3.4533460 1.3813360 3.3000000
H 3.3977890 -1.1506030 3.3000000
H 1.8684590 -3.1052900 3.3000000
H 0.1303370 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '83')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -1.9181960 0.9215090 3.3000000
N -0.6306310 1.5141310 3.3000000
C 0.5252510 0.8082010 3.3000000
C 0.4858600 -0.5568310 3.3000000
C -0.8276350 -1.1455600 3.3000000
N -1.9526960 -0.4540270 3.3000000
O -2.9020460 1.6508420 3.3000000
N -0.9403570 -2.5018840 3.3000000
H -0.6306240 2.5309320 3.3000000
H 1.4533460 1.3813360 3.3000000
H 1.3977890 -1.1506030 3.3000000
H -0.1315410 -3.1052900 3.3000000
H -1.8696630 -2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '84')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 1.9215090 3.3000000
N 0.3693690 2.5141310 3.3000000
C 1.5252510 1.8082010 3.3000000
C 1.4858600 0.4431690 3.3000000
C 0.1723650 -0.1455600 3.3000000
N -0.9526960 0.5459730 3.3000000
O -1.9020460 2.6508420 3.3000000
N 0.0596430 -1.5018840 3.3000000
H 0.3693760 3.5309310 3.3000000
H 2.4533460 2.3813360 3.3000000
H 2.3977890 -0.1506030 3.3000000
H 0.8684590 -2.1052900 3.3000000
H -0.8696630 -1.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '85')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 -0.0784910 3.3000000
N 0.3693690 0.5141310 3.3000000
C 1.5252510 -0.1917990 3.3000000
C 1.4858600 -1.5568310 3.3000000
C 0.1723650 -2.1455600 3.3000000
N -0.9526960 -1.4540270 3.3000000
O -1.9020460 0.6508420 3.3000000
N 0.0596430 -3.5018840 3.3000000
H 0.3693760 1.5309310 3.3000000
H 2.4533460 0.3813360 3.3000000
H 2.3977890 -2.1506030 3.3000000
H 0.8684590 -4.1052900 3.3000000
H -0.8696630 -3.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '86')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C 1.0818040 -1.0784910 3.3000000
N 2.3693690 -0.4858690 3.3000000
C 3.5252510 -1.1917990 3.3000000
C 3.4858600 -2.5568310 3.3000000
C 2.1723650 -3.1455600 3.3000000
N 1.0473050 -2.4540270 3.3000000
O 0.0979540 -0.3491580 3.3000000
N 2.0596430 -4.5018840 3.3000000
H 2.3693760 0.5309310 3.3000000
H 4.4533460 -0.6186640 3.3000000
H 4.3977890 -3.1506030 3.3000000
H 2.8684590 -5.1052900 3.3000000
H 1.1303370 -4.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '87')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 2.9215090 3.3000000
N 0.3693690 3.5141310 3.3000000
C 1.5252510 2.8082010 3.3000000
C 1.4858600 1.4431690 3.3000000
C 0.1723660 0.8544400 3.3000000
N -0.9526960 1.5459730 3.3000000
O -1.9020460 3.6508420 3.3000000
N 0.0596430 -0.5018840 3.3000000
H 0.3693760 4.5309310 3.3000000
H 2.4533460 3.3813360 3.3000000
H 2.3977890 0.8493970 3.3000000
H 0.8684590 -1.1052900 3.3000000
H -0.8696630 -0.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '88')] = qcdb.Molecule("""
0 1
C 0.9181960 -0.9215090 0.0000000
N -0.3693690 -1.5141310 0.0000000
C -1.5252510 -0.8082010 0.0000000
C -1.4858600 0.5568310 0.0000000
C -0.1723650 1.1455600 0.0000000
N 0.9526960 0.4540270 0.0000000
O 1.9020460 -1.6508420 0.0000000
N -0.0596430 2.5018840 0.0000000
H -0.3693760 -2.5309310 0.0000000
H -2.4533460 -1.3813360 0.0000000
H -2.3977890 1.1506030 0.0000000
H -0.8684590 3.1052900 0.0000000
H 0.8696630 2.9040660 0.0000000
--
0 1
C -0.9181960 -0.9215090 3.3000000
N 0.3693690 -1.5141310 3.3000000
C 1.5252510 -0.8082010 3.3000000
C 1.4858600 0.5568310 3.3000000
C 0.1723650 1.1455600 3.3000000
N -0.9526960 0.4540270 3.3000000
O -1.9020460 -1.6508420 3.3000000
N 0.0596430 2.5018840 3.3000000
H 0.3693760 -2.5309310 3.3000000
H 2.4533460 -1.3813360 3.3000000
H 2.3977890 1.1506030 3.3000000
H 0.8684590 3.1052900 3.3000000
H -0.8696630 2.9040660 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '89')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
N 2.0400330 1.0244080 3.3000000
C 3.1994670 0.3550080 3.3000000
N 3.4122460 -0.9805480 3.3000000
C 2.3435740 -1.7936000 3.3000000
C 1.0536410 -1.2239910 3.3000000
C 1.0005580 0.1728010 3.3000000
N -0.3470950 0.4608810 3.3000000
C -1.0123290 -0.7389400 3.3000000
N -0.2054550 -1.7913170 3.3000000
N 2.5320410 -3.1331060 3.3000000
H 4.1005830 0.9647080 3.3000000
H -2.0940000 -0.7905040 3.3000000
H -0.7522350 1.3896650 3.3000000
H 3.4734100 -3.4971280 3.3000000
H 1.7398820 -3.7580580 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '90')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920590 0.0000000
C 0.2392880 -1.4664590 0.0000000
N 1.4831650 -0.7585720 0.0000000
C 1.6585390 0.6049970 0.0000000
N 0.6694090 1.4698410 0.0000000
C -0.5424070 0.8439610 0.0000000
C -0.8433090 -0.5171760 0.0000000
N -2.2044110 -0.7367510 0.0000000
C -2.7203200 0.4816210 0.0000000
N -1.7621040 1.4687240 0.0000000
N 2.9429720 1.0619610 0.0000000
H 2.2894380 -1.3782560 0.0000000
H -3.7780480 0.7114080 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608880 0.0000000
H 3.7442410 0.4525240 0.0000000
--
0 1
O -3.4927120 1.0318190 3.3000000
C -2.2857320 0.8189950 3.3000000
N -1.8045960 -0.5289080 3.3000000
C -0.4921960 -0.9383990 3.3000000
N 0.5312690 -0.1144750 3.3000000
C 0.1253280 1.1876140 3.3000000
C -1.1628790 1.7203040 3.3000000
N -1.1427660 3.0988560 3.3000000
C 0.1466840 3.3953590 3.3000000
N 0.9523970 2.2802920 3.3000000
N -0.2652140 -2.2826700 3.3000000
H -2.5548740 -1.2153250 3.3000000
H 0.5566520 4.3971160 3.3000000
H 1.9651610 2.2473370 3.3000000
H 0.6944640 -2.5926540 3.3000000
H -1.0045320 -2.9659380 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '91')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
C 0.2481770 -2.1847000 3.3000000
N 0.9375100 -0.9462160 3.3000000
C 0.3220220 0.2602560 3.3000000
C -1.0420250 0.3253500 3.3000000
C -1.7294600 -0.9392870 3.3000000
N -1.1259700 -2.1139300 3.3000000
O 0.9001510 -3.2214350 3.3000000
N -3.0904320 -0.9479780 3.3000000
H 1.9513330 -1.0239520 3.3000000
H 0.9644400 1.1418140 3.3000000
H -1.5643350 1.2800070 3.3000000
H -3.6302290 -0.0953930 3.3000000
H -3.5624890 -1.8438140 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '92')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
N -2.9334410 0.9976940 -3.3000000
C -2.6871670 2.3136480 -3.3000000
N -1.4981170 2.9579650 -3.3000000
C -0.3762570 2.2200360 -3.3000000
C -0.4850590 0.8141390 -3.3000000
C -1.7849120 0.3001090 -3.3000000
N -1.6089980 -1.0667170 -3.3000000
C -0.2563330 -1.2956460 -3.3000000
N 0.4682790 -0.1850240 -3.3000000
N 0.8245940 2.8427340 -3.3000000
H -3.5615660 2.9610820 -3.3000000
H 0.1515920 -2.2987750 -3.3000000
H -2.3504750 -1.7573600 -3.3000000
H 0.8552610 3.8515680 -3.3000000
H 1.6771880 2.3031370 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '93')] = qcdb.Molecule("""
0 1
C -1.2210000 -0.4488000 0.0000000
N -1.2210000 0.9686000 0.0000000
C -0.0964540 1.7234490 0.0000000
C 1.1270700 1.1169400 0.0000000
C 1.1126920 -0.3223880 0.0000000
N 0.0141100 -1.0552590 0.0000000
O -2.2948780 -1.0375910 0.0000000
N 2.2976450 -0.9918710 0.0000000
H -2.1446570 1.3937360 0.0000000
H -0.2290470 2.8061600 0.0000000
H 2.0477340 1.6970750 0.0000000
H 3.1839480 -0.5094300 0.0000000
H 2.2744390 -2.0042050 0.0000000
--
0 1
C 0.8210000 1.4488000 3.3000000
N 0.8210000 0.0314000 3.3000000
C -0.3035460 -0.7234490 3.3000000
C -1.5270700 -0.1169400 3.3000000
C -1.5126920 1.3223880 3.3000000
N -0.4141100 2.0552590 3.3000000
O 1.8948780 2.0375920 3.3000000
N -2.6976450 1.9918700 3.3000000
H 1.7446570 -0.3937350 3.3000000
H -0.1709520 -1.8061600 3.3000000
H -2.4477340 -0.6970760 3.3000000
H -3.5839490 1.5094290 3.3000000
H -2.6744400 3.0042040 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '94')] = qcdb.Molecule("""
0 1
N -1.9000000 -0.3579200 0.0000000
C -1.9000000 0.9808800 0.0000000
N -0.8497640 1.8329300 0.0000000
C 0.3886960 1.3139590 0.0000000
C 0.5403660 -0.0879600 0.0000000
C -0.6427490 -0.8323270 0.0000000
N -0.2184070 -2.1434690 0.0000000
C 1.1532860 -2.1196680 0.0000000
N 1.6612330 -0.8947060 0.0000000
N 1.4545080 2.1469300 0.0000000
H -2.8785740 1.4564190 0.0000000
H 1.7387770 -3.0306410 0.0000000
H -0.8201870 -2.9587230 0.0000000
H 1.2990760 3.1441900 0.0000000
H 2.3918120 1.7733760 0.0000000
--
0 1
O 0.8290540 -2.7349420 3.3000000
C 0.6180080 -1.5257000 3.3000000
N 1.6882380 -0.6117020 3.3000000
C 1.6473780 0.7731900 3.3000000
N 0.3483000 1.2653660 3.3000000
C -0.7746550 0.4682380 3.3000000
C -0.6885800 -0.8807150 3.3000000
O 2.6370680 1.4932450 3.3000000
H -1.5708660 -1.5073280 3.3000000
H 2.6203010 -1.0186580 3.3000000
H 0.2745460 2.2757330 3.3000000
H -1.7216600 0.9981100 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '95')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
C 2.5140740 0.5212730 -3.3000000
N 1.2263530 1.1135940 -3.3000000
C 0.0706410 0.4073870 -3.3000000
C 0.1103570 -0.9576360 -3.3000000
C 1.4239940 -1.5460500 -3.3000000
N 2.5488900 -0.8542470 -3.3000000
O 3.4977500 1.2508430 -3.3000000
N 1.5370420 -2.9023480 -3.3000000
H 1.2261020 2.1303940 -3.3000000
H -0.8575930 0.9802990 -3.3000000
H -0.8014280 -1.5516270 -3.3000000
H 0.7283690 -3.5059470 -3.3000000
H 2.4664440 -3.3043070 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '96')] = qcdb.Molecule("""
0 1
C -1.2210000 -0.4488000 0.0000000
N -1.2210000 0.9686000 0.0000000
C -0.0964540 1.7234490 0.0000000
C 1.1270700 1.1169400 0.0000000
C 1.1126920 -0.3223880 0.0000000
N 0.0141100 -1.0552590 0.0000000
O -2.2948780 -1.0375910 0.0000000
N 2.2976450 -0.9918710 0.0000000
H -2.1446570 1.3937360 0.0000000
H -0.2290470 2.8061600 0.0000000
H 2.0477340 1.6970750 0.0000000
H 3.1839480 -0.5094300 0.0000000
H 2.2744390 -2.0042050 0.0000000
--
0 1
O -0.2290330 2.1280320 3.3000000
C -0.3146620 0.9035020 3.3000000
N 0.8438480 0.1043590 3.3000000
C 0.9455600 -1.2773930 3.3000000
N -0.2960490 -1.9004980 3.3000000
C -1.4949900 -1.2230250 3.3000000
C -1.5480330 0.1276340 3.3000000
O 2.0040190 -1.8919000 3.3000000
H -2.4900550 0.6602360 3.3000000
H 1.7291390 0.6049670 3.3000000
H -0.2655530 -2.9130890 3.3000000
H -2.3825080 -1.8474300 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '97')] = qcdb.Molecule("""
0 1
O -0.4072070 -2.5021900 0.0000000
C -0.4072070 -1.2746690 0.0000000
N 0.8042290 -0.5582850 0.0000000
C 1.0020800 0.8130100 0.0000000
N -0.1930340 1.5212070 0.0000000
C -1.4363170 0.9290170 0.0000000
C -1.5834480 -0.4146490 0.0000000
O 2.1008310 1.3521860 0.0000000
H -2.5603280 -0.8802410 0.0000000
H 1.6524450 -1.1194300 0.0000000
H -0.0919780 2.5292090 0.0000000
H -2.2781210 1.6138160 0.0000000
--
0 1
O -1.1927920 2.1021900 3.3000000
C -1.1927930 0.8746690 3.3000000
N -2.4042290 0.1582850 3.3000000
C -2.6020800 -1.2130100 3.3000000
N -1.4069670 -1.9212070 3.3000000
C -0.1636830 -1.3290170 3.3000000
C -0.0165520 0.0146480 3.3000000
O -3.7008310 -1.7521850 3.3000000
H 0.9603280 0.4802400 3.3000000
H -3.2524450 0.7194310 3.3000000
H -1.5080230 -2.9292090 3.3000000
H 0.6781200 -2.0138170 3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '98')] = qcdb.Molecule("""
0 1
O 0.2392880 -2.6920580 0.0000000
C 0.2392880 -1.4664580 0.0000000
N 1.4831650 -0.7585710 0.0000000
C 1.6585390 0.6049980 0.0000000
N 0.6694090 1.4698420 0.0000000
C -0.5424070 0.8439620 0.0000000
C -0.8433090 -0.5171750 0.0000000
N -2.2044110 -0.7367500 0.0000000
C -2.7203200 0.4816220 0.0000000
N -1.7621040 1.4687250 0.0000000
N 2.9429720 1.0619620 0.0000000
H 2.2894380 -1.3782550 0.0000000
H -3.7780480 0.7114090 0.0000000
H -1.9055140 2.4718250 0.0000000
H 3.0816010 2.0608890 0.0000000
H 3.7442410 0.4525250 0.0000000
--
0 1
O 2.7274930 0.0284280 -3.3000000
C 1.5380200 -0.2749280 -3.3000000
N 0.5444810 0.7218990 -3.3000000
C -0.8331760 0.5747330 -3.3000000
N -1.2240650 -0.7583250 -3.3000000
C -0.3429990 -1.8167020 -3.3000000
C 0.9953510 -1.6272180 -3.3000000
O -1.6271560 1.5061620 -3.3000000
H 1.6879130 -2.4587410 -3.3000000
H 0.8786070 1.6824790 -3.3000000
H -2.2257760 -0.9095050 -3.3000000
H -0.7985270 -2.8016270 -3.3000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '99')] = qcdb.Molecule("""
0 1
O 0.9601320 1.3436400 0.0000000
C 1.5166980 0.2684520 0.0000000
N 0.7573320 -0.9011610 0.0000000
C 1.2481620 -2.1702510 0.0000000
N 2.5209460 -2.4496950 0.0000000
C 3.2915230 -1.3476830 0.0000000
C 2.9121790 -0.0279190 0.0000000
N 4.0200060 0.7969640 0.0000000
C 5.0170310 0.0003310 0.0000000
N 4.6446780 -1.3255770 0.0000000
N 0.3459700 -3.1553460 0.0000000
H -0.2412520 -0.7659240 0.0000000
H 6.0483360 0.2895830 0.0000000
H 5.2362800 -2.1226110 0.0000000
H 0.6928700 -4.0838600 0.0000000
H -0.6408270 -2.9885130 0.0000000
--
0 1
O -0.0130090 1.6513790 3.3600000
C 1.0692420 1.1086750 3.3600000
N 1.1423840 -0.2839060 3.3600000
C 2.2854260 -1.0221180 3.3600000
N 3.4793830 -0.5000700 3.3600000
C 3.4550460 0.8444100 3.3600000
C 2.3724120 1.6891490 3.3600000
N 2.7838090 3.0076580 3.3600000
C 4.0586690 2.9492050 3.3600000
N 4.5367780 1.6576590 3.3600000
N 2.1345620 -2.3493720 3.3600000
H 0.2550220 -0.7614500 3.3600000
H 4.7229940 3.7894010 3.3600000
H 5.4838790 1.3605800 3.3600000
H 2.9609760 -2.8966530 3.3600000
H 1.2381640 -2.7944260 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '100')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
C -1.5982280 -2.9490360 3.3600000
N -2.8308990 -3.5868360 3.3600000
C -4.0005400 -2.9065270 3.3600000
C -4.0107280 -1.5698660 3.3600000
C -2.7192980 -0.9187180 3.3600000
N -1.5949260 -1.5998660 3.3600000
O -0.5980710 -3.6295230 3.3600000
N -2.6531990 0.4024280 3.3600000
H -2.8066410 -4.5810390 3.3600000
H -4.8972920 -3.4971900 3.3600000
H -4.9235800 -1.0089750 3.3600000
H -3.4794940 0.9500750 3.3600000
H -1.7581040 0.8646590 3.3600000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '101')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.5825730 -0.2790500
C -1.8533500 -0.3518640 -0.0620430
N -0.9943890 0.6521290 0.1149880
C -1.4604570 1.8814980 0.3317590
N -2.7070820 2.2763020 0.4013740
C -3.5527210 1.2640760 0.2228910
C -3.2236500 -0.0504790 -0.0089010
N -4.3580740 -0.8272780 -0.1458710
C -5.3247240 -0.0009840 -0.0001730
N -4.9130980 1.2870000 0.2269330
H -0.7040060 2.6348130 0.4645890
H -6.3651290 -0.2529400 -0.0446000
H -5.4840420 2.0871050 0.3680130
H -0.4093220 -1.7576030 -0.3099130
H -2.0356960 -2.3259680 -0.4101310
--
0 1
N -0.1962490 -2.0987510 2.7709500
C -1.2925710 -1.3740360 2.9879570
N -1.1877890 -0.0569040 3.1649880
C -2.2874510 0.6637280 3.3817590
N -3.5280520 0.2503840 3.4513740
C -3.6172170 -1.0655780 3.2728910
C -2.5783160 -1.9356530 3.0410990
N -3.0394940 -3.2308940 2.9041290
C -4.3072140 -3.1305910 3.0498260
N -4.7312590 -1.8466420 3.2769330
H -2.1182570 1.7178040 3.5145890
H -5.0008230 -3.9459620 3.0054000
H -5.6634530 -1.5349360 3.4180130
H 0.7019450 -1.6625240 2.7400870
H -0.2797430 -3.0783000 2.6398690
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '102')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8647480 0.3288050
C 2.4435780 -0.9466670 0.1669230
N 1.9754430 0.3257080 -0.0574310
C 2.7234150 1.4521870 -0.2560600
N 4.0753100 1.2353980 -0.2178340
C 4.6340910 -0.0009940 0.0001750
C 3.9044100 -1.0972430 0.1934740
O 2.2509580 2.5354000 -0.4470600
C 4.4733490 -2.4660930 0.4348390
H 4.6436490 2.0381400 -0.3593790
H 0.9698550 0.4501820 -0.0793790
H 5.7079390 -0.0187620 0.0033080
H 4.1432070 -2.8577080 1.3905330
H 4.1417710 -3.1613140 -0.3283340
H 5.5573000 -2.4391850 0.4291940
--
0 1
O 2.4555320 -0.5209070 3.3788050
C 2.5333330 0.6704300 3.2169230
N 1.4067200 1.4246400 2.9925690
C 1.3497150 2.7756270 2.7939400
N 2.5708460 3.3948650 2.8321660
C 3.7496420 2.7230470 3.0501750
C 3.8036770 1.4072660 3.2434740
O 0.3307920 3.3742620 2.6029400
C 5.0685490 0.6342580 3.4848390
H 2.5588020 4.3783590 2.6906210
H 0.5200190 0.9342720 2.9706210
H 4.6288470 3.3398640 3.0533080
H 5.0316430 0.1233820 4.4405330
H 5.2089370 -0.1230850 2.7216660
H 5.9296670 1.2931580 3.4791940
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '103')] = qcdb.Molecule("""
0 1
C 2.4313070 1.6249990 -1.4530130
N 3.8007370 1.6249990 -1.6786800
C 4.7029040 1.6249990 -0.6702290
C 4.2995430 1.6249990 0.6041590
C 2.8701050 1.6249990 0.8243640
N 2.0112500 1.6249990 -0.1708960
O 1.6903830 1.6249990 -2.4092590
N 2.3989850 1.6249990 2.0604220
H 4.0848920 1.6249990 -2.6317200
H 5.7382910 1.6249990 -0.9548720
H 4.9943920 1.6249990 1.4196840
H 3.0156050 1.6249990 2.8366040
H 1.4048620 1.6249990 2.2234300
--
0 1
O 0.4979320 -1.6249990 1.9422390
C 1.3595090 -1.6249990 1.0916630
N 0.9987390 -1.6249990 -0.2553610
C 1.8577170 -1.6249990 -1.3106620
N 3.1545590 -1.6249990 -1.1831180
C 3.5468800 -1.6249990 0.1030790
C 2.7782730 -1.6249990 1.2410250
N 3.5769760 -1.6249990 2.3678730
C 4.7713760 -1.6249990 1.9183280
N 4.8269760 -1.6249990 0.5422510
N 1.3040920 -1.6249990 -2.5263360
H 0.0072390 -1.6249990 -0.4353230
H 5.6628210 -1.6249990 2.5121130
H 5.6359190 -1.6249990 -0.0329580
H 1.9209400 -1.6249990 -3.3022070
H 0.3140390 -1.6249990 -2.6726050
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '104')] = qcdb.Molecule("""
0 1
C -3.0263940 -1.4464050 0.0000000
N -4.3985350 -1.2378500 0.0000000
C -4.9449180 0.0000290 0.0000000
C -4.1674910 1.0873990 0.0000000
C -2.7399670 0.8551050 0.0000000
N -2.2307000 -0.3568440 0.0000000
O -2.6172300 -2.5848080 0.0000000
N -1.9099420 1.8850830 0.0000000
H -4.9632880 -2.0564360 0.0000000
H -6.0175890 0.0492700 0.0000000
H -4.5763200 2.0777300 0.0000000
H -2.2565290 2.8138220 0.0000000
H -0.9141020 1.7329110 0.0000000
--
0 1
O -1.5665350 0.5226760 3.1900000
C -1.3848270 -0.6743110 3.1900000
N -0.0830050 -1.1742030 3.1900000
C 0.2658570 -2.4894210 3.1900000
N -0.5995930 -3.4636200 3.1900000
C -1.8707500 -3.0250070 3.1900000
C -2.3395920 -1.7343230 3.1900000
N -3.7206970 -1.7181430 3.1900000
C -4.0590580 -2.9486690 3.1900000
N -2.9784690 -3.8024880 3.1900000
N 1.5747700 -2.7560840 3.1900000
H 0.6453760 -0.4778410 3.1900000
H -5.0634190 -3.3208450 3.1900000
H -2.9886000 -4.7950370 3.1900000
H 1.8398890 -3.7111710 3.1900000
H 2.2750440 -2.0410890 3.1900000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '105')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6008720 0.1400580
C 1.5033500 -0.3559320 0.0311400
N 0.6443890 0.6596690 -0.0577140
C 1.1104570 1.9032530 -0.1665130
N 2.3570820 2.3026230 -0.2014530
C 3.2027210 1.2786930 -0.1118710
C 2.8736500 -0.0510630 0.0044670
N 4.0080740 -0.8368430 0.0732140
C 4.9747240 -0.0009950 0.0000870
N 4.5630980 1.3018810 -0.1139000
H 0.3540060 2.6652780 -0.2331820
H 6.0151290 -0.2558650 0.0223850
H 5.1340420 2.1112380 -0.1847090
H 0.0593220 -1.7779260 0.1555480
H 1.6856960 -2.3528630 0.2058490
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '106')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8863100 -0.1650310
C -2.7935780 -0.9576130 -0.0837800
N -2.3254430 0.3294740 0.0288250
C -3.0734150 1.4689780 0.1285190
N -4.4253100 1.2496820 0.1093330
C -4.9840910 -0.0010050 -0.0000880
C -4.2544100 -1.1099300 -0.0971060
O -2.6009580 2.5647160 0.2243840
C -4.8233490 -2.4946080 -0.2182500
H -4.9936490 2.0617070 0.1803760
H -1.3198550 0.4553880 0.0398410
H -6.0579390 -0.0189790 -0.0016600
H -4.4932070 -3.1202310 0.6035230
H -4.4917710 -2.9686160 -1.1353540
H -5.9073000 -2.4671550 -0.2167380
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '107')] = qcdb.Molecule("""
0 1
N 1.0423840 -1.6030730 0.1120980
C 1.5033500 -0.3564220 0.0249230
N 0.6443890 0.6605760 -0.0461920
C 1.1104570 1.9058690 -0.1332710
N 2.3570820 2.3057880 -0.1612360
C 3.2027210 1.2804500 -0.0895380
C 2.8736500 -0.0511330 0.0035760
N 4.0080740 -0.8379940 0.0585980
C 4.9747240 -0.0009970 0.0000700
N 4.5630980 1.3036710 -0.0911620
H 0.3540060 2.6689420 -0.1866310
H 6.0151290 -0.2562160 0.0179160
H 5.1340420 2.1141400 -0.1478350
H 0.0593220 -1.7803700 0.1244960
H 1.6856960 -2.3560970 0.1647540
--
0 1
O 1.5241600 -0.5494170 3.3837280
C 1.3439910 0.6454500 3.3087260
N 0.0438450 1.1430380 3.2271380
C -0.3032010 2.4557550 3.1386110
N 0.5626500 3.4294030 3.1191180
C 1.8322280 2.9929620 3.1959900
C 2.2991810 1.7048790 3.2880520
N 3.6791050 1.6903250 3.3455930
C 4.0186060 2.9192810 3.2900230
N 2.9399160 3.7704860 3.1975320
N -1.6107030 2.7204770 3.0698940
H -0.6847300 0.4469420 3.2365720
H 5.0225530 3.2920270 3.3102000
H 2.9511880 4.7614640 3.1419340
H -1.8744930 3.6737330 3.0051240
H -2.3112160 2.0058100 3.0815320
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '108')] = qcdb.Molecule("""
0 1
O -2.0303850 -1.8889030 -0.1320850
C -2.7935780 -0.9589290 -0.0670550
N -2.3254430 0.3299270 0.0230710
C -3.0734150 1.4709970 0.1028620
N -4.4253100 1.2514000 0.0875060
C -4.9840910 -0.0010070 -0.0000700
C -4.2544100 -1.1114560 -0.0777210
O -2.6009580 2.5682420 0.1795890
C -4.8233490 -2.4980370 -0.1746800
H -4.9936490 2.0645410 0.1443670
H -1.3198550 0.4560130 0.0318880
H -6.0579390 -0.0190050 -0.0013290
H -4.4932070 -3.1092230 0.6578860
H -4.4917710 -2.9879780 -1.0833720
H -5.9073000 -2.4705620 -0.1736470
--
0 1
C -3.3369590 -0.6409430 3.3908960
N -4.3247580 -1.6157810 3.3763480
C -4.0409560 -2.9359630 3.2899980
C -2.7744220 -3.3565600 3.2141470
C -1.7557370 -2.3300110 3.2303510
N -2.0543620 -1.0525720 3.3148920
O -3.6734450 0.5183010 3.4703070
N -0.4803010 -2.6733740 3.1585030
H -5.2616330 -1.2870980 3.4334500
H -4.8798930 -3.6062030 3.2865630
H -2.5244870 -4.3961080 3.1450650
H -0.2161270 -3.6266280 3.0937180
H 0.2361240 -1.9652240 3.1691180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '109')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8935150 0.0000000
C 2.7935780 -0.9612710 0.0000000
N 2.3254430 0.3307330 0.0000000
C 3.0734150 1.4745890 0.0000000
N 4.4253100 1.2544560 0.0000000
C 4.9840910 -0.0010090 0.0000000
C 4.2544100 -1.1141700 0.0000000
O 2.6009580 2.5745130 0.0000000
C 4.8233490 -2.5041370 0.0000000
H 4.9936490 2.0695820 0.0000000
H 1.3198550 0.4571270 0.0000000
H 6.0579390 -0.0190510 0.0000000
H 4.4932070 -3.0557570 0.8731720
H 4.4917710 -3.0562720 -0.8723020
H 5.9073000 -2.4766570 -0.0008860
--
0 1
O 1.5260840 -0.5520650 3.1800000
C 1.3443760 0.6449210 3.1800000
N 0.0425540 1.1448140 3.1800000
C -0.3063080 2.4600320 3.1800000
N 0.5591430 3.4342300 3.1800000
C 1.8302990 2.9956180 3.1800000
C 2.2991410 1.7049340 3.1800000
N 3.6802460 1.6887540 3.1800000
C 4.0186070 2.9192800 3.1800000
N 2.9380180 3.7730980 3.1800000
N -1.6152210 2.7266950 3.1800000
H -0.6858270 0.4484520 3.1800000
H 5.0229680 3.2914560 3.1800000
H 2.9481490 4.7656470 3.1800000
H -1.8803400 3.6817820 3.1800000
H -2.3154950 2.0117000 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '110')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6069870 0.0000000
C -1.5033500 -0.3572920 0.0000000
N -0.6443890 0.6621890 0.0000000
C -1.1104570 1.9105230 0.0000000
N -2.3570820 2.3114180 0.0000000
C -3.2027210 1.2835770 0.0000000
C -2.8736500 -0.0512580 0.0000000
N -4.0080740 -0.8400400 0.0000000
C -4.9747240 -0.0009990 0.0000000
N -4.5630980 1.3068540 0.0000000
H -0.3540060 2.6754590 0.0000000
H -6.0151290 -0.2568420 0.0000000
H -5.1340420 2.1193020 0.0000000
H -0.0593220 -1.7847170 0.0000000
H -1.6856960 -2.3618500 0.0000000
--
0 1
C -3.3390300 -0.6380930 3.1800000
N -4.3265300 -1.6133420 3.1800000
C -4.0409560 -2.9359630 3.1800000
C -2.7728650 -3.3587040 3.1800000
C -1.7545120 -2.3316960 3.1800000
N -2.0548730 -1.0518690 3.1800000
O -3.6771460 0.5233950 3.1800000
N -0.4776020 -2.6770890 3.1800000
H -5.2645780 -1.2830450 3.1800000
H -4.8798220 -3.6063000 3.1800000
H -2.5215120 -4.4002020 3.1800000
H -0.2120980 -3.6321740 3.1800000
H 0.2386050 -1.9686390 3.1800000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '111')] = qcdb.Molecule("""
0 1
O 2.0303850 -1.8863100 0.1650310
C 2.7935780 -0.9576130 0.0837800
N 2.3254430 0.3294740 -0.0288250
C 3.0734150 1.4689780 -0.1285190
N 4.4253100 1.2496820 -0.1093330
C 4.9840910 -0.0010050 0.0000880
C 4.2544100 -1.1099300 0.0971060
O 2.6009580 2.5647160 -0.2243840
C 4.8233490 -2.4946080 0.2182500
H 4.9936490 2.0617070 -0.1803760
H 1.3198550 0.4553880 -0.0398410
H 6.0579390 -0.0189790 0.0016600
H 4.4932070 -2.9680270 1.1361760
H 4.4917710 -3.1206680 -0.6026110
H 5.9073000 -2.4673100 0.2149720
--
0 1
O -0.0504540 -1.6178530 -3.0328940
C 1.0293920 -1.0784590 -3.1266030
N 1.0999170 0.3105210 -3.2285410
C 2.2401210 1.0448270 -3.3391500
N 3.4334530 0.5219170 -3.3635050
C 3.4115810 -0.8191700 -3.2674580
C 2.3318990 -1.6598460 -3.1524330
N 2.7451410 -2.9758150 -3.0805400
C 4.0182190 -2.9198150 -3.1499710
N 4.4933620 -1.6323510 -3.2655320
N 2.0870530 2.3690480 -3.4250070
H 0.2128580 0.7884810 -3.2167550
H 4.6831900 -3.7591200 -3.1247610
H 5.4386800 -1.3377250 -3.3349980
H 2.9113910 2.9134700 -3.5059320
H 1.1910290 2.8146150 -3.4104660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '112')] = qcdb.Molecule("""
0 1
N -1.0423840 -1.6008720 -0.1400580
C -1.5033500 -0.3559320 -0.0311400
N -0.6443890 0.6596690 0.0577140
C -1.1104570 1.9032530 0.1665130
N -2.3570820 2.3026230 0.2014530
C -3.2027210 1.2786930 0.1118710
C -2.8736500 -0.0510630 -0.0044670
N -4.0080740 -0.8368430 -0.0732140
C -4.9747240 -0.0009950 -0.0000870
N -4.5630980 1.3018810 0.1139000
H -0.3540060 2.6652780 0.2331820
H -6.0151290 -0.2558650 -0.0223850
H -5.1340420 2.1112380 0.1847090
H -0.0593220 -1.7779260 -0.1555480
H -1.6856960 -2.3528630 -0.2058490
--
0 1
C -1.6419140 2.9739730 -3.0239370
N -2.8741190 3.6124140 -3.0421140
C -4.0409900 2.9359160 -3.1500030
C -4.0487470 1.6026030 -3.2447730
C -2.7578360 0.9507400 -3.2245270
N -1.6361750 1.6281560 -3.1188990
O -0.6443040 3.6509540 -2.9247190
N -2.6894340 -0.3672360 -3.3142960
H -2.8516920 4.6040980 -2.9707700
H -4.9376330 3.5267310 -3.1542940
H -4.9593830 1.0447610 -3.3310860
H -3.5136510 -0.9120230 -3.3952410
H -1.7946790 -0.8299350 -3.3010330
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '113')] = qcdb.Molecule("""
0 1
N 1.3923840 -1.6008720 0.1400580
C 1.8533500 -0.3559320 0.0311400
N 0.9943890 0.6596690 -0.0577140
C 1.4604570 1.9032530 -0.1665130
N 2.7070820 2.3026230 -0.2014530
C 3.5527210 1.2786930 -0.1118710
C 3.2236500 -0.0510630 0.0044670
N 4.3580740 -0.8368430 0.0732140
C 5.3247240 -0.0009950 0.0000870
N 4.9130980 1.3018810 -0.1139000
H 0.7040060 2.6652780 -0.2331820
H 6.3651290 -0.2558650 0.0223850
H 5.4840420 2.1112380 -0.1847090
H 0.4093220 -1.7779260 0.1555480
H 2.0356960 -2.3528630 0.2058490
--
0 1
O 2.4682050 -0.5383510 3.4250310
C 2.5397670 0.6615740 3.3437800
N 1.4045070 1.4276870 3.2311750
C 1.3398450 2.7892110 3.1314810
N 2.5624500 3.4064220 3.1506670
C 3.7496490 2.7230370 3.2600880
C 3.8111350 1.3970020 3.3571060
O 0.3135610 3.3979790 3.0356160
C 5.0853090 0.6111890 3.4782500
H 2.5449500 4.3974240 3.0796240
H 0.5169590 0.9384830 3.2201590
H 4.6289750 3.3396890 3.2616600
H 5.0964880 0.0341320 4.3961760
H 5.1850460 -0.0902010 2.6573890
H 5.9461980 1.2704040 3.4749720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '114')] = qcdb.Molecule("""
0 1
N -1.4867430 1.6920980 -2.3336600
C -1.5399110 1.6049230 -1.0055780
N -0.4087210 1.5338080 -0.3037890
C -0.4671620 1.4467290 1.0245780
N -1.5291910 1.4187640 1.7901520
C -2.6502880 1.4904620 1.0763140
C -2.7488050 1.5835760 -0.2917850
N -4.0708590 1.6385980 -0.6895780
C -4.7315520 1.5800700 0.4051650
N -3.9369080 1.4888380 1.5187780
H 0.4880690 1.3933690 1.5165470
H -5.7999030 1.5979160 0.4839400
H -4.2294590 1.4321650 2.4660110
H -0.6065830 1.7044960 -2.8060620
H -2.3312660 1.7447540 -2.8510340
--
0 1
O -1.3473090 -1.4479140 -0.7794320
C -2.3605260 -1.5129430 -0.1308130
N -2.3135810 -1.6030670 1.2396240
C -3.3775550 -1.6828570 2.0937100
N -4.5954240 -1.6675010 1.4671030
C -4.7398420 -1.5799270 0.1033200
C -3.7027260 -1.5022770 -0.7272960
O -3.2672880 -1.7595820 3.2832490
C -3.8153430 -1.4053200 -2.2218250
H -5.3872210 -1.7243610 2.0648200
H -1.3961730 -1.6118840 1.6702820
H -5.7555700 -1.5786680 -0.2456340
H -3.3109870 -2.2369830 -2.7010640
H -3.3501360 -0.4957970 -2.5852230
H -4.8546930 -1.4081210 -2.5307720
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '115')] = qcdb.Molecule("""
0 1
N -1.3923840 -1.6069870 0.0000000
C -1.8533500 -0.3572920 0.0000000
N -0.9943890 0.6621890 0.0000000
C -1.4604570 1.9105230 0.0000000
N -2.7070820 2.3114180 0.0000000
C -3.5527210 1.2835770 0.0000000
C -3.2236500 -0.0512580 0.0000000
N -4.3580740 -0.8400400 0.0000000
C -5.3247240 -0.0009990 0.0000000
N -4.9130980 1.3068540 0.0000000
H -0.7040060 2.6754590 0.0000000
H -6.3651290 -0.2568420 0.0000000
H -5.4840420 2.1193020 0.0000000
H -0.4093220 -1.7847170 0.0000000
H -2.0356960 -2.3618500 0.0000000
--
0 1
N -0.1818990 -2.1185030 3.2400000
C -1.2893810 -1.3784270 3.2400000
N -1.1937020 -0.0487650 3.2400000
C -2.3045120 0.6872100 3.2400000
N -3.5486930 0.2787930 3.2400000
C -3.6286790 -1.0498020 3.2400000
C -2.5778590 -1.9362830 3.2400000
N -3.0319930 -3.2412190 3.2400000
C -4.3072050 -3.1306030 3.2400000
N -4.7429290 -1.8305800 3.2400000
H -2.1421480 1.7506870 3.2400000
H -4.9985290 -3.9491190 3.2400000
H -5.6823780 -1.5088880 3.2400000
H 0.7178820 -1.6844600 3.2400000
H -0.2586520 -3.1073290 3.2400000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '116')] = qcdb.Molecule("""
0 1
O 1.6803850 -1.8935150 0.0000000
C 2.4435780 -0.9612710 0.0000000
N 1.9754430 0.3307330 0.0000000
C 2.7234150 1.4745890 0.0000000
N 4.0753100 1.2544560 0.0000000
C 4.6340910 -0.0010090 0.0000000
C 3.9044100 -1.1141700 0.0000000
O 2.2509580 2.5745130 0.0000000
C 4.4733490 -2.5041370 0.0000000
H 4.6436490 2.0695820 0.0000000
H 0.9698550 0.4571270 0.0000000
H 5.7079390 -0.0190510 0.0000000
H 4.1432070 -3.0557570 0.8731720
H 4.1417710 -3.0562720 -0.8723020
H 5.5573000 -2.4766570 -0.0008860
--
0 1
O 2.4724400 -0.5441800 3.2400000
C 2.5419170 0.6586150 3.2400000
N 1.4037670 1.4287050 3.2400000
C 1.3365470 2.7937510 3.2400000
N 2.5596440 3.4102840 3.2400000
C 3.7496510 2.7230340 3.2400000
C 3.8136270 1.3935720 3.2400000
O 0.3078020 3.4059050 3.2400000
C 5.0909100 0.6034800 3.2400000
H 2.5403210 4.4037960 3.2400000
H 0.5159370 0.9398900 3.2400000
H 4.6290170 3.3396300 3.2400000
H 5.1480540 -0.0368430 4.1131720
H 5.1471940 -0.0381040 2.3676980
H 5.9516930 1.2628420 3.2391140
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '117')] = qcdb.Molecule("""
0 1
C 12.1619966 21.5469940 -0.5249999
N 12.0019966 20.1249944 -0.3349999
C 12.9959964 19.1989946 -0.1290000
N 12.5899965 17.9429950 -0.1260000
C 11.2289969 18.0629949 -0.3469999
C 10.2259971 17.0909952 -0.4599999
N 10.4079971 15.7719956 -0.3739999
N 8.9619975 17.5199951 -0.6819998
C 8.7349976 18.8509947 -0.7899998
N 9.6049973 19.8469944 -0.7019998
C 10.8559970 19.3909946 -0.4999999
H 12.8450824 21.9515608 0.2257099
H 12.5490085 21.7744749 -1.5236356
H 11.1843859 22.0177918 -0.4120399
H 14.0220821 19.5129525 0.0161520
H 11.3436468 15.4109067 -0.2800629
H 9.6382753 15.1406078 -0.5991948
H 7.6909448 19.1156876 -0.9420537
--
0 1
C 8.5479976 21.7979939 2.3959993
N 9.1919974 20.5259942 2.6589993
C 8.4229976 19.3799946 2.5429993
O 7.2269980 19.3959946 2.3429993
N 9.0979975 18.2049949 2.7069992
C 10.4579971 18.0869949 2.9379992
O 10.9519969 16.9699952 3.0289992
C 11.2079969 19.3189946 3.0599991
C 12.6759964 19.2659946 3.3619991
C 10.5419970 20.4719943 2.8979992
H 7.4741299 21.6651819 2.5133333
H 8.9049615 22.5495287 3.1049871
H 8.7503455 22.1445498 1.3760436
H 11.0339909 21.4374260 2.9618352
H 13.2133913 18.6878638 2.6029743
H 13.1061963 20.2701373 3.4050200
H 12.8619664 18.7673097 4.3193848
H 8.5371916 17.3217571 2.6353613
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '118')] = qcdb.Molecule("""
0 1
N 10.3469971 14.4959959 8.8169975
C 11.5789968 13.8469961 8.7069976
O 11.6019967 12.6419965 8.4119976
N 12.6939964 14.5549959 8.8809975
C 12.6739964 15.9259955 9.1859974
N 13.8309961 16.5099954 9.3349974
C 11.4219968 16.5639954 9.2669974
C 10.3209971 15.8539956 9.0929975
H 9.3699974 16.4009954 9.1789974
H 11.3019968 17.6379951 9.4699973
H 14.6739959 15.9769955 9.2609974
H 13.8749961 17.4909951 9.5239973
C 9.1059774 13.7460371 8.6280336
H 9.4001314 12.7260934 8.3864956
H 8.5051816 13.7537151 9.5428113
H 8.5206636 14.1698120 7.8064238
--
0 1
C 10.7049970 9.6579973 11.8009967
N 11.0689969 11.0699969 11.9839966
C 10.2199971 12.1419966 11.9589966
N 10.8209970 13.3089963 12.1399966
C 12.1439966 12.9639964 12.2549966
C 13.3189963 13.7529961 12.4509965
O 13.3749963 14.9839958 12.5499965
N 14.4609959 13.0409963 12.5269965
C 14.5119959 11.6719967 12.4369965
N 15.7519956 11.1639969 12.5359965
N 13.4609962 10.8809970 12.2549966
C 12.3209965 11.5909968 12.1779966
H 11.6087247 9.0642815 11.9411017
H 10.3130781 9.4887283 10.7941210
H 9.9552752 9.3611644 12.5389945
H 15.3408647 13.5779012 12.6455145
H 9.1538724 12.0114576 11.8260867
H 15.8197976 10.1594152 12.5501065
H 16.5616854 11.7259467 12.8207994
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '119')] = qcdb.Molecule("""
0 1
N 10.9240000 16.7550000 5.5620000
C 11.6470000 17.8510000 5.8140000
N 12.9490000 17.6590000 5.9790000
C 13.0500000 16.2780000 5.7950000
C 14.1950000 15.4230000 5.8560000
N 15.4060000 15.8590000 6.0610000
N 13.9020000 14.1180000 5.6250000
C 12.6770000 13.6430000 5.3990000
N 11.5490000 14.4040000 5.3300000
C 11.8450000 15.6910000 5.5460000
H 11.1804230 18.8265530 5.8822870
H 12.5884030 12.5696370 5.2620740
H 16.1977530 15.2199420 5.9750360
H 15.5570940 16.8510580 6.1500010
C 9.4931860 16.6413650 5.3399050
H 9.0446590 17.6337380 5.4112840
H 9.2947180 16.2234190 4.3499330
H 9.0442270 15.9854440 6.0897950
--
0 1
C 9.1690000 13.6920000 8.6010000
N 10.3470000 14.4960000 8.8170000
C 11.5790000 13.8470000 8.7070000
O 11.6020000 12.6420000 8.4120000
N 12.6940000 14.5550000 8.8810000
C 12.6740000 15.9260000 9.1860000
N 13.8310000 16.5100000 9.3350000
C 11.4220000 16.5640000 9.2670000
C 10.3210000 15.8540000 9.0930000
H 9.1403680 12.8642760 9.3131620
H 8.2785600 14.3117950 8.7260530
H 9.1795130 13.2651190 7.5953140
H 11.3501160 17.6252970 9.4808030
H 9.3300790 16.2918180 9.1491660
H 14.7113690 15.9651740 9.2135180
H 13.8876420 17.4962710 9.5342540
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '120')] = qcdb.Molecule("""
0 1
N 16.2460000 9.7810000 5.9650000
C 17.5950000 10.0510000 5.9930000
C 18.0920000 11.2690000 5.9020000
C 17.1390000 12.3410000 5.7640000
O 17.4920000 13.5330000 5.6630000
N 15.8280000 12.0550000 5.7130000
C 15.3100000 10.7970000 5.7960000
O 14.1120000 10.5770000 5.7580000
H 18.2280000 9.1744860 6.1031120
C 19.5529600 11.6051630 5.9357380
H 20.1631860 10.7042230 6.0438290
H 19.7760320 12.2828240 6.7658180
H 19.8526100 12.1260780 5.0209680
H 15.1383860 12.8499570 5.6472680
C 15.7717470 8.4029560 6.0779300
H 14.6864640 8.4223240 6.0045990
H 16.1825380 7.7884380 5.2708940
H 16.0652090 7.9755790 7.0417370
--
0 1
C 18.8920000 9.6580000 9.7710000
N 18.5280000 11.0700000 9.5880000
C 19.3770000 12.1420000 9.6130000
N 18.7760000 13.3090000 9.4320000
C 17.4530000 12.9640000 9.3170000
C 16.2780000 13.7530000 9.1210000
O 16.2220000 14.9840000 9.0220000
N 15.1360000 13.0410000 9.0450000
C 15.0850000 11.6720000 9.1350000
N 13.8450000 11.1640000 9.0360000
N 16.1360000 10.8810000 9.3170000
C 17.2760000 11.5910000 9.3940000
H 14.2561290 13.5779040 8.9264920
H 13.0354310 11.7259420 8.7508330
H 13.7773690 10.1594100 9.0211800
H 17.9880060 9.0643740 9.6322420
H 19.2851540 9.4890700 10.7774400
H 19.6407390 9.3607520 9.0321720
H 20.4431070 12.0114850 9.7460660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '121')] = qcdb.Molecule("""
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
--
0 1
H 0.0112670 4.2441280 0.3057270
N -0.1600000 4.2010000 1.2990000
C 0.1490000 5.1520000 2.2350000
H 0.8336150 5.9557770 2.0023890
N -0.3040000 4.9000000 3.4380000
C -1.1470000 3.7970000 3.2290000
C -2.0790000 3.1160000 4.0900000
O -2.3440000 3.3110000 5.2740000
N -2.7730000 2.0930000 3.4630000
H -3.4444620 1.6202680 4.0533010
C -2.5700000 1.7190000 2.1650000
N -3.2200000 0.6740000 1.7040000
H -3.7884800 0.1079360 2.3113460
H -3.0424470 0.3264300 0.7529310
N -1.7100000 2.3160000 1.3470000
C -1.0480000 3.3630000 1.9240000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '122')] = qcdb.Molecule("""
0 1
H -3.4958570 -1.4150050 -3.9137580
N -3.0510000 -1.0010000 -3.1090000
C -3.5590000 -0.8800000 -1.8360000
H -4.5790060 -1.1582720 -1.6128580
N -2.7220000 -0.3740000 -0.9680000
C -1.5590000 -0.1810000 -1.7250000
C -0.2720000 0.3480000 -1.4650000
N 0.1070000 0.8840000 -0.3230000
H 1.0433330 1.2579620 -0.3065570
H -0.5751070 1.2407790 0.3499520
N 0.6670000 0.3750000 -2.4130000
C 0.3480000 -0.0810000 -3.6160000
H 1.1321870 -0.0417550 -4.3673920
N -0.8160000 -0.5790000 -4.0190000
C -1.7380000 -0.6050000 -3.0150000
--
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '123')] = qcdb.Molecule("""
0 1
H 4.0780890 0.2050200 6.5267380
N 3.3380000 -0.4520000 6.3380000
C 2.1440000 -0.6140000 7.0100000
H 1.9445960 -0.0744500 7.9251340
N 1.3390000 -1.4880000 6.4770000
C 2.0190000 -1.9110000 5.3320000
C 1.6500000 -2.8430000 4.3020000
O 0.6370000 -3.5330000 4.1980000
N 2.5960000 -2.9520000 3.3010000
H 2.3705000 -3.6388980 2.5623150
C 3.7610000 -2.2490000 3.2730000
N 4.5620000 -2.4690000 2.2580000
H 4.3528370 -3.1696290 1.5459440
H 5.4428290 -1.9835850 2.2550440
N 4.1450000 -1.3880000 4.2160000
C 3.2280000 -1.2560000 5.2240000
--
0 1
H 3.1762460 2.3738070 2.9634160
N 2.3770000 1.8470000 3.2830000
C 1.6370000 2.2160000 4.3790000
H 1.9902970 3.0843050 4.9210710
C 0.5610000 1.4930000 4.7730000
H -0.0085000 1.7736330 5.6470440
C 0.1830000 0.3990000 3.9430000
N -0.8510000 -0.3400000 4.2540000
H -1.1799330 -1.0651510 3.5908230
H -1.4362750 -0.1022370 5.0377650
N 0.8500000 0.0580000 2.8540000
C 1.9550000 0.7640000 2.4990000
O 2.5580000 0.4150000 1.4830000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '124')] = qcdb.Molecule("""
0 1
H -1.2611710 -4.7286740 -2.6257100
N -1.6090000 -4.2940000 -1.7860000
C -2.7550000 -4.5990000 -1.0690000
H -3.5136190 -5.2427470 -1.4922410
N -2.8650000 -3.9860000 0.0730000
C -1.6740000 -3.2820000 0.1910000
C -1.1780000 -2.4570000 1.2560000
O -1.7150000 -2.1460000 2.3170000
N 0.0980000 -1.9830000 1.0200000
H 0.4562670 -1.3045040 1.7132710
C 0.8280000 -2.2730000 -0.0890000
N 2.0180000 -1.7250000 -0.1770000
H 2.3044660 -0.9690820 0.4476800
H 2.5064670 -1.8555350 -1.0472790
N 0.3920000 -3.0250000 -1.1030000
C -0.8790000 -3.5010000 -0.9150000
--
0 1
H 3.2823840 -6.1134940 -1.3105350
N 2.5530000 -6.0070000 -0.6210000
C 1.3990000 -6.7620000 -0.6490000
H 1.3017290 -7.4646550 -1.4662410
C 0.4550000 -6.5890000 0.3070000
H -0.4593850 -7.1648600 0.2947650
C 0.7210000 -5.6290000 1.3280000
N -0.1590000 -5.3940000 2.2700000
H -1.0266130 -5.9017830 2.3125200
H 0.0709100 -4.7127400 3.0149280
N 1.8460000 -4.9310000 1.3860000
C 2.7800000 -5.0940000 0.4140000
O 3.8210000 -4.4400000 0.4780000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
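
# Note on the derived keys built above (a usage sketch; 'JSCH-1' is merely an
# illustrative reaction label, and every rxn in HRXN behaves identically):
#   GEOS['JSCH-1-dimer']       both monomers, all atoms real
#   GEOS['JSCH-1-monoA-unCP']  monomer A alone, in its own basis
#   GEOS['JSCH-1-monoA-CP']    monomer A real, monomer B retained as ghost atoms
# extract_fragments(1, 2) ghosts fragment 2, which is what makes the '-CP'
# geometries suitable for counterpoise-corrected interaction energies.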
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-dimer' ] = 1391.98129069
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoA-unCP' ] = 357.13933560
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoB-unCP' ] = 596.62760720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-dimer' ] = 1654.40527853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoA-unCP' ] = 443.56399475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoB-unCP' ] = 696.60732032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-dimer' ] = 1365.23227533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoA-unCP' ] = 503.39630679
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoB-unCP' ] = 440.30156925
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-dimer' ] = 1645.63864536
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoA-unCP' ] = 596.45767348
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoB-unCP' ] = 533.27333592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-dimer' ] = 1519.08619634
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoA-unCP' ] = 694.08169190
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoB-unCP' ] = 357.17481831
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-dimer' ] = 1250.60241408
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoA-unCP' ] = 357.05937707
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoB-unCP' ] = 502.93669666
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-dimer' ] = 1377.89785724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoA-unCP' ] = 596.76364865
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoB-unCP' ] = 357.05278633
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-dimer' ] = 1101.46127813
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoA-unCP' ] = 357.43034135
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoB-unCP' ] = 369.97349400
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-dimer' ] = 1026.69630020
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoA-unCP' ] = 357.07506993
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoB-unCP' ] = 357.22791266
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-dimer' ] = 1049.26311591
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoA-unCP' ] = 357.30966824
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoB-unCP' ] = 357.25457437
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-dimer' ] = 1501.52577097
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoA-unCP' ] = 357.30771904
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoB-unCP' ] = 670.49331720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-dimer' ] = 1338.80888094
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoA-unCP' ] = 502.97292629
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoB-unCP' ] = 412.74720533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-dimer' ] = 1521.60537748
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoA-unCP' ] = 596.65701652
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoB-unCP' ] = 440.44274318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-dimer' ] = 1516.62359887
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoA-unCP' ] = 596.92255465
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoB-unCP' ] = 440.54554467
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-dimer' ] = 1318.42675206
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoA-unCP' ] = 503.53728859
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoB-unCP' ] = 425.75653587
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-dimer' ] = 1478.61731319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoA-unCP' ] = 596.66795120
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoB-unCP' ] = 413.04224329
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-dimer' ] = 1487.72900733
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoA-unCP' ] = 596.75974596
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoB-unCP' ] = 412.84579804
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-dimer' ] = 1229.51638352
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoA-unCP' ] = 356.91023176
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoB-unCP' ] = 503.30931271
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-dimer' ] = 1706.17310708
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoA-unCP' ] = 596.15051246
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoB-unCP' ] = 596.63218121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-dimer' ] = 1830.02648907
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoA-unCP' ] = 596.58545524
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoB-unCP' ] = 670.44234386
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-dimer' ] = 1835.32380783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoA-unCP' ] = 670.04325278
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoB-unCP' ] = 596.84640861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-dimer' ] = 1578.33475973
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoA-unCP' ] = 596.48090327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoB-unCP' ] = 503.33140569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-dimer' ] = 1570.60868318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoA-unCP' ] = 503.54456755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoB-unCP' ] = 596.89708469
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-dimer' ] = 1563.56410044
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoA-unCP' ] = 593.67756289
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoB-unCP' ] = 501.45867869
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-dimer' ] = 1563.69890911
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoA-unCP' ] = 595.94249141
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoB-unCP' ] = 503.12213297
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-dimer' ] = 1590.81054033
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoA-unCP' ] = 596.44241276
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoB-unCP' ] = 502.87235332
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-dimer' ] = 1551.55026390
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoA-unCP' ] = 595.72714752
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoB-unCP' ] = 503.45401843
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-dimer' ] = 1411.30275525
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoA-unCP' ] = 503.40799836
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoB-unCP' ] = 503.40916818
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-dimer' ] = 1424.44630670
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoA-unCP' ] = 503.49043267
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoB-unCP' ] = 502.94567640
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-dimer' ] = 1435.87093606
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoA-unCP' ] = 503.11592074
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoB-unCP' ] = 503.11223193
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-dimer' ] = 1849.09724927
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoA-unCP' ] = 596.75580700
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoB-unCP' ] = 693.16448502
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-dimer' ] = 1250.78225068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoA-unCP' ] = 413.69053788
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoB-unCP' ] = 413.05557496
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-dimer' ] = 1622.96228374
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoB-unCP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-dimer' ] = 1657.51101967
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoB-unCP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-dimer' ] = 1626.09750599
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoB-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-dimer' ] = 1590.75136012
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoA-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoB-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-dimer' ] = 1401.39568382
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-dimer' ] = 1399.57843792
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-dimer' ] = 1349.21626455
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoB-unCP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-dimer' ] = 1314.37579643
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoB-unCP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-dimer' ] = 1334.39438102
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoA-unCP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoB-unCP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-dimer' ] = 1319.28084098
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoA-unCP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoB-unCP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-dimer' ] = 973.06142895
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoA-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoB-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-dimer' ] = 1724.48127327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoA-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoB-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-dimer' ] = 1806.98548380
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoB-unCP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-dimer' ] = 971.53306922
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoB-unCP' ] = 359.95491240
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-dimer' ] = 1208.85975313
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-dimer' ] = 1443.15679999
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-dimer' ] = 1481.99538897
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoA-unCP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoB-unCP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-dimer' ] = 1189.16944115
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoA-unCP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoB-unCP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-dimer' ] = 1634.77658138
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoB-unCP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-dimer' ] = 1081.19928424
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoB-unCP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-dimer' ] = 1083.47826689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoA-unCP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-dimer' ] = 1581.29557886
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-dimer' ] = 1219.56453397
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoB-unCP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-dimer' ] = 1405.28947121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoB-unCP' ] = 507.48987021
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-dimer' ] = 1474.05108110
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoA-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoB-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-dimer' ] = 1198.83425089
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoA-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoB-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-dimer' ] = 1321.59763058
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoB-unCP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-dimer' ] = 1311.14164882
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoA-unCP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoB-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-dimer' ] = 1662.05565427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoB-unCP' ] = 596.81166025
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-dimer' ] = 1471.46519284
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoA-unCP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoB-unCP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-dimer' ] = 2118.75518694
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoA-unCP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoB-unCP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-dimer' ] = 1195.41740656
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoB-unCP' ] = 442.44825964
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-dimer' ] = 1827.15190604
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoB-unCP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-dimer' ] = 1330.55895484
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoA-unCP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoB-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-dimer' ] = 1207.89930362
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoB-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-dimer' ] = 1669.65398984
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoA-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-dimer' ] = 1734.47387907
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-dimer' ] = 963.62312494
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-dimer' ] = 1537.57227681
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoA-unCP' ] = 596.44964921
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoB-unCP' ] = 357.10169648
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-dimer' ] = 1870.03529750
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoA-unCP' ] = 696.44803543
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoB-unCP' ] = 443.64584898
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-dimer' ] = 1542.14304870
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoA-unCP' ] = 503.36564485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoB-unCP' ] = 440.14700689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-dimer' ] = 1873.30862324
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoA-unCP' ] = 596.40342598
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoB-unCP' ] = 532.86039581
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-dimer' ] = 1136.50020569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-dimer' ] = 1143.60873849
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoB-unCP' ] = 355.44451746
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-dimer' ] = 1144.33569661
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoB-unCP' ] = 355.44455365
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-dimer' ] = 1144.53152982
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-dimer' ] = 1136.39531003
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoB-unCP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-dimer' ] = 1137.56590421
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoB-unCP' ] = 355.44458162
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-dimer' ] = 1089.71176518
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoB-unCP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-dimer' ] = 1135.52588803
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-dimer' ] = 1135.89252554
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoB-unCP' ] = 355.44457113
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-dimer' ] = 1136.27990430
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-dimer' ] = 1137.68428928
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-dimer' ] = 1091.48755032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoB-unCP' ] = 355.44459806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-dimer' ] = 1114.79473660
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoB-unCP' ] = 355.44457375
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-dimer' ] = 1144.74104397
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoA-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoB-unCP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-dimer' ] = 1593.04361768
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoB-unCP' ] = 501.81461592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-dimer' ] = 1914.59068159
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoA-unCP' ] = 593.90346744
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoB-unCP' ] = 593.90347753
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-dimer' ] = 1358.00357589
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoB-unCP' ] = 355.44452826
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-dimer' ] = 1749.63836451
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoB-unCP' ] = 501.81458877
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-dimer' ] = 1135.19068685
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoA-unCP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoB-unCP' ] = 355.44453848
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-dimer' ] = 1368.42192946
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoA-unCP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoB-unCP' ] = 355.38546038
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-dimer' ] = 1491.03516654
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoB-unCP' ] = 355.44418383
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-dimer' ] = 1143.55810352
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoA-unCP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoB-unCP' ] = 355.38590060
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-dimer' ] = 1124.41284995
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoA-unCP' ] = 355.38547127
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoB-unCP' ] = 355.38549385
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-dimer' ] = 1517.60433270
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoA-unCP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoB-unCP' ] = 355.38464230
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-dimer' ] = 1912.03719777
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoA-unCP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoB-unCP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-dimer' ] = 1120.88525374
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoB-unCP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-dimer' ] = 1612.73592913
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoA-unCP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoB-unCP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-dimer' ] = 1415.77211916
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoA-unCP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoB-unCP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-dimer' ] = 1529.52830806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoA-unCP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoB-unCP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-dimer' ] = 1475.57522935
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoA-unCP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoB-unCP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-dimer' ] = 1782.23519943
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-dimer' ] = 1257.02669139
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoA-unCP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-dimer' ] = 1740.95680727
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoA-unCP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoB-unCP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-dimer' ] = 1260.01230981
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoA-unCP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoB-unCP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-dimer' ] = 1609.15794755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoB-unCP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-dimer' ] = 1349.63628460
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoB-unCP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-dimer' ] = 1673.67295485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoA-unCP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoB-unCP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-dimer' ] = 1367.26317388
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoB-unCP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-dimer' ] = 1509.79318924
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoA-unCP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoB-unCP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-dimer' ] = 1545.03032944
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoA-unCP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoB-unCP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-dimer' ] = 1601.56827337
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoA-unCP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoB-unCP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-dimer' ] = 1410.31245614
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoA-unCP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoB-unCP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-dimer' ] = 1816.15304322
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoA-unCP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoB-unCP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-dimer' ] = 1727.56215886
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoA-unCP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoB-unCP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-dimer' ] = 1650.54443625
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoA-unCP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoB-unCP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-dimer' ] = 1964.24212034
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoA-unCP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoB-unCP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-dimer' ] = 1496.57764615
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoA-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoB-unCP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-dimer' ] = 1752.69730428
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoA-unCP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoB-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-dimer' ] = 1512.39205830
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoA-unCP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoB-unCP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-dimer' ] = 1498.52644117
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoA-unCP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoB-unCP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoA-CP' ] = 357.13933560
DATA['NUCLEAR REPULSION ENERGY']['JSCH-1-monoB-CP' ] = 596.62760720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoA-CP' ] = 443.56399475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-2-monoB-CP' ] = 696.60732032
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoA-CP' ] = 503.39630679
DATA['NUCLEAR REPULSION ENERGY']['JSCH-3-monoB-CP' ] = 440.30156925
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoA-CP' ] = 596.45767348
DATA['NUCLEAR REPULSION ENERGY']['JSCH-4-monoB-CP' ] = 533.27333592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoA-CP' ] = 694.08169190
DATA['NUCLEAR REPULSION ENERGY']['JSCH-5-monoB-CP' ] = 357.17481831
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoA-CP' ] = 357.05937707
DATA['NUCLEAR REPULSION ENERGY']['JSCH-6-monoB-CP' ] = 502.93669666
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoA-CP' ] = 596.76364865
DATA['NUCLEAR REPULSION ENERGY']['JSCH-7-monoB-CP' ] = 357.05278633
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoA-CP' ] = 357.43034135
DATA['NUCLEAR REPULSION ENERGY']['JSCH-8-monoB-CP' ] = 369.97349400
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoA-CP' ] = 357.07506993
DATA['NUCLEAR REPULSION ENERGY']['JSCH-9-monoB-CP' ] = 357.22791266
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoA-CP' ] = 357.30966824
DATA['NUCLEAR REPULSION ENERGY']['JSCH-10-monoB-CP' ] = 357.25457437
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoA-CP' ] = 357.30771904
DATA['NUCLEAR REPULSION ENERGY']['JSCH-11-monoB-CP' ] = 670.49331720
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoA-CP' ] = 502.97292629
DATA['NUCLEAR REPULSION ENERGY']['JSCH-12-monoB-CP' ] = 412.74720533
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoA-CP' ] = 596.65701652
DATA['NUCLEAR REPULSION ENERGY']['JSCH-13-monoB-CP' ] = 440.44274318
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoA-CP' ] = 596.92255465
DATA['NUCLEAR REPULSION ENERGY']['JSCH-14-monoB-CP' ] = 440.54554467
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoA-CP' ] = 503.53728859
DATA['NUCLEAR REPULSION ENERGY']['JSCH-15-monoB-CP' ] = 425.75653587
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoA-CP' ] = 596.66795120
DATA['NUCLEAR REPULSION ENERGY']['JSCH-16-monoB-CP' ] = 413.04224329
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoA-CP' ] = 596.75974596
DATA['NUCLEAR REPULSION ENERGY']['JSCH-17-monoB-CP' ] = 412.84579804
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoA-CP' ] = 356.91023176
DATA['NUCLEAR REPULSION ENERGY']['JSCH-18-monoB-CP' ] = 503.30931271
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoA-CP' ] = 596.15051246
DATA['NUCLEAR REPULSION ENERGY']['JSCH-19-monoB-CP' ] = 596.63218121
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoA-CP' ] = 596.58545524
DATA['NUCLEAR REPULSION ENERGY']['JSCH-20-monoB-CP' ] = 670.44234386
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoA-CP' ] = 670.04325278
DATA['NUCLEAR REPULSION ENERGY']['JSCH-21-monoB-CP' ] = 596.84640861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoA-CP' ] = 596.48090327
DATA['NUCLEAR REPULSION ENERGY']['JSCH-22-monoB-CP' ] = 503.33140569
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoA-CP' ] = 503.54456755
DATA['NUCLEAR REPULSION ENERGY']['JSCH-23-monoB-CP' ] = 596.89708469
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoA-CP' ] = 593.67756289
DATA['NUCLEAR REPULSION ENERGY']['JSCH-24-monoB-CP' ] = 501.45867869
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoA-CP' ] = 595.94249141
DATA['NUCLEAR REPULSION ENERGY']['JSCH-25-monoB-CP' ] = 503.12213297
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoA-CP' ] = 596.44241276
DATA['NUCLEAR REPULSION ENERGY']['JSCH-26-monoB-CP' ] = 502.87235332
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoA-CP' ] = 595.72714752
DATA['NUCLEAR REPULSION ENERGY']['JSCH-27-monoB-CP' ] = 503.45401843
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoA-CP' ] = 503.40799836
DATA['NUCLEAR REPULSION ENERGY']['JSCH-28-monoB-CP' ] = 503.40916818
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoA-CP' ] = 503.49043267
DATA['NUCLEAR REPULSION ENERGY']['JSCH-29-monoB-CP' ] = 502.94567640
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoA-CP' ] = 503.11592074
DATA['NUCLEAR REPULSION ENERGY']['JSCH-30-monoB-CP' ] = 503.11223193
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoA-CP' ] = 596.75580700
DATA['NUCLEAR REPULSION ENERGY']['JSCH-31-monoB-CP' ] = 693.16448502
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoA-CP' ] = 413.69053788
DATA['NUCLEAR REPULSION ENERGY']['JSCH-32-monoB-CP' ] = 413.05557496
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-33-monoB-CP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-34-monoB-CP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-35-monoB-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoA-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-36-monoB-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-37-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-38-monoB-CP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-39-monoB-CP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-40-monoB-CP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoA-CP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-41-monoB-CP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoA-CP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-42-monoB-CP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoA-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-43-monoB-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoA-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-44-monoB-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-45-monoB-CP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-46-monoB-CP' ] = 359.95491240
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-47-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-48-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoA-CP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-49-monoB-CP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoA-CP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-50-monoB-CP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-51-monoB-CP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-52-monoB-CP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoA-CP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-53-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-54-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-55-monoB-CP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-56-monoB-CP' ] = 507.48987021
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoA-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-57-monoB-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoA-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-58-monoB-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-59-monoB-CP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoA-CP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-60-monoB-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-61-monoB-CP' ] = 596.81166025
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoA-CP' ] = 533.43445531
DATA['NUCLEAR REPULSION ENERGY']['JSCH-62-monoB-CP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoA-CP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-63-monoB-CP' ] = 697.73092506
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-64-monoB-CP' ] = 442.44825964
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-65-monoB-CP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoA-CP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-66-monoB-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-67-monoB-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoA-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-68-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-69-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-70-monoB-CP' ] = 357.96626427
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoA-CP' ] = 596.44964921
DATA['NUCLEAR REPULSION ENERGY']['JSCH-71-monoB-CP' ] = 357.10169648
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoA-CP' ] = 696.44803543
DATA['NUCLEAR REPULSION ENERGY']['JSCH-72-monoB-CP' ] = 443.64584898
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoA-CP' ] = 503.36564485
DATA['NUCLEAR REPULSION ENERGY']['JSCH-73-monoB-CP' ] = 440.14700689
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoA-CP' ] = 596.40342598
DATA['NUCLEAR REPULSION ENERGY']['JSCH-74-monoB-CP' ] = 532.86039581
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-75-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-76-monoB-CP' ] = 355.44451746
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-77-monoB-CP' ] = 355.44455365
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-78-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-79-monoB-CP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-80-monoB-CP' ] = 355.44458162
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-81-monoB-CP' ] = 355.44458170
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-82-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-83-monoB-CP' ] = 355.44457113
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-84-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-85-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-86-monoB-CP' ] = 355.44459806
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-87-monoB-CP' ] = 355.44457375
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoA-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-88-monoB-CP' ] = 355.44457724
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-89-monoB-CP' ] = 501.81461592
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoA-CP' ] = 593.90346744
DATA['NUCLEAR REPULSION ENERGY']['JSCH-90-monoB-CP' ] = 593.90347753
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-91-monoB-CP' ] = 355.44452826
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-92-monoB-CP' ] = 501.81458877
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoA-CP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-93-monoB-CP' ] = 355.44453848
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoA-CP' ] = 501.81461749
DATA['NUCLEAR REPULSION ENERGY']['JSCH-94-monoB-CP' ] = 355.38546038
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-95-monoB-CP' ] = 355.44418383
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoA-CP' ] = 355.44454853
DATA['NUCLEAR REPULSION ENERGY']['JSCH-96-monoB-CP' ] = 355.38590060
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoA-CP' ] = 355.38547127
DATA['NUCLEAR REPULSION ENERGY']['JSCH-97-monoB-CP' ] = 355.38549385
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoA-CP' ] = 593.90347360
DATA['NUCLEAR REPULSION ENERGY']['JSCH-98-monoB-CP' ] = 355.38464230
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoA-CP' ] = 601.53395829
DATA['NUCLEAR REPULSION ENERGY']['JSCH-99-monoB-CP' ] = 601.53394221
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-100-monoB-CP' ] = 359.95486861
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoA-CP' ] = 507.48990840
DATA['NUCLEAR REPULSION ENERGY']['JSCH-101-monoB-CP' ] = 507.48988528
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoA-CP' ] = 443.36745667
DATA['NUCLEAR REPULSION ENERGY']['JSCH-102-monoB-CP' ] = 443.36744333
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoA-CP' ] = 359.95499475
DATA['NUCLEAR REPULSION ENERGY']['JSCH-103-monoB-CP' ] = 601.53410726
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoA-CP' ] = 359.95489055
DATA['NUCLEAR REPULSION ENERGY']['JSCH-104-monoB-CP' ] = 601.53394398
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-105-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoA-CP' ] = 443.36742635
DATA['NUCLEAR REPULSION ENERGY']['JSCH-106-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoA-CP' ] = 507.48984219
DATA['NUCLEAR REPULSION ENERGY']['JSCH-107-monoB-CP' ] = 601.53400057
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoA-CP' ] = 443.36740384
DATA['NUCLEAR REPULSION ENERGY']['JSCH-108-monoB-CP' ] = 359.95489222
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-109-monoB-CP' ] = 601.53400527
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-110-monoB-CP' ] = 359.95488319
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoA-CP' ] = 443.36742741
DATA['NUCLEAR REPULSION ENERGY']['JSCH-111-monoB-CP' ] = 601.53395163
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-112-monoB-CP' ] = 359.95490302
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoA-CP' ] = 507.48987556
DATA['NUCLEAR REPULSION ENERGY']['JSCH-113-monoB-CP' ] = 443.36742934
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoA-CP' ] = 507.48984783
DATA['NUCLEAR REPULSION ENERGY']['JSCH-114-monoB-CP' ] = 443.36742068
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoA-CP' ] = 507.48989123
DATA['NUCLEAR REPULSION ENERGY']['JSCH-115-monoB-CP' ] = 507.48987743
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoA-CP' ] = 443.36742642
DATA['NUCLEAR REPULSION ENERGY']['JSCH-116-monoB-CP' ] = 443.36741425
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoA-CP' ] = 595.94046611
DATA['NUCLEAR REPULSION ENERGY']['JSCH-117-monoB-CP' ] = 534.51236588
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoA-CP' ] = 442.44825872
DATA['NUCLEAR REPULSION ENERGY']['JSCH-118-monoB-CP' ] = 697.73026630
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoA-CP' ] = 595.84177555
DATA['NUCLEAR REPULSION ENERGY']['JSCH-119-monoB-CP' ] = 443.14986171
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoA-CP' ] = 535.63812262
DATA['NUCLEAR REPULSION ENERGY']['JSCH-120-monoB-CP' ] = 697.73051558
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoA-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-121-monoB-CP' ] = 596.77416801
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoA-CP' ] = 503.84093948
DATA['NUCLEAR REPULSION ENERGY']['JSCH-122-monoB-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoA-CP' ] = 596.88729965
DATA['NUCLEAR REPULSION ENERGY']['JSCH-123-monoB-CP' ] = 358.21308540
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoA-CP' ] = 596.89846546
DATA['NUCLEAR REPULSION ENERGY']['JSCH-124-monoB-CP' ] = 357.96626427
|
CDSherrill/psi4
|
psi4/share/psi4/databases/JSCH.py
|
Python
|
lgpl-3.0
| 287,170
|
[
"Psi4"
] |
b5dae590126a0f7751dbe3986318edde0994adc98225757d3e838f1aa6545039
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 27 14:03:34 2011
@author: Shreejoy
"""
from SPARQLWrapper import SPARQLWrapper, JSON
import re
from pubapp.models import Neuron
# get all subclasses of the neuron class (sao:sao1417703748) from NeuroLex
sparql = SPARQLWrapper("http://rdf.neuinfo.org/sparql")
sparql.setQuery("""
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix sao: <http://ontology.neuinfo.org/NIF/BiomaterialEntities/NIF-Cell.owl#>
SELECT distinct ?e, ?l
WHERE {
{{
select *
where
{
?e rdfs:subClassOf ?n
}
}
OPTION (TRANSITIVE, t_distinct, t_in(?n), t_out(?e) )
FILTER (?n = sao:sao1417703748) . ?e rdfs:label ?l
}
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
neuronList = []
neuronIdList = []
for result in results["results"]["bindings"]:
neuronName = result["l"]["value"]
neuronID = re.search(r'\#[\w]+', result["e"]["value"]).group()
neuronID = neuronID[1:] # remove starting hashtag
n = Neuron.objects.get_or_create(nlexID = neuronID, name = neuronName)[0]
neuronList.append(result["l"]["value"])
neuronIdList.append(neuronID) # hashtag was already stripped above
n.save()
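# Hedged sanity check of the ID extraction above (hypothetical URI, shown as
# an illustrative doctest rather than project code):
# >>> re.search(r'\#[\w]+', 'http://example.org/ontology#nifext_152').group()[1:]
# 'nifext_152'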
|
lessc0de/neuroelectro_org
|
neurolex_integration/get_neuron_types.py
|
Python
|
gpl-2.0
| 1,218
|
[
"NEURON"
] |
666a696d6956f73599bdf6ccecabbe1c1bc0b248c76ba44f7b032db42565ef64
|
from toee import *
from utilities import *
from combat_standard_routines import *
def san_dialog( attachee, triggerer ):
if (attachee.leader_get() != OBJ_HANDLE_NULL):
triggerer.begin_dialog( attachee, 330 ) ## burne in party
elif (game.global_vars[909] == 32 and attachee.map != 5016 and attachee.map != 5018):
triggerer.begin_dialog( attachee, 1060 ) ## have attacked 3 or more farm animals with burne in party and not in castle main hall or upper hall
elif (game.global_flags[839] == 1):
triggerer.begin_dialog(attachee,1160) ## have liberated lareth
elif (game.global_flags[835] == 1 and game.global_flags[37] == 0 and game.global_flags[842] == 1 and game.global_flags[839] == 0):
triggerer.begin_dialog(attachee,1000) ## handled tower fight diplomatically and lareth is alive and have heard about prisoner lareth and have not liberated lareth
elif (game.party[0].reputation_has( 28 ) == 1):
triggerer.begin_dialog( attachee, 590 ) ## have dominatrix reputation - burne will kiss your ass
elif (game.party[0].reputation_has( 27 ) == 1):
triggerer.begin_dialog( attachee, 11002 ) ## have rabble-rouser reputation - burne won't talk to you
else:
triggerer.begin_dialog( attachee, 1 ) ## none of the above
return SKIP_DEFAULT
def san_first_heartbeat( attachee, triggerer ):
if (attachee.leader_get() == OBJ_HANDLE_NULL):
if ((game.global_vars[501] >= 2 and game.quests[97].state != qs_completed and game.quests[96].state != qs_completed) or game.global_vars[510] == 2):
attachee.object_flag_set(OF_OFF)
else:
attachee.object_flag_unset(OF_OFF)
if (not game.combat_is_active()):
game.global_vars[730] = 0
return RUN_DEFAULT
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
attachee.float_line(12014,triggerer)
game.global_flags[336] = 1
game.global_flags[282] = 1
if (game.global_flags[231] == 0):
game.global_vars[23] = game.global_vars[23] + 1
if (game.global_vars[23] >= 2):
game.party[0].reputation_add( 92 )
else:
game.global_vars[29] = game.global_vars[29] + 1
return RUN_DEFAULT
def san_enter_combat( attachee, triggerer ):
ProtectTheInnocent(attachee, triggerer)
return RUN_DEFAULT
def san_resurrect( attachee, triggerer ):
game.global_flags[336] = 0
game.global_flags[282] = 0
return RUN_DEFAULT
def san_heartbeat( attachee, triggerer ):
if (not game.combat_is_active()):
if (game.global_vars[909] >= 3):
if (attachee != OBJ_HANDLE_NULL):
leader = attachee.leader_get()
if (leader != OBJ_HANDLE_NULL):
leader.follower_remove(attachee)
attachee.float_line(22000,triggerer)
if (game.global_vars[730] == 0 and attachee.leader_get() == OBJ_HANDLE_NULL):
attachee.cast_spell(spell_mage_armor, attachee)
attachee.spells_pending_to_memorized()
game.global_vars[730] = 1
return RUN_DEFAULT
def san_join( attachee, triggerer ):
game.global_flags[231] = 1
diamond = attachee.item_find( 12036 )
diamond.item_flag_set(OIF_NO_TRANSFER)
amber = attachee.item_find( 12040 )
amber.item_flag_set(OIF_NO_TRANSFER)
silver_medallion_necklace = attachee.item_find( 6197 )
silver_medallion_necklace.item_flag_set(OIF_NO_TRANSFER)
emerald = attachee.item_find( 12010 )
emerald.item_flag_set(OIF_NO_TRANSFER)
silver_necklace = attachee.item_find( 6194 )
silver_necklace.item_flag_set(OIF_NO_TRANSFER)
dagger = attachee.item_find( 4058 )
dagger.item_flag_set(OIF_NO_TRANSFER)
wand = attachee.item_find( 12007 )
wand.item_flag_set(OIF_NO_TRANSFER)
chime = attachee.item_find( 12008 )
chime.item_flag_set(OIF_NO_TRANSFER)
ring = attachee.item_find( 6083 )
ring.item_flag_set(OIF_NO_TRANSFER)
kit = attachee.item_find( 12848 )
kit.item_flag_set(OIF_NO_TRANSFER)
return RUN_DEFAULT
def san_disband( attachee, triggerer ):
game.global_flags[231] = 0
for pc in game.party:
attachee.ai_shitlist_remove( pc )
attachee.reaction_set( pc, 50 )
diamond = attachee.item_find( 12036 )
diamond.item_flag_unset(OIF_NO_TRANSFER)
amber = attachee.item_find( 12040 )
amber.item_flag_unset(OIF_NO_TRANSFER)
silver_medallion_necklace = attachee.item_find( 6197 )
silver_medallion_necklace.item_flag_unset(OIF_NO_TRANSFER)
emerald = attachee.item_find( 12010 )
emerald.item_flag_unset(OIF_NO_TRANSFER)
silver_necklace = attachee.item_find( 6194 )
silver_necklace.item_flag_unset(OIF_NO_TRANSFER)
dagger = attachee.item_find( 4058 )
dagger.item_flag_unset(OIF_NO_TRANSFER)
wand = attachee.item_find( 12007 )
wand.item_flag_unset(OIF_NO_TRANSFER)
chime = attachee.item_find( 12008 )
chime.item_flag_unset(OIF_NO_TRANSFER)
ring = attachee.item_find( 6083 )
ring.item_flag_unset(OIF_NO_TRANSFER)
kit = attachee.item_find( 12848 )
kit.item_flag_unset(OIF_NO_TRANSFER)
return RUN_DEFAULT
def san_new_map( attachee, triggerer ):
if ( game.global_flags[195] == 1 ):
game.leader.begin_dialog( attachee, 480 )
return SKIP_DEFAULT
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8fixes/scr/py00004burne.py
|
Python
|
mit
| 4,926
|
[
"Amber"
] |
6683608fd3929004d10bee0a25debb641a678039534d7e3a6f16d5e0b62b8c89
|
# From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press
# (c) 2017, Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida
#
# you are kindly asked to include the complete citation if you used this
# material in a publication
#
# Code 10.11 Beta model in Python using Stan, for accessing the
# relationship between the fraction of atomic gas and
# the galaxy stellar mass
#
# Statistical Model: Beta model in Python using Stan
#
# Astronomy case: Relation between atomic gas fraction
# and stellar mass
# taken from Bradford et al., 2015, ApJ 809, id. 146
#
# 1 response (Y - atomic gas fraction)
# 1 explanatory variable (x - log stellar mass)
#
# Original data from: http://www.astro.yale.edu/jdbradford/research.html
import numpy as np
import pandas as pd
import pystan
import statsmodels.api as sm
# Data
path_to_data = 'https://raw.githubusercontent.com/astrobayes/BMAD/master/data/Section_10p5/f_gas.csv'
# read data
data_frame = dict(pd.read_csv(path_to_data))
# build the atomic gas fraction
y = np.array([data_frame['M_HI'][i]/
(data_frame['M_HI'][i] + data_frame['M_STAR'][i])
for i in range(data_frame['M_STAR'].shape[0])])
x = np.array([np.log(item) for item in data_frame['M_STAR']])
# prepare data for Stan
data = {}
data['Y'] = y
data['X'] = sm.add_constant((x.transpose()))
data['nobs'] = data['X'].shape[0]
data['K'] = data['X'].shape[1]
# Fit
stan_code="""
data{
int<lower=0> nobs; # number of data points
int<lower=0> K; # number of coefficients
matrix[nobs, K] X; # stellar mass
real<lower=0, upper=1> Y[nobs]; # atomic gas fraction
}
parameters{
vector[K] beta; # linear predictor coefficients
real<lower=0> theta;
}
model{
vector[nobs] pi;
real a[nobs];
real b[nobs];
for (i in 1:nobs){
pi[i] = inv_logit(X[i] * beta);
a[i] = theta * pi[i];
b[i] = theta * (1 - pi[i]);
}
# priors and likelihood
for (i in 1:K) beta[i] ~ normal(0, 100);
theta ~ gamma(0.01, 0.01);
Y ~ beta(a, b);
}
"""
# Run mcmc
fit = pystan.stan(model_code=stan_code, data=data, iter=7500, chains=3,
warmup=5000, thin=1, n_jobs=3)
# Output
print(fit)
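# Hedged follow-up sketch, assuming the PyStan 2.x API used above:
# posterior = fit.extract()                    # dict of posterior draws
# beta_mean = posterior['beta'].mean(axis=0)   # mean of linear coefficients
# theta_mean = posterior['theta'].mean()       # mean of the precision parameter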
|
astrobayes/BMAD
|
chapter_10/code_10.11.py
|
Python
|
gpl-3.0
| 2,333
|
[
"Galaxy"
] |
bff55fd58506f08544f0f90507a7f7c13349e640701c9f85bfcac28ad356929d
|
"""
Synthetic Minority Over-Sampling Technique for Regression with Gaussian Noise
https://github.com/nickkunz/smogn
"""
from smogn.box_plot_stats import box_plot_stats
from smogn.phi_ctrl_pts import phi_ctrl_pts
from smogn.phi import phi
from smogn.smoter import smoter
__all__ = [
"box_plot_stats",
"phi_ctrl_pts",
"phi",
"smoter"
]
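# Hedged usage sketch (not shipped with the package): `smoter` is the
# documented entry point; `df` is a hypothetical pandas DataFrame whose
# response column is named 'y'.
#
# import smogn
# df_balanced = smogn.smoter(data=df, y='y')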
|
nickkunz/smogn
|
smogn/__init__.py
|
Python
|
gpl-3.0
| 354
|
[
"Gaussian"
] |
2552e3a0448465f2abaa225e8637ca6e48e23763ce323b8ce66fbae158c78970
|
from parsimonious.grammar import Grammar
from parsimonious.nodes import *
from decimal import Decimal
# ===================================================================
class ReplacementVisitor(NodeVisitor):
def __init__(self, data={}):
self._data = data
# default node handler ------------------------------------------
def generic_visit(self, node, children):
if len(children) == 1:
return children[0]
else:
return node.text.strip()
# proper node handlers ------------------------------------------
def visit_replacement(self, node, children):
ws1, replacevalue, translist, ws2 = children
return [replacevalue, translist]
def visit_transformationlist(self, node, translist):
return translist
def visit_transformation(self, node, children):
ws1, comma, ws2, transname, transarglist = children
return [transname, transarglist]
def visit_transarglist(self, node, children):
return children
def visit_varname(self, node, children):
if node.text in self._data:
return (self._data[node.text])
raise Exception('variable name "' + node.text + '" was not found.')
def visit_expression(self, node, children):
left, ws1, operator, ws2, right = children
if not isinstance(left, Decimal):
left = Decimal(repr(left))
if not isinstance(right, Decimal):
right = Decimal(repr(right))
if operator == '+':
return (left + right)
if operator == '-':
return (left - right)
if operator == '*':
return (left * right)
if operator == '/':
return (left / right)
raise Exception('operator "' + operator + '" is not implemented')
def visit_numberliteral(self, node, children):
return Decimal(node.text.strip())
def visit_doublequotedstr(self, node, children):
ws, q1, arg, q2 = children
return arg
def visit_singlequotedstr(self, node, children):
ws, q1, arg, q2 = children
return arg
def visit_unquotedarg(self, node, children):
ws, arg = children
return arg
# ===================================================================
class ReplacementParser(object):
def __init__(self, replacement='', data={}):
self._replacement = replacement
self._data = data
## replacement --------------------------------------------------
@property
def replacement(self):
""" The 'replacement' property """
return self._replacement
@replacement.setter
def replacement(self, value):
self._replacement = value
return self._replacement
@replacement.deleter
def replacement(self):
del self._replacement
## data ---------------------------------------------------------
@property
def data(self):
""" The 'data' property """
return self._data
@data.setter
def data(self, value):
self._data = value
return self._data
@data.deleter
def data(self):
del self._data
## result -------------------------------------------------------
@property
def result(self):
""" The 'result' property """
g = Grammar("""
replacement = ws replacevalue transformationlist ws
replacevalue = expression / varname / literal
transformationlist = transformation*
transformation = ws comma ws transname transarglist
transarglist = transarg*
transarg = singlequotedstr / doublequotedstr / unquotedarg
expression = term rws operator rws term
term = numberliteral / varname
varname = ~"[a-z_][a-z0-9_]*"i
transname = ~"[a-z_][a-z0-9_]*"i
literal = numberliteral / stringliteral
numberliteral = ~"(\+|-)?\d+([.]\d+)?"
stringliteral = singlequotedstr / doublequotedstr
doublequotedstr = ws dblq notdblq dblq
singlequotedstr = ws sngq notsngq sngq
unquotedarg = ws notwsorcomma
operator = plus / minus / times / divide
plus = "+"
minus = "-"
times = "*"
divide = "/"
rws = ~"\s+"
ws = ~"\s*"
comma = ","
notwsorcomma = ~"[^\s,]+"
dblq = "\\""
notdblq = ~"[^\\"]*"
sngq = "'"
notsngq = ~"[^']*"
""")
tree = g.parse(self._replacement)
return ReplacementVisitor(self._data).visit(tree)
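# Hedged usage sketch of the parser above (output traced from the grammar and
# visitor as written; illustrative, not a test fixture):
# >>> ReplacementParser('x + 1', {'x': 2}).result
# [Decimal('3'), []]   # evaluated replacevalue plus the (empty) transformation list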
|
DanielBaird/prosemaker
|
egg/dwb.prosemaker/src/dwb/prosemaker/replacementparser.py
|
Python
|
apache-2.0
| 4,684
|
[
"VisIt"
] |
15d224cf83df4fbad5771dd239e6676d42c3c5359285263aa53ebb733199e31a
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/active_matter/active_matter.py",
gpu=True,
ED_N_SAMPLING_STEPS=100000,
RECT_N_SAMPLES=150,
HYDRO_N_STEPS=100
)
@skipIfMissingFeatures
class TestActMat(ut.TestCase):
system = tutorial.system
def test_enhanced_diffusion(self):
""" Check that the active particle diffuses faster than the passive one
"""
self.assertGreater(
tutorial.msd_result[-1, 0], tutorial.msd_result[-1, 1])
def test_rectification(self):
""" Check that the center of mass is in the right half of the box
"""
self.assertGreater(tutorial.com_deviations[-1], 0)
def test_hydrodynamics(self):
""" Check that the particle is moving up and the fluid down
"""
self.assertGreater(
tutorial.system.analysis.linear_momentum(
include_lbfluid=False)[2], 0)
self.assertLess(
tutorial.system.analysis.linear_momentum(
include_particles=False)[2], 0)
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/scripts/tutorials/test_active_matter.py
|
Python
|
gpl-3.0
| 1,884
|
[
"ESPResSo"
] |
b18e579ec6ed30c6ed7817b421e4ca8f7f87ae21fd067587f06e454f488db441
|
"""
Acceptance tests for the certificate web view feature.
"""
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.certificates import CertificateConfigFixture
from common.test.acceptance.fixtures.course import CourseFixture, CourseUpdateDesc, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.certificate_page import CertificatePage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest, get_element_padding, load_data_str
@attr(shard=5)
class CertificateWebViewTest(EventsTestMixin, UniqueCourseTest):
"""
Tests for verifying certificate web view features
"""
def setUp(self):
super(CertificateWebViewTest, self).setUp()
# set same course number as we have in fixture json
self.course_info['number'] = "335535897951379478207964576572017930000"
test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True,
}
course_settings = {'certificates': test_certificate_config}
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
settings=course_settings
)
self.course_fixture.add_advanced_settings({
"cert_html_view_enabled": {"value": "true"},
"certificates_display_behavior": {"value": "early_with_info"},
})
self.course_fixture.install()
self.user_id = "99" # we have created a user with this id in fixture
self.cert_fixture = CertificateConfigFixture(self.course_id, test_certificate_config)
# Load certificate web view page for use by the tests
self.certificate_page = CertificatePage(self.browser, self.user_id, self.course_id)
def log_in_as_unique_user(self):
"""
Log in as a valid lms user.
"""
AutoAuthPage(
self.browser,
username="testcert",
email="cert@example.com",
password="testuser",
course_id=self.course_id
).visit()
def test_page_has_accomplishments_banner(self):
"""
Scenario: User accomplishment banner should be present if logged in user is the one who is awarded
the certificate
Given there is a course with certificate configuration
And I have passed the course and certificate is generated
When I view the certificate web view page
Then I should see the accomplishment banner; the banner should have LinkedIn and Facebook share buttons
And When I click on `Add to Profile` button `edx.certificate.shared` event should be emitted
"""
self.cert_fixture.install()
self.log_in_as_unique_user()
self.certificate_page.visit()
self.assertTrue(self.certificate_page.accomplishment_banner.visible)
self.assertTrue(self.certificate_page.add_to_linkedin_profile_button.visible)
self.assertTrue(self.certificate_page.add_to_facebook_profile_button.visible)
self.certificate_page.add_to_linkedin_profile_button.click()
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.certificate.shared'},
number_of_matches=1
)
expected_events = [
{
'event': {
'user_id': self.user_id,
'course_id': self.course_id
}
}
]
self.assert_events_match(expected_events, actual_events)
@attr(shard=5)
class CertificateProgressPageTest(UniqueCourseTest):
"""
Tests for verifying Certificate info on Progress tab of course page.
"""
def setUp(self):
super(CertificateProgressPageTest, self).setUp()
# set same course number as we have in fixture json
self.course_info['number'] = "3355358979513794782079645765720179311111"
test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
course_settings = {'certificates': test_certificate_config}
self.course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"],
settings=course_settings
)
self.course_fixture.add_advanced_settings({
"cert_html_view_enabled": {"value": "true"},
"certificates_show_before_end": {"value": "true"}
})
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection', grader_type='Final Exam').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2', grader_type='Midterm Exam').add_children(
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
)
)
)
self.course_fixture.install()
self.user_id = "99" # we have created a user with this id in fixture
self.cert_fixture = CertificateConfigFixture(self.course_id, test_certificate_config)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def log_in_as_unique_user(self):
"""
Log in as a valid lms user.
"""
AutoAuthPage(
self.browser,
username="testprogress",
email="progress@example.com",
password="testuser",
course_id=self.course_id
).visit()
def test_progress_page_has_view_certificate_button(self):
"""
Scenario: View Certificate option should be present on Course Progress menu if the user is
awarded a certificate.
And there should be no padding around the box containing certificate info. (See SOL-1196 for details on this)
As a Student
Given there is a course with certificate configuration
And I have passed the course and certificate is generated
When I go on the Progress tab for the course
Then I should see a 'View Certificate' button
And there should be no padding around the Certificate info box.
"""
self.cert_fixture.install()
self.log_in_as_unique_user()
self.complete_course_problems()
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
self.assertTrue(self.progress_page.q(css='.auto-cert-message').first.visible)
actual_padding = get_element_padding(self.progress_page, '.wrapper-msg.wrapper-auto-cert')
actual_padding = [int(padding) for padding in actual_padding.itervalues()]
expected_padding = [0, 0, 0, 0]
# Verify that there is no padding around the box containing certificate info.
self.assertEqual(actual_padding, expected_padding)
def complete_course_problems(self):
"""
Complete Course Problems.
Problems were added in the setUp
"""
# self.course_info_page.visit()
# self.tab_nav.go_to_tab('Course')
#
# # TODO: TNL-6546: Remove extra visit call.
self.course_home_page.visit()
# Navigate to Test Subsection in Test Section Section
self.course_home_page.outline.go_to_section('Test Section', 'Test Subsection')
# Navigate to Test Problem 1
self.courseware_page.nav.go_to_vertical('Test Problem 1')
# Select correct value for from select menu
self.courseware_page.q(css='select option[value="{}"]'.format('blue')).first.click()
# Select correct radio button for the answer
self.courseware_page.q(css='fieldset div.field:nth-child(4) input').nth(0).click()
# Select correct radio buttons for the answer
self.courseware_page.q(css='fieldset div.field:nth-child(2) input').nth(1).click()
self.courseware_page.q(css='fieldset div.field:nth-child(4) input').nth(1).click()
# Submit the answer
self.courseware_page.q(css='button.submit').click()
self.courseware_page.wait_for_ajax()
# Navigate to the 'Test Subsection 2' of 'Test Section 2'
self.course_home_page.visit()
self.course_home_page.outline.go_to_section('Test Section 2', 'Test Subsection 2')
# Navigate to Test Problem 2
self.courseware_page.nav.go_to_vertical('Test Problem 2')
# Fill in the answer of the problem
self.courseware_page.q(css='input[id^=input_][id$=_2_1]').fill('A*x^2 + sqrt(y)')
# Submit the answer
self.courseware_page.q(css='button.submit').click()
self.courseware_page.wait_for_ajax()
|
lduarte1991/edx-platform
|
common/test/acceptance/tests/lms/test_certificate_web_view.py
|
Python
|
agpl-3.0
| 10,347
|
[
"VisIt"
] |
157db1a83dd024e8778c7ad238e25490b648f631c1998e3608566e3dcf93de48
|
"""
PyQt 5.6 / Python 3
Convert HTML to PDF via Qt's print support (QTextDocument + QPrinter)
"""
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtPrintSupport import QPrinter
html = '''
<?xml version='1.0' encoding='utf-8'?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta name="author" content="Brian W. Kernighan, Dennis M. Ritchie"/>
<meta name="description" content="An introduction to C."/>
<meta name="keywords" content="book,programming,c"/>
<title>The C Programming Language</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<link href="stylesheet.css" rel="stylesheet" type="text/css"/>
<link href="page_styles.css" rel="stylesheet" type="text/css"/>
</head>
<body class="calibre">
<div id="book" class="calibre1">
<div id="menu" class="calibre2">
<div id="corner-menu-open" class="calibre3">
<div class="title">The C Programming Language</div>
<h2 class="calibre4">Table of Contents</h2>
<a href="The%20C%20Programming%20Language_split_009.html#preface" class="pcalibre calibre5">Preface</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_009.html#preface-1st" class="pcalibre calibre5">Preface to the first edition </a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_001.html#1" class="pcalibre calibre5">1 - A Tutorial Introduction</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_002.html#2" class="pcalibre calibre5">2 - Types, Operators and Expressions</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_003.html#3" class="pcalibre calibre5">3 - Control Flow</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_004.html#4" class="pcalibre calibre5">4 - Functions and Program Structure</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_005.html#5" class="pcalibre calibre5">5 - Pointers and Arrays</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_006.html#6" class="pcalibre calibre5">6 - Structures</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_007.html#7" class="pcalibre calibre5">7 - Input and Output</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_008.html#8" class="pcalibre calibre5">8 - The UNIX System Interface</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_008.html#A" class="pcalibre calibre5">A - Reference Manual</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_009.html#B" class="pcalibre calibre5">B - Standard Library</a><br class="calibre3"/>
<a href="The%20C%20Programming%20Language_split_009.html#C" class="pcalibre calibre5">C - Summary of Changes</a><br class="calibre3"/>
</div>
</div>
<div class="calibre6" id="calibre_pb_0"></div>
</div>
</body></html>
'''
class Widget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.resize(800, 400)
self.btn = QPushButton(self)
self.btn.setText('Create pdf')
self.btn.setGeometry(300, 100, 150, 30)
self.btn.clicked.connect(self.genPdf)
def genPdf(self):
html_printer = QPrinter()
html_printer.setOutputFormat(QPrinter.PdfFormat)
html_printer.setOutputFileName('out.pdf')
doc = QTextDocument()
doc.setHtml(html)
doc.print(html_printer)
QMessageBox.about(self, 'Create pdf', 'Created pdf successfully')
if __name__ == '__main__':
app = QApplication([])
w = Widget()
w.show()
app.exec_()
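# Hedged headless variant of genPdf (same QPrinter/QTextDocument calls as the
# class above; a QApplication must still exist before rendering text):
# app = QApplication([])
# printer = QPrinter()
# printer.setOutputFormat(QPrinter.PdfFormat)
# printer.setOutputFileName('out.pdf')
# doc = QTextDocument()
# doc.setHtml(html)
# doc.print(printer)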
|
tcp813/mouTools
|
qt/html2pdf.py
|
Python
|
mit
| 3,566
|
[
"Brian"
] |
7ac40d96d9675a795f83dbcaeaeb75cb46f283e5e08233ec4761fc68832a431a
|
# default python modules
import os
from datetime import datetime
# external packages
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from rdkit.ML.Descriptors.MoleculeDescriptors \
import MolecularDescriptorCalculator as Calculator
# internal modules
from methods import methods
def train_model(model, data_file, test_percent, save=True):
"""
Choose the regression model
Input
------
model: string, the model to use
data_file: dataframe, cleaned csv data
test_percent: float, the percentage of data held for testing
Returns
------
obj: objective, the trained regressor
X_train: dataframe, normalized training features
y_train: training targets (electrical conductivity)
X_mean, X_std: column means and standard deviations used for normalization
"""
df, y_error = read_data(data_file)
X, y = molecular_descriptors(df)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=(test_percent/100))
X_train, X_mean, X_std = normalization(X_train)
model = model.replace(' ', '_')
# print("training model is ",model)
if (model.lower() == 'lasso'):
obj = methods.do_lasso(X_train, y_train)
elif (model.lower() == 'mlp_regressor'):
obj = methods.do_MLP_regressor(X_train, y_train)
elif (model.lower() == 'svr'):
obj = methods.do_svr(X_train, y_train)
else:
raise ValueError('Invalid model type!')
return obj, X_train, y_train, X_mean, X_std
def normalization(data, means=None, stdevs=None):
"""
Normalizes the data using the means and standard
deviations given, calculating them otherwise.
Returns the means and standard deviations of columns.
Inputs
------
data : Pandas DataFrame
means : optional numpy argument of column means
stdevs : optional numpy argument of column st. devs
Returns
------
normed : the normalized DataFrame
means : the numpy row vector of column means
stdevs : the numpy row vector of column st. devs
"""
cols = data.columns
data = data.values
if (means is None) or (stdevs is None):
means = np.mean(data, axis=0)
stdevs = np.std(data, axis=0, ddof=1)
else:
means = np.array(means)
stdevs = np.array(stdevs)
# handle special case of one row
if (len(data.shape) == 1) or (data.shape[0] == 1):
for i in range(len(data)):
data[i] = (data[i] - means[i]) / stdevs[i]
else:
for i in range(data.shape[1]):
data[:,i] = (data[:,i] - means[i]*np.ones(data.shape[0])) / stdevs[i]
normed = pd.DataFrame(data, columns=cols)
return normed, means, stdevs
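# Hedged round-trip sketch for normalization() (toy frame; values follow from
# the mean/std-with-ddof=1 computation above):
# >>> toy = pd.DataFrame({'a': [1.0, 2.0, 3.0]})
# >>> normed, mu, sd = normalization(toy)
# >>> normed['a'].tolist(), mu[0], sd[0]
# ([-1.0, 0.0, 1.0], 2.0, 1.0)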
def predict_model(A_smile, B_smile, obj, t, p, m, X_mean, X_stdev, flag=None):
"""
Generates the predicted model data for a mixture
of compounds A and B at temperature t and pressure p.
Inputs
-----
A_smile : SMILES string for compound A
B_smile : SMILES string for compound B
obj : model object
t : float of temperature
p : float of pressure
m : float of mol_fraction
X_mean : means of columns for normalization
X_stdev : stdevs of columns for normalization
flag : string to designate which variable is on x-axis
Returns
------
x_vals : x-values chosen by flag
y_pred : predicted conductivity (y_values)
"""
N = 100 # number of points
y_pred = np.empty(N+1)
if (flag == 'm'):
x_conc = np.linspace(0, 1, N+1)
elif (flag == 't'):
x_conc = np.linspace(100, 400, N+1)
elif (flag == 'p'):
x_conc = np.linspace(5, 400, N+1)
else:
raise ValueError("unexpected flag")
for i in range(len(x_conc)):
if (flag == 'm'):
my_df = pd.DataFrame({'A': A_smile, 'B': B_smile, 'MOLFRC_A': x_conc[i], \
'P': p, 'T': t, 'EC_value': 0}, index=[0])
elif (flag == 't'):
my_df = pd.DataFrame({'A': A_smile, 'B': B_smile, 'MOLFRC_A': m, \
'P': p, 'T': x_conc[i], 'EC_value': 0}, index=[0])
elif (flag == 'p'):
my_df = pd.DataFrame({'A': A_smile, 'B': B_smile, 'MOLFRC_A': m, \
'P': x_conc[i], 'T': t, 'EC_value': 0}, index=[0])
X, _ = molecular_descriptors(my_df)
X, _, _ = normalization(X, X_mean, X_stdev)
y_pred[i] = obj.predict(X)
return x_conc, y_pred
def molecular_descriptors(data):
"""
Use RDKit to prepare the molecular descriptor
Inputs
------
data: dataframe, cleaned csv data
Returns
------
prenorm_X: input feature matrix (normalization happens separately)
Y: experimental electrical conductivity
"""
n = data.shape[0]
# Choose which molecular descriptor we want
list_of_descriptors = ['NumHeteroatoms', 'ExactMolWt',
'NOCount', 'NumHDonors',
'RingCount', 'NumAromaticRings',
'NumSaturatedRings', 'NumAliphaticRings']
# Get the molecular descriptors and their dimension
calc = Calculator(list_of_descriptors)
D = len(list_of_descriptors)
d = len(list_of_descriptors)*2 + 3  # descriptors for A and B, plus T, P, MOLFRC_A
Y = data['EC_value']
X = np.zeros((n, d))
X[:, -3] = data['T']
X[:, -2] = data['P']
X[:, -1] = data['MOLFRC_A']
for i in range(n):
A = Chem.MolFromSmiles(data['A'][i])
B = Chem.MolFromSmiles(data['B'][i])
X[i][:D] = calc.CalcDescriptors(A)
X[i][D:2*D] = calc.CalcDescriptors(B)
# 19 columns: 8 descriptors each for A and B, plus T, P, MOLFRC_A
# (the stray 'NUM' label is dropped so names line up with the features)
prenorm_X = pd.DataFrame(X, columns=['NumHeteroatoms_A',
                                     'MolWt_A', 'NOCount_A', 'NumHDonors_A',
                                     'RingCount_A', 'NumAromaticRings_A',
                                     'NumSaturatedRings_A',
                                     'NumAliphaticRings_A',
                                     'NumHeteroatoms_B', 'MolWt_B',
                                     'NOCount_B', 'NumHDonors_B',
                                     'RingCount_B', 'NumAromaticRings_B',
                                     'NumSaturatedRings_B',
                                     'NumAliphaticRings_B',
                                     'T', 'P', 'MOLFRC_A'])
prenorm_X = prenorm_X.drop('NumAliphaticRings_A', 1)
prenorm_X = prenorm_X.drop('NumAliphaticRings_B', 1)
return prenorm_X, Y
def read_data(filename):
"""
Reads data in from given file to Pandas DataFrame
Inputs
-------
filename : string of path to file
Returns
------
df : Pandas DataFrame
y_error : vector containing experimental errors
"""
# use the file extension; os.path.splitext is robust to dots in the path
filetype = os.path.splitext(filename)[1].lstrip('.').lower()
if (filetype == 'csv'):
df = pd.read_csv(filename)
elif (filetype in ['xls', 'xlsx']):
df = pd.read_excel(filename)
else:
raise ValueError('Filetype not supported')
# clean the data if necessary
df['EC_value'], df['EC_error'] = zip(*df['ELE_COD'].map(lambda x: x.split('±')))
y_error = np.copy(df['EC_error'])
df = df.drop('EC_error', 1)
df = df.drop('ELE_COD', 1)
return df, y_error
def save_model(obj, X_mean, X_stdev, X=None, y=None, dirname='default'):
"""
Save the trained regressor model to the file
Input
------
obj: model object
X_mean : mean for each column of training X
X_stdev : stdev for each column of training X
X : Predictor matrix
y : Response vector
dirname : the directory to save contents
Returns
------
None
"""
if (dirname == 'default'):
timestamp = str(datetime.now())[:19]
dirname = 'model_'+timestamp.replace(' ', '_')
else:
pass
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = dirname + '/model.pkl'
joblib.dump(obj, filename)
joblib.dump(X_mean, dirname+'/X_mean.pkl')
joblib.dump(X_stdev, dirname+'/X_stdev.pkl')
if (X is not None):
filename = dirname + '/X_data.pkl'
joblib.dump(X, filename)
else:
pass
if (y is not None):
filename = dirname + '/y_data.pkl'
joblib.dump(y, filename)
else:
pass
return
def read_model(file_dir):
"""
Read the trained regressor to
avoid repeating training.
Input
------
file_dir : the directory containing all model info
Returns
------
obj: model object
X_mean : mean of columns in training X
X_stdev : stdev of columns in training X
X : predictor matrix (if it exists) otherwise None
y : response vector (if it exists) otherwise None
"""
filename = file_dir + '/model.pkl'
obj = joblib.load(filename)
X_mean = joblib.load(file_dir+'/X_mean.pkl')
X_stdev = joblib.load(file_dir+'/X_stdev.pkl')
try:
    X = joblib.load(file_dir + '/X_data.pkl')
except Exception:  # predictor matrix may not have been saved
    X = None
try:
    y = joblib.load(file_dir + '/y_data.pkl')
except Exception:  # response vector may not have been saved
    y = None
return obj, X_mean, X_stdev, X, y
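# Hedged end-to-end sketch (hypothetical 'data.csv' with the columns read_data
# expects; model name handled by train_model above):
# obj, X_train, y_train, X_mean, X_std = train_model('svr', 'data.csv', 20)
# save_model(obj, X_mean, X_std, X_train, y_train, dirname='svr_model')
# obj2, X_mean2, X_std2, X2, y2 = read_model('svr_model')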
|
joekasp/ionic_liquids
|
ionic_liquids/utils.py
|
Python
|
mit
| 9,003
|
[
"RDKit"
] |
e6a95237aff690be9683701814b0e18ed7c0068483922fcb5e5330234e528e43
|
#!/usr/bin/env python
"""
This python script generates the py_class_impl! macro.
"""
from collections import namedtuple
import sys, os
PY2 = (os.getenv('PY') == '2')
header = '''
// Copyright (c) 2016 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
'''
macro_start = '''
#[macro_export]
#[doc(hidden)]
macro_rules! py_class_impl {
// TT muncher macro. Results are accumulated in $info $slots $impls and $members.
'''
base_case = '''
// Base case: we're done munching and can start producing code:
{ {}
$class:ident $py:ident
/* info: */ {
$base_type:ty,
$size:expr,
{ $( $class_visibility:tt )* },
$gc:tt,
/* data: */ [ $( { $data_offset:expr, $data_name:ident, $data_ty:ty } )* ]
}
$slots:tt { $( $imp:item )* } $members:tt
} => {
py_coerce_item! {
$($class_visibility)* struct $class { _unsafe_inner: $crate::PyObject }
}
py_impl_to_py_object_for_python_object!($class);
py_impl_from_py_object_for_python_object!($class);
impl $crate::PythonObject for $class {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self._unsafe_inner
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self._unsafe_inner
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$class { _unsafe_inner: obj }
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
::std::mem::transmute(obj)
}
}
impl $crate::PythonObjectWithCheckedDowncast for $class {
#[inline]
fn downcast_from<'p>(py: $crate::Python<'p>, obj: $crate::PyObject) -> Result<$class, $crate::PythonObjectDowncastError<'p>> {
if py.get_type::<$class>().is_instance(py, &obj) {
Ok($class { _unsafe_inner: obj })
} else {
Err($crate::PythonObjectDowncastError(py))
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: $crate::Python<'p>, obj: &'a $crate::PyObject) -> Result<&'a $class, $crate::PythonObjectDowncastError<'p>> {
if py.get_type::<$class>().is_instance(py, obj) {
unsafe { Ok(::std::mem::transmute(obj)) }
} else {
Err($crate::PythonObjectDowncastError(py))
}
}
}
py_coerce_item! {
impl $crate::py_class::BaseObject for $class {
type InitType = ( $( $data_ty, )* );
#[inline]
fn size() -> usize {
$size
}
unsafe fn alloc(
py: $crate::Python,
ty: &$crate::PyType,
( $( $data_name, )* ): Self::InitType
) -> $crate::PyResult<$crate::PyObject>
{
let obj = try!(<$base_type as $crate::py_class::BaseObject>::alloc(py, ty, ()));
$( $crate::py_class::data_init::<$data_ty>(py, &obj, $data_offset, $data_name); )*
Ok(obj)
}
unsafe fn dealloc(py: $crate::Python, obj: *mut $crate::_detail::ffi::PyObject) {
$( $crate::py_class::data_drop::<$data_ty>(py, obj, $data_offset); )*
<$base_type as $crate::py_class::BaseObject>::dealloc(py, obj)
}
}
}
$($imp)*
py_coerce_item! {
impl $class {
fn create_instance(py: $crate::Python $( , $data_name : $data_ty )* ) -> $crate::PyResult<$class> {
let obj = try!(unsafe {
<$class as $crate::py_class::BaseObject>::alloc(
py, &py.get_type::<$class>(), ( $($data_name,)* )
)
});
return Ok($class { _unsafe_inner: obj });
// hide statics in create_instance to avoid name conflicts
static mut TYPE_OBJECT : $crate::_detail::ffi::PyTypeObject
= py_class_type_object_static_init!($class, $gc, $slots);
static mut INIT_ACTIVE: bool = false;
// trait implementations that need direct access to TYPE_OBJECT
impl $crate::PythonObjectWithTypeObject for $class {
fn type_object(py: $crate::Python) -> $crate::PyType {
unsafe {
if $crate::py_class::is_ready(py, &TYPE_OBJECT) {
$crate::PyType::from_type_ptr(py, &mut TYPE_OBJECT)
} else {
// automatically initialize the class on-demand
<$class as $crate::py_class::PythonObjectFromPyClassMacro>::initialize(py)
.expect(concat!("An error occurred while initializing class ", stringify!($class)))
}
}
}
}
impl $crate::py_class::PythonObjectFromPyClassMacro for $class {
fn initialize(py: $crate::Python) -> $crate::PyResult<$crate::PyType> {
unsafe {
if $crate::py_class::is_ready(py, &TYPE_OBJECT) {
return Ok($crate::PyType::from_type_ptr(py, &mut TYPE_OBJECT));
}
assert!(!INIT_ACTIVE,
concat!("Reentrancy detected: already initializing class ",
stringify!($class)));
INIT_ACTIVE = true;
let res = init(py);
INIT_ACTIVE = false;
res
}
}
}
fn init($py: $crate::Python) -> $crate::PyResult<$crate::PyType> {
py_class_type_object_dynamic_init!($class, $py, TYPE_OBJECT, $slots);
py_class_init_members!($class, $py, TYPE_OBJECT, $members);
unsafe {
if $crate::_detail::ffi::PyType_Ready(&mut TYPE_OBJECT) == 0 {
Ok($crate::PyType::from_type_ptr($py, &mut TYPE_OBJECT))
} else {
Err($crate::PyErr::fetch($py))
}
}
}
}
}
}
};
'''
indentation = [' ']
last_char = '\n'
def write(text):
    # pretty-printer for the generated macro source: tracks '['/'{' nesting
    # in the emitted text and keeps a matching indentation stack
global last_char
for line in text.splitlines(True):
line = line.lstrip(' ')
if len(line.strip()) == 0 and last_char == '\n':
continue
if last_char == '\n':
initial_closing = 0
for c in line:
if c in ']}':
initial_closing += 1
else:
break
if initial_closing:
sys.stdout.write(''.join(indentation[:-initial_closing]))
else:
sys.stdout.write(''.join(indentation))
elif last_char not in ' \n' and len(line) > 0 and line[0] not in ' \n;':
sys.stdout.write(' ')
sys.stdout.write(line)
min_indent_level = len(indentation)
for c in line:
if c in '[{':
if len(indentation) > min_indent_level:
indentation.append('')
else:
indentation.append(' ')
elif c in ']}':
indentation.pop()
if len(indentation) < min_indent_level:
min_indent_level = len(indentation)
last_char = line[-1]
slot_groups = (
('tp', 'type_slots', None),
('nb', 'as_number', None),
('sq', 'as_sequence', None),
('mp', 'as_mapping', None),
('sdi', 'setdelitem', ['sdi_setitem', 'sdi_delitem'])
)
def generate_case(pattern, old_info=None, new_info=None, new_impl=None, new_slots=None, new_members=None):
    # emits one macro_rules! case: match `pattern` at the head of the token
    # stream, then recurse into py_class_impl! with updated info/slots/impls/members
write('{ { %s $($tail:tt)* }\n' % pattern)
write('$class:ident $py:ident')
if old_info is not None:
write(old_info)
elif new_info is not None:
write('\n/* info: */ {\n')
write('$base_type: ty,\n')
write('$size: expr,\n')
write('$class_visibility: tt,\n')
write('$gc: tt,\n')
write('[ $( $data:tt )* ]\n')
write('}\n')
else:
write('$info:tt')
if new_slots:
write('\n/* slots: */ {\n')
for prefix, group_name, explicit_slots in slot_groups:
if any(s.startswith(prefix) for s, v in new_slots):
if explicit_slots is None:
write('\n/* %s */ [ $( $%s_slot_name:ident : $%s_slot_value:expr, )* ]\n'
% (group_name, prefix, prefix))
else:
write('\n/* %s */ [\n' % group_name)
for slot in explicit_slots:
if any(s == slot for s, v in new_slots):
write('%s: {},\n' % slot)
else:
write('%s: $%s_slot_value:tt,\n' % (slot, slot))
write(']\n')
else:
write('$%s:tt' % group_name)
write('\n}\n')
else:
write('$slots:tt')
if new_impl is not None:
write('\n{ $( $imp:item )* }\n')
else:
write('$impls:tt')
if new_members:
write('\n{ $( $member_name:ident = $member_expr:expr; )* }')
else:
write('$members:tt')
write('\n} => { py_class_impl! {\n')
write('{ $($tail)* }\n')
write('$class $py')
write(new_info or '$info')
if new_slots:
write('\n/* slots: */ {\n')
for prefix, group_name, explicit_slots in slot_groups:
if any(s.startswith(prefix) for s, v in new_slots):
write('\n/* %s */ [\n' % group_name)
if explicit_slots is None:
write('$( $%s_slot_name : $%s_slot_value, )*\n' % (prefix, prefix))
for s, v in new_slots:
if s.startswith(prefix):
write('%s: %s,\n' % (s, v))
else:
for slot in explicit_slots:
slot_value = next((v for s, v in new_slots if s == slot), None)
if slot_value is None:
write('%s: $%s_slot_value,\n' % (slot, slot))
else:
write('%s: { %s },\n' % (slot, slot_value))
write(']\n')
else:
write('$%s' % group_name)
write('\n}\n')
else:
write('$slots')
if new_impl is not None:
write('\n/* impl: */ {\n')
write('$($imp)*\n')
write(new_impl)
write('\n}\n')
else:
write('$impls')
if new_members:
write('\n/* members: */ {\n')
write('$( $member_name = $member_expr; )*\n')
for name, val in new_members:
write('%s = %s;\n' % (name, val))
write('}')
else:
write('$members')
write('\n}};\n')
def data_decl():
generate_case('data $data_name:ident : $data_type:ty;',
new_info = '''
/* info: */ {
$base_type,
/* size: */ $crate::py_class::data_new_size::<$data_type>($size),
$class_visibility,
$gc,
/* data: */ [
$($data)*
{
$crate::py_class::data_offset::<$data_type>($size),
$data_name,
$data_type
}
]
}
''',
new_impl='''
impl $class {
fn $data_name<'a>(&'a self, py: $crate::Python<'a>) -> &'a $data_type {
unsafe {
$crate::py_class::data_get::<$data_type>(
py,
&self._unsafe_inner,
$crate::py_class::data_offset::<$data_type>($size)
)
}
}
}
''')
def generate_class_method(special_name=None, decoration='',
slot=None, add_member=False, value_macro=None, value_args=None):
name_pattern = special_name or '$name:ident'
name_use = special_name or '$name'
def impl(with_params):
if with_params:
param_pattern = ', $($p:tt)+'
impl = '''py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, %s($cls: &$crate::PyType,) $res_type; { $($body)* } }
[] ($($p)+,)
}''' % name_use
value = 'py_argparse_parse_plist_impl!{%s {%s} [] ($($p)+,)}' \
% (value_macro, value_args)
else:
param_pattern = ''
impl = 'py_class_impl_item! { $class, $py,%s($cls: &$crate::PyType,) $res_type; { $($body)* } [] }' \
% name_use
value = '%s!{%s []}' % (value_macro, value_args)
pattern = '%s def %s ($cls:ident%s) -> $res_type:ty { $( $body:tt )* }' \
% (decoration, name_pattern, param_pattern)
slots = []
if slot is not None:
slots.append((slot, value))
members = []
if add_member:
members.append((name_use, value))
generate_case(pattern, new_impl=impl, new_slots=slots, new_members=members)
impl(False) # without parameters
impl(True) # with parameters
def traverse_and_clear():
generate_case('def __traverse__(&$slf:tt, $visit:ident) $body:block',
old_info = '''
/* info: */ {
$base_type: ty,
$size: expr,
$class_visibility: tt,
/* gc: */ {
/* traverse_proc: */ None,
$traverse_data: tt
},
$datas: tt
}
''',
new_info='''
/* info: */ {
$base_type,
$size,
$class_visibility,
/* gc: */ {
/* traverse_proc: */ $class::__traverse__,
$traverse_data
},
$datas
}
''',
new_impl='''
py_coerce_item!{
impl $class {
fn __traverse__(&$slf,
$py: $crate::Python,
$visit: $crate::py_class::gc::VisitProc)
-> Result<(), $crate::py_class::gc::TraverseError>
$body
}
}
''')
generate_case('def __clear__ (&$slf:ident) $body:block',
new_slots=[('tp_clear', 'py_class_tp_clear!($class)')],
new_impl='''
py_coerce_item!{
impl $class {
fn __clear__(&$slf, $py: $crate::Python) $body
}
}
''')
def generate_instance_method(special_name=None, decoration='',
slot=None, add_member=False, value_macro=None, value_args=None):
name_pattern = special_name or '$name:ident'
name_use = special_name or '$name'
def impl(with_params):
if with_params:
param_pattern = ', $($p:tt)+'
impl = '''py_argparse_parse_plist_impl!{
py_class_impl_item { $class, $py, %s(&$slf,) $res_type; { $($body)* } }
[] ($($p)+,)
}''' % name_use
value = 'py_argparse_parse_plist_impl!{%s {%s} [] ($($p)+,)}' \
% (value_macro, value_args)
else:
param_pattern = ''
impl = 'py_class_impl_item! { $class, $py, %s(&$slf,) $res_type; { $($body)* } [] }' \
% name_use
value = '%s!{%s []}' % (value_macro, value_args)
pattern = '%s def %s (&$slf:ident%s) -> $res_type:ty { $( $body:tt )* }' \
% (decoration, name_pattern, param_pattern)
slots = []
if slot is not None:
slots.append((slot, value))
members = []
if add_member:
members.append((name_use, value))
generate_case(pattern, new_impl=impl, new_slots=slots, new_members=members)
impl(False) # without parameters
impl(True) # with parameters
def static_method():
generate_case(
'@staticmethod def $name:ident ($($p:tt)*) -> $res_type:ty { $( $body:tt )* }',
new_impl='''
py_argparse_parse_plist!{
py_class_impl_item { $class, $py, $name() $res_type; { $($body)* } }
($($p)*)
}
''',
new_members=[('$name', '''
py_argparse_parse_plist!{
py_class_static_method {$py, $class::$name}
($($p)*)
}
''')])
def static_data():
generate_case('static $name:ident = $init:expr;',
new_members=[('$name', '$init')])
macro_end = '''
}
'''
def special_method(decorated_function):
def wrap1(*args, **kwargs):
def wrap2(special_name):
return decorated_function(special_name, *args, **kwargs)
return wrap2
return wrap1
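# Hedged reading of the decorator above: special_method turns each handler
# into a two-stage factory, so the special_names table below can store
# partially-applied handlers that are invoked later with the method name:
# handler = error('__foo__ is unsupported')   # stage 1: bind the arguments
# handler('__foo__')                          # stage 2: emit the macro case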
@special_method
def error(special_name, msg):
print('''
{ { def %s $($tail:tt)* } $( $stuff:tt )* } => {
py_error! { "%s" }
};''' % (special_name, msg))
@special_method
def unimplemented(special_name):
return error('%s is not supported by py_class! yet.' % special_name)(special_name)
@special_method
def normal_method(special_name):
pass
@special_method
def special_class_method(special_name, *args, **kwargs):
generate_class_method(special_name=special_name, *args, **kwargs)
class Argument(object):
def __init__(self, name):
self.name = name
@special_method
def operator(special_name, slot,
args=(),
res_type='PyObject',
res_conv=None,
res_ffi_type='*mut $crate::_detail::ffi::PyObject',
additional_slots=()
):
if res_conv is None:
if res_type == '()':
res_conv = '$crate::py_class::slots::UnitCallbackConverter'
res_ffi_type = '$crate::_detail::libc::c_int'
elif res_type == 'bool':
res_conv = '$crate::py_class::slots::BoolConverter'
res_ffi_type = '$crate::_detail::libc::c_int'
elif res_type == 'PyObject':
res_conv = '$crate::_detail::PyObjectCallbackConverter'
else:
res_conv = '$crate::_detail::PythonObjectCallbackConverter::<$crate::%s>(::std::marker::PhantomData)' % res_type
arg_pattern = ''
param_list = []
for arg in args:
arg_pattern += ', ${0}:ident : ${0}_type:ty'.format(arg.name)
param_list.append('{{ ${0} : ${0}_type = {{}} }}'.format(arg.name))
if slot == 'sq_contains':
new_slots = [(slot, 'py_class_contains_slot!($class::%s, $%s_type)' % (special_name, args[0].name))]
elif slot == 'tp_richcompare':
new_slots = [(slot, 'py_class_richcompare_slot!($class::%s, $%s_type, %s, %s)'
% (special_name, args[0].name, res_ffi_type, res_conv))]
elif len(args) == 0:
new_slots = [(slot, 'py_class_unary_slot!($class::%s, %s, %s)'
% (special_name, res_ffi_type, res_conv))]
elif len(args) == 1:
new_slots = [(slot, 'py_class_binary_slot!($class::%s, $%s_type, %s, %s)'
% (special_name, args[0].name, res_ffi_type, res_conv))]
elif len(args) == 2:
new_slots = [(slot, 'py_class_ternary_slot!($class::%s, $%s_type, $%s_type, %s, %s)'
% (special_name, args[0].name, args[1].name, res_ffi_type, res_conv))]
else:
raise ValueError('Unsupported argument count')
generate_case(
pattern='def %s(&$slf:ident%s) -> $res_type:ty { $($body:tt)* }' % (special_name, arg_pattern),
new_impl='py_class_impl_item! { $class, $py, %s(&$slf,) $res_type; { $($body)* } [%s] }'
% (special_name, ' '.join(param_list)),
new_slots=new_slots + list(additional_slots)
)
# Generate fall-back matcher that produces an error
# when using the wrong method signature
error('Invalid signature for operator %s' % special_name)(special_name)
@special_method
def call_operator(special_name, slot):
generate_instance_method(
special_name=special_name,
slot=slot,
value_macro='py_class_call_slot',
value_args='$class::%s' % special_name)
@special_method
def binary_numeric_operator(special_name, slot):
generate_case(
pattern='def %s($left:ident, $right:ident) -> $res_type:ty { $($body:tt)* }'
% special_name,
new_impl='py_class_impl_item! { $class, $py, %s() $res_type; { $($body)* } ' % special_name
+'[ { $left : &$crate::PyObject = {} } { $right : &$crate::PyObject = {} } ] }',
new_slots=[(slot, 'py_class_binary_numeric_slot!($class::%s)' % special_name)]
)
error('Invalid signature for binary numeric operator %s' % special_name)(special_name)
@special_method
def reflected_numeric_operator(special_name):
error('Reflected numeric operator %s is not supported by py_class! Use __%s__ instead!'
% (special_name, special_name[3:-2]))(special_name)
@special_method
def inplace_numeric_operator(special_name, slot):
operator(slot=slot,
args=[Argument('other')])(special_name)
special_names = {
'__init__': error('__init__ is not supported by py_class!; use __new__ instead.'),
'__new__': special_class_method(
slot='tp_new',
value_macro='py_class_wrap_newfunc',
value_args='$class::__new__'),
'__del__': error('__del__ is not supported by py_class!; Use a data member with a Drop impl instead.'),
'__repr__': operator('tp_repr', res_type="PyString"),
'__str__': operator('tp_str', res_type="PyString"),
'__unicode__': normal_method(),
'__bytes__': normal_method(),
'__format__': normal_method(),
# Comparison Operators
'__lt__': error('__lt__ is not supported by py_class! use __richcmp__ instead.'),
'__le__': error('__le__ is not supported by py_class! use __richcmp__ instead.'),
'__gt__': error('__gt__ is not supported by py_class! use __richcmp__ instead.'),
'__ge__': error('__ge__ is not supported by py_class! use __richcmp__ instead.'),
'__eq__': error('__eq__ is not supported by py_class! use __richcmp__ instead.'),
'__ne__': error('__ne__ is not supported by py_class! use __richcmp__ instead.'),
'__cmp__': error('__cmp__ is not supported by py_class! use __richcmp__ instead.'),
'__richcmp__': operator('tp_richcompare',
res_type='PyObject',
args=[Argument('other'), Argument('op')]),
'__hash__': operator('tp_hash',
res_conv='$crate::py_class::slots::HashConverter',
res_ffi_type='$crate::Py_hash_t'),
'__nonzero__': error('__nonzero__ is not supported by py_class!; use the Python 3 spelling __bool__ instead.'),
'__bool__': operator('nb_nonzero' if PY2 else 'nb_bool',
res_type='bool'),
# Customizing attribute access
'__getattr__': unimplemented(),
'__getattribute__': unimplemented(),
'__setattr__': unimplemented(),
'__delattr__': unimplemented(),
'__dir__': unimplemented(),
# Implementing Descriptors
'__get__': unimplemented(),
'__set__': unimplemented(),
'__delete__': unimplemented(),
# Customizing instance and subclass checks
'__instancecheck__': unimplemented(),
'__subclasscheck__': unimplemented(),
# Emulating callable objects
'__call__': call_operator('tp_call'),
# Emulating container types
'__len__': operator('sq_length',
res_ffi_type='$crate::_detail::ffi::Py_ssize_t',
res_conv='$crate::py_class::slots::LenResultConverter',
additional_slots=[
# Use PySequence_Size to forward mp_length calls to sq_length.
('mp_length', 'Some($crate::_detail::ffi::PySequence_Size)')
]),
'__length_hint__': normal_method(),
'__getitem__': operator('mp_subscript',
args=[Argument('key')],
additional_slots=[
('sq_item', 'Some($crate::py_class::slots::sq_item)')
]),
'__missing__': normal_method(),
'__setitem__': operator('sdi_setitem',
args=[Argument('key'), Argument('value')],
res_type='()'),
'__delitem__': operator('sdi_delitem',
args=[Argument('key')],
res_type='()'),
'__iter__': operator('tp_iter'),
'__next__': operator('tp_iternext',
res_conv='$crate::py_class::slots::IterNextResultConverter'),
'__reversed__': normal_method(),
'__contains__': operator('sq_contains', args=[Argument('item')]),
# Emulating numeric types
'__add__': binary_numeric_operator('nb_add'),
'__sub__': binary_numeric_operator('nb_subtract'),
'__mul__': binary_numeric_operator('nb_multiply'),
'__matmul__': unimplemented(),
'__div__': unimplemented(),
'__truediv__': unimplemented(),
'__floordiv__': unimplemented(),
'__mod__': unimplemented(),
'__divmod__': unimplemented(),
'__pow__': unimplemented(),
'__lshift__': binary_numeric_operator('nb_lshift'),
'__rshift__': binary_numeric_operator('nb_rshift'),
'__and__': binary_numeric_operator('nb_and'),
'__xor__': binary_numeric_operator('nb_xor'),
'__or__': binary_numeric_operator('nb_or'),
# Emulating numeric types - reflected
'__radd__': reflected_numeric_operator(),
'__rsub__': reflected_numeric_operator(),
'__rmul__': reflected_numeric_operator(),
'__rmatmul__': reflected_numeric_operator(),
'__rdiv__': reflected_numeric_operator(),
'__rtruediv__': reflected_numeric_operator(),
'__rfloordiv__': reflected_numeric_operator(),
'__rmod__': reflected_numeric_operator(),
'__rdivmod__': reflected_numeric_operator(),
'__rpow__': reflected_numeric_operator(),
'__rlshift__': reflected_numeric_operator(),
'__rrshift__': reflected_numeric_operator(),
'__rand__': reflected_numeric_operator(),
'__rxor__': reflected_numeric_operator(),
'__ror__': reflected_numeric_operator(),
# Emulating numeric types - in-place
'__iadd__': inplace_numeric_operator('nb_inplace_add'),
'__isub__': inplace_numeric_operator('nb_inplace_subtract'),
'__imul__': inplace_numeric_operator('nb_inplace_multiply'),
'__imatmul__': inplace_numeric_operator('nb_inplace_matrix_multiply'),
'__idiv__': unimplemented(),
'__itruediv__': inplace_numeric_operator('nb_inplace_true_divide'),
'__ifloordiv__': inplace_numeric_operator('nb_inplace_floor_divide'),
'__imod__': inplace_numeric_operator('nb_inplace_remainder'),
'__ipow__': unimplemented(),
'__ilshift__': inplace_numeric_operator('nb_inplace_lshift'),
'__irshift__': inplace_numeric_operator('nb_inplace_rshift'),
'__iand__': inplace_numeric_operator('nb_inplace_and'),
'__ixor__': inplace_numeric_operator('nb_inplace_xor'),
'__ior__': inplace_numeric_operator('nb_inplace_or'),
# Unary arithmetic
'__neg__': operator('nb_negative'),
'__pos__': operator('nb_positive'),
'__abs__': operator('nb_absolute'),
'__invert__': operator('nb_invert'),
'__complex__': unimplemented(),
'__int__': unimplemented(),
'__long__': unimplemented(),
'__float__': unimplemented(),
'__round__': unimplemented(),
'__index__': unimplemented(),
'__coerce__': unimplemented(),
# With statement context managers
'__enter__': normal_method(),
'__exit__': normal_method(),
# Coroutines
'__await__': unimplemented(),
'__aiter__': unimplemented(),
'__aenter__': unimplemented(),
'__aexit__': unimplemented(),
}
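# A minimal sketch (hypothetical helper, not used by the generator itself) of
# how the table above is consumed: every value is a callable that takes the
# special-method name, and main() below invokes each one as f(name).
def _check_special_names_table():
    for name, handler in sorted(special_names.items()):
        assert callable(handler), 'handler for %s must be callable' % name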
def main():
if sys.argv[1:] == ['--format']:
while True:
line = sys.stdin.readline()
if len(line) == 0:
return
write(line)
print(header)
print('')
print('// !!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('// THIS IS A GENERATED FILE !!')
print('// DO NOT MODIFY !!')
print('// !!!!!!!!!!!!!!!!!!!!!!!!!!!')
print(macro_start)
print(base_case)
data_decl()
traverse_and_clear()
for name, f in sorted(special_names.items()):
f(name)
generate_instance_method(
add_member=True,
value_macro='py_class_instance_method',
value_args='$py, $class::$name')
generate_class_method(decoration='@classmethod',
add_member=True,
value_macro='py_class_class_method',
value_args='$py, $class::$name')
static_method()
static_data()
print(macro_end)
if __name__ == '__main__':
main()
|
naufraghi/rust-cpython
|
src/py_class/py_class_impl.py
|
Python
|
mit
| 30,720
|
[
"VisIt"
] |
bc66151026a2837507c1ae6f3ea76aad3b29fddc91a8eb524f3d6997a4c631ed
|
""" DowntimeCommand module will look into GOC DB to find announced downtimes for RSS-managed sites and resources.
If found, downtimes are added to the internal RSS cache using ResourceManagementClient.
GOCDB downtimes that are modified or deleted are also synced.
"""
__RCSID__ = '$Id$'
import urllib2
import re
from datetime import datetime, timedelta
from operator import itemgetter
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getGOCSiteName, getGOCSites, getGOCFTSName
from DIRAC.Core.Utilities.SiteSEMapping import getSEHosts, getStorageElementsHosts
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities.CSHelpers import getComputingElements
class DowntimeCommand(Command):
"""
Downtime "master" Command or removed DTs.
"""
def __init__(self, args=None, clients=None):
super(DowntimeCommand, self).__init__(args, clients)
if 'GOCDBClient' in self.apis:
self.gClient = self.apis['GOCDBClient']
else:
self.gClient = GOCDBClient()
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis['ResourceManagementClient']
else:
self.rmClient = ResourceManagementClient()
def _storeCommand(self, result):
"""
Stores the results of doNew method on the database.
"""
for dt in result:
resQuery = self.rmClient.addOrModifyDowntimeCache(downtimeID=dt['DowntimeID'],
element=dt['Element'],
name=dt['Name'],
startDate=dt['StartDate'],
endDate=dt['EndDate'],
severity=dt['Severity'],
description=dt['Description'],
link=dt['Link'],
gOCDBServiceType=dt['gOCDBServiceType'])
return resQuery
def _cleanCommand(self, element, elementNames):
"""
Clear Cache from expired DT.
"""
resQuery = []
for elementName in elementNames:
# get the list of all DTs stored in the cache
result = self.rmClient.selectDowntimeCache(element=element,
name=elementName)
if not result['OK']:
return result
uniformResult = [dict(zip(result['Columns'], res)) for res in result['Value']]
currentDate = datetime.utcnow()
if not uniformResult:
continue
# get the list of all ongoing DTs from GocDB
gDTLinkList = self.gClient.getCurrentDTLinkList()
if not gDTLinkList['OK']:
return gDTLinkList
for dt in uniformResult:
# if DT expired or DT not in the list of current DTs, then we remove it from the cache
if dt['EndDate'] < currentDate or dt['Link'] not in gDTLinkList['Value']:
result = self.rmClient.deleteDowntimeCache(downtimeID=dt['DowntimeID'])
resQuery.append(result)
return S_OK(resQuery)
def _prepareCommand(self):
"""
DowntimeCommand requires four arguments:
- name : <str>
- element : Site / Resource
- elementType: <str>
    If the elements are Site(s), we need to get their GOCDB names. They may
    not have one, in which case we ignore them.
"""
if 'name' not in self.args:
return S_ERROR('"name" not found in self.args')
elementName = self.args['name']
if 'element' not in self.args:
return S_ERROR('"element" not found in self.args')
element = self.args['element']
if 'elementType' not in self.args:
return S_ERROR('"elementType" not found in self.args')
elementType = self.args['elementType']
if element not in ['Site', 'Resource']:
return S_ERROR('element is neither Site nor Resource')
hours = None
if 'hours' in self.args:
hours = self.args['hours']
gOCDBServiceType = None
# Transform DIRAC site names into GOCDB topics
if element == 'Site':
gocSite = getGOCSiteName(elementName)
      if not gocSite['OK']:  # The site is most probably not a grid site - not an issue, of course
pass # so, elementName remains unchanged
else:
elementName = gocSite['Value']
    # The DIRAC SE names mean nothing on the grid, but their hosts do.
elif elementType == 'StorageElement':
# for SRM and SRM only, we need to distinguish if it's tape or disk
# if it's not SRM, then gOCDBServiceType will be None (and we'll use them all)
try:
se = StorageElement(elementName)
seOptions = se.options
seProtocols = set(se.localAccessProtocolList) | set(se.localWriteProtocolList)
except AttributeError: # Sometimes the SE can't be instantiated properly
self.log.error("Failure instantiating StorageElement object", elementName)
return S_ERROR("Failure instantiating StorageElement")
if 'SEType' in seOptions and 'srm' in seProtocols:
# Type should follow the convention TXDY
seType = seOptions['SEType']
diskSE = re.search('D[1-9]', seType) is not None
tapeSE = re.search('T[1-9]', seType) is not None
if tapeSE:
gOCDBServiceType = "srm.nearline"
elif diskSE:
gOCDBServiceType = "srm"
res = getSEHosts(elementName)
if not res['OK']:
return res
seHosts = res['Value']
if not seHosts:
return S_ERROR('No seHost(s) for %s' % elementName)
      elementName = seHosts  # in this case it will be a list, because there might be more than one host
elif elementType in ['FTS', 'FTS3']:
gOCDBServiceType = 'FTS'
# WARNING: this method presupposes that the server is an FTS3 type
gocSite = getGOCFTSName(elementName)
if not gocSite['OK']:
self.log.warn("FTS not in Resources/FTSEndpoints/FTS3 ?", elementName)
else:
elementName = gocSite['Value']
return S_OK((element, elementName, hours, gOCDBServiceType))
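  # Illustrative note on the TXDY convention handled above: 'T1D0' (tape-only)
  # yields gOCDBServiceType 'srm.nearline', 'T0D1' (disk-only) yields 'srm',
  # and 'T1D1' yields 'srm.nearline' as well, since the tape check wins.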
def doNew(self, masterParams=None):
"""
Gets the parameters to run, either from the master method or from its
own arguments.
    Queries the GOCDB client for every elementName (a single name is wrapped
    into a list first). The server is not very stable, so in case of failure
    it tries a second time.
    If there are downtimes, they are recorded and then returned.
"""
if masterParams is not None:
element, elementNames = masterParams
hours = 120
elementName = None
gOCDBServiceType = None
else:
params = self._prepareCommand()
if not params['OK']:
return params
element, elementName, hours, gOCDBServiceType = params['Value']
if not isinstance(elementName, list):
elementNames = [elementName]
else:
elementNames = elementName
# WARNING: checking all the DT that are ongoing or starting in given <hours> from now
try:
results = self.gClient.getStatus(element, name=elementNames, startingInHours=hours)
except urllib2.URLError:
try:
# Let's give it a second chance..
results = self.gClient.getStatus(element, name=elementNames, startingInHours=hours)
except urllib2.URLError as e:
return S_ERROR(e)
if not results['OK']:
return results
results = results['Value']
if results is None: # no downtimes found
return S_OK(None)
# cleaning the Cache
if elementName:
cleanRes = self._cleanCommand(element, elementNames)
if not cleanRes['OK']:
return cleanRes
uniformResult = []
    # Humanize the results into a list of dictionaries; not the most optimal, but readable
for downtime, downDic in results.iteritems():
dt = {}
dt['Name'] = downDic.get('HOSTNAME', downDic.get('SITENAME'))
if not dt['Name']:
return S_ERROR("SITENAME and HOSTNAME are missing from downtime dictionary")
dt['gOCDBServiceType'] = downDic.get('SERVICE_TYPE')
if dt['gOCDBServiceType'] and gOCDBServiceType:
if gOCDBServiceType.lower() != downDic['SERVICE_TYPE'].lower():
self.log.warn("SERVICE_TYPE mismatch",
"between GOCDB (%s) and CS (%s) for %s" % (gOCDBServiceType,
downDic['SERVICE_TYPE'],
dt['Name']))
dt['DowntimeID'] = downtime
dt['Element'] = element
dt['StartDate'] = downDic['FORMATED_START_DATE']
dt['EndDate'] = downDic['FORMATED_END_DATE']
dt['Severity'] = downDic['SEVERITY']
dt['Description'] = downDic['DESCRIPTION'].replace('\'', '')
dt['Link'] = downDic['GOCDB_PORTAL_URL']
uniformResult.append(dt)
storeRes = self._storeCommand(uniformResult)
if not storeRes['OK']:
return storeRes
return S_OK()
def doCache(self):
"""
Method that reads the cache table and tries to read from it. It will
return a list with one dictionary describing the DT if there are results.
"""
params = self._prepareCommand()
if not params['OK']:
return params
element, elementName, hours, gOCDBServiceType = params['Value']
result = self.rmClient.selectDowntimeCache(element=element, name=elementName,
gOCDBServiceType=gOCDBServiceType)
if not result['OK']:
return result
uniformResult = [dict(zip(result['Columns'], res)) for res in result['Value']]
# 'targetDate' can be either now or some 'hours' later in the future
targetDate = datetime.utcnow()
# dtOverlapping is a buffer to assure only one dt is returned
# when there are overlapping outage/warning dt for same element
# on top of the buffer we put the most recent outages
# while at the bottom the most recent warnings,
# assumption: uniformResult list is already ordered by resource/site name, severity, startdate
dtOverlapping = []
if hours is not None:
# IN THE FUTURE
targetDate = targetDate + timedelta(hours=hours)
# sorting by 'StartDate' b/c if we look for DTs in the future
# then we are interested in the earliest DTs
uniformResult.sort(key=itemgetter('Name', 'Severity', 'StartDate'))
for dt in uniformResult:
if (dt['StartDate'] < targetDate) and (dt['EndDate'] > targetDate):
# the list is already ordered in a way that outages come first over warnings
# and the earliest outages are on top of other outages and warnings
# while the earliest warnings are on top of the other warnings
# so what ever comes first in the list is also what we are looking for
dtOverlapping = [dt]
break
else:
# IN THE PRESENT
# sorting by 'EndDate' b/c if we look for DTs in the present
# then we are interested in those DTs that last longer
uniformResult.sort(key=itemgetter('Name', 'Severity', 'EndDate'))
for dt in uniformResult:
if (dt['StartDate'] < targetDate) and (dt['EndDate'] > targetDate):
# if outage, we put it on top of the overlapping buffer
# i.e. the latest ending outage is on top
if dt['Severity'].upper() == 'OUTAGE':
dtOverlapping = [dt] + dtOverlapping
# if warning, we put it at the bottom of the overlapping buffer
# i.e. the latest ending warning is at the bottom
elif dt['Severity'].upper() == 'WARNING':
dtOverlapping.append(dt)
result = None
if dtOverlapping:
dtTop = dtOverlapping[0]
dtBottom = dtOverlapping[-1]
if dtTop['Severity'].upper() == 'OUTAGE':
result = dtTop
else:
result = dtBottom
return S_OK(result)
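  # Worked example of the overlap buffer above (times illustrative): given an
  # OUTAGE from 10:00 to 12:00 and a WARNING from 09:00 to 18:00 on the same
  # element, with targetDate at 11:00 both overlap; the OUTAGE lands on top of
  # the buffer, so doCache returns the OUTAGE entry.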
def doMaster(self):
""" Master method, which looks little bit spaghetti code, sorry !
- It gets all sites and transforms them into gocSites.
- It gets all the storage elements and transforms them into their hosts
        - It gets the CEs (FTS and file catalogs will come).
"""
gocSites = getGOCSites()
if not gocSites['OK']:
return gocSites
gocSites = gocSites['Value']
sesHosts = getStorageElementsHosts()
if not sesHosts['OK']:
return sesHosts
sesHosts = sesHosts['Value']
resources = sesHosts if sesHosts else []
ftsServer = getFTS3Servers(hostOnly=True)
if ftsServer['OK'] and ftsServer['Value']:
resources.extend(ftsServer['Value'])
# TODO: file catalogs need also to use their hosts
# fc = CSHelpers.getFileCatalogs()
# if fc[ 'OK' ]:
# resources = resources + fc[ 'Value' ]
ce = getComputingElements()
if ce['OK'] and ce['Value']:
resources.extend(ce['Value'])
self.log.verbose('Processing Sites', ', '.join(gocSites if gocSites else ['NONE']))
siteRes = self.doNew(('Site', gocSites))
if not siteRes['OK']:
self.metrics['failed'].append(siteRes['Message'])
self.log.verbose('Processing Resources', ', '.join(resources if resources else ['NONE']))
resourceRes = self.doNew(('Resource', resources))
if not resourceRes['OK']:
self.metrics['failed'].append(resourceRes['Message'])
return S_OK(self.metrics)
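def _exampleDowntimeCommandUsage():
  """ Hedged usage sketch (illustrative only; 'LCG.CERN.ch' is a placeholder
      site name, not implied by this module): running the command stand-alone
      instead of through the RSS agents.
  """
  command = DowntimeCommand(args={'name': 'LCG.CERN.ch', 'element': 'Site',
                                  'elementType': 'Site', 'hours': 24})
  return command.doNew()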
|
fstagni/DIRAC
|
ResourceStatusSystem/Command/DowntimeCommand.py
|
Python
|
gpl-3.0
| 13,816
|
[
"DIRAC"
] |
b951a27282144daa139378f1bdf1d0a6c1a381368df88808f4637000ee7eb725
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 13:23:54 2018
@author: butenko
"""
import warnings
warnings.filterwarnings("ignore")
from dolfin import *
from pandas import read_csv
import numpy as np
import os
import subprocess
import pickle
import time as tm
from tissue_dielectrics import DielectricProperties
parameters['linear_algebra_backend']='PETSc'
set_log_active(False) #turns off debugging info
parameters['ghost_mode'] = 'shared_vertex'
def get_CPE_impedance(beta,K_Area,freq):
Z_CPE=K_Area/((1j*2*np.pi*freq)**(beta))
return Z_CPE
def get_tissue_impedance(V_drop,J_r_contact,J_im_contact,C=0): #adapted from FanPy
z_tis = V_drop/(J_r_contact+1j*J_im_contact)
return z_tis
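# Hedged sketch (all numbers illustrative): one common lumped model puts the
# constant-phase element of the electrode-tissue interface in series with the
# tissue impedance computed from the FEM currents.
def _example_total_impedance():
    Z_CPE=get_CPE_impedance(beta=0.9,K_Area=1.0e4,freq=130.0)
    Z_tissue=get_tissue_impedance(V_drop=1.0,J_r_contact=1.0e-3,J_im_contact=2.0e-4)
    return Z_CPE+Z_tissue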
def get_solutions(EQS_form,frequency,el_order):
start_reassamble=tm.time()
mesh_sol = Mesh()
f = HDF5File(mesh_sol.mpi_comm(),os.environ['PATIENTDIR']+"/Results_adaptive/Solution_"+str(np.round(frequency,6))+".h5",'r')
f.read(mesh_sol,"mesh_sol", False)
if EQS_form == 'EQS':
Er = FiniteElement("Lagrange", mesh_sol.ufl_cell(),el_order)
Ei = FiniteElement("Lagrange", mesh_sol.ufl_cell(),el_order)
Ec = Er * Ei
V = FunctionSpace(mesh_sol, Ec)
phi_sol=Function(V)
f.read(phi_sol,'solution_phi_full')
phi_r_sol,phi_i_sol=phi_sol.split(deepcopy=True)
if el_order>1:
W =VectorFunctionSpace(mesh_sol,'DG',el_order-1)
W_i =VectorFunctionSpace(mesh_sol,'DG',el_order-1)
V_normE=FunctionSpace(mesh_sol,"CG",el_order-1)
else:
W =VectorFunctionSpace(mesh_sol,'DG',el_order)
W_i =VectorFunctionSpace(mesh_sol,'DG',el_order)
V_normE=FunctionSpace(mesh_sol,"CG",el_order)
E_field = Function(W)
E_field_im = Function(W_i)
f.read(E_field,'solution_E_field')
f.read(E_field_im,'solution_E_field_im')
j_dens_real = Function(W)
j_dens_im = Function(W_i)
f.read(j_dens_real, "solution_j_real")
f.read(j_dens_im, "solution_j_im")
J_Vector=PETScVector(MPI.comm_world,2)
f.read(J_Vector, "J_Vector",False)
J_real,J_im=J_Vector[:]
E_norm=project(sqrt(inner(E_field,E_field)+inner(E_field_im,E_field_im)),V_normE,solver_type="cg", preconditioner_type="amg")
max_E=E_norm.vector().max()
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Phi_r_field_EQS.pvd')
file<<phi_r_sol,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Phi_im_field_EQS.pvd')
file<<phi_i_sol,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/E_norm_EQS.pvd')
file<<E_norm,mesh_sol
elif EQS_form == 'QS':
V = FunctionSpace(mesh_sol, "Lagrange",el_order)
phi_r_sol=Function(V)
phi_i_sol=Function(V)
f.read(phi_r_sol,'solution_phi_full')
phi_i_sol.vector()[:]=0.0
if el_order>1:
W =VectorFunctionSpace(mesh_sol,'DG',el_order-1)
V_normE=FunctionSpace(mesh_sol,"CG",el_order-1)
else:
W =VectorFunctionSpace(mesh_sol,'DG',el_order)
V_normE=FunctionSpace(mesh_sol,"CG",el_order)
E_field = Function(W)
f.read(E_field,'solution_E_field')
E_field_im=Function(W)
E_field_im.vector()[:] = 0.0 #fake
j_dens_real = Function(W)
j_dens_im = Function(W)
f.read(j_dens_real, "solution_j_real")
j_dens_im.vector()[:] = 0.0 #fake
J_Vector=PETScVector(MPI.comm_world,2)
f.read(J_Vector, "J_Vector",False)
J_real,J_im=J_Vector[:]
E_norm=project(sqrt(inner(E_field,E_field)),V_normE,solver_type="cg", preconditioner_type="amg")
max_E=E_norm.vector().max()
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/E_norm_QS.pvd')
file<<E_norm,mesh_sol
file=File(os.environ['PATIENTDIR']+'/Results_adaptive/Phi_r_field_QS.pvd')
file<<phi_r_sol,mesh_sol
f.close()
#if we want to get the potential magnitude on the neuron compartments
Vertices_get=read_csv(os.environ['PATIENTDIR']+'/Neuron_model_arrays/Vert_of_Neural_model_NEURON.csv', delimiter=' ', header=None)
Vertices_array=Vertices_get.values
Phi_ROI=np.zeros((Vertices_array.shape[0],4),float)
for inx in range(Vertices_array.shape[0]):
pnt=Point(Vertices_array[inx,0],Vertices_array[inx,1],Vertices_array[inx,2])
Phi_ROI[inx,0]=Vertices_array[inx,0]
Phi_ROI[inx,1]=Vertices_array[inx,1]
Phi_ROI[inx,2]=Vertices_array[inx,2]
Phi_ROI[inx,3]=np.sqrt(phi_r_sol(pnt)*phi_r_sol(pnt)+phi_i_sol(pnt)*phi_i_sol(pnt))
np.savetxt(os.environ['PATIENTDIR']+'/Results_adaptive/Phi_'+str(frequency)+'.csv', Phi_ROI, delimiter=" ")
print("Quasi impedance (to check current convergence): ",J_real,J_im)
minutes=int((tm.time() - start_reassamble)/60)
secnds=int(tm.time() - start_reassamble)-minutes*60
print("--- solution reassambled in ",minutes," min ",secnds," s ---")
return phi_r_sol,phi_i_sol,E_field,E_field_im,max_E,J_real,J_im,j_dens_real,j_dens_im
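# Hedged usage sketch (frequency and element order are illustrative): the
# reassembly above only works after a solver run has written
# $PATIENTDIR/Results_adaptive/Solution_<freq>.h5.
def _example_reassemble_QS_solution():
    return get_solutions('QS',130.0,2)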
def get_field_on_points(phi_r,phi_i,c_c,J_r,J_i):
Vertices_neur_get=read_csv(os.environ['PATIENTDIR']+'/Neuron_model_arrays/Vert_of_Neural_model_NEURON.csv', delimiter=' ', header=None)
Vertices_neur=Vertices_neur_get.values
Ampl_ROI=np.zeros((Vertices_neur.shape[0],4),float)
for inx in range(Vertices_neur.shape[0]):
pnt=Point(Vertices_neur[inx,0],Vertices_neur[inx,1],Vertices_neur[inx,2])
Ampl_ROI[inx,3]=sqrt(phi_r(pnt)*phi_r(pnt)+phi_i(pnt)*phi_i(pnt))
Ampl_ROI[inx,0]=Vertices_neur[inx,0]
Ampl_ROI[inx,1]=Vertices_neur[inx,1]
Ampl_ROI[inx,2]=Vertices_neur[inx,2]
    Ampl_ROI=Ampl_ROI[~np.all(Ampl_ROI==0.0,axis=1)] #deletes all-zero entries
return Ampl_ROI
def compute_field_with_superposition(mesh_sol,Domains,subdomains,boundaries_sol,kappa,Field_calc_param):
start_math=tm.time()
from Math_module_hybrid import choose_solver_for_me
if Field_calc_param.Solver_type=='Default':
        Solver_type=choose_solver_for_me(Field_calc_param.EQS_mode,Domains.Float_contacts)    #chooses the solver based on the Laplace formulation and whether floating conductors are used
else:
Solver_type=Field_calc_param.Solver_type # just get the solver directly
#IMPORTANT: for get_field_with_floats when solving EQS we always use direct solver MUMPS for stability issues (multiple floating conductors)
if Field_calc_param.element_order==1 and MPI.comm_world.rank==1:
print("Selected element_order (1st) is too low for current-controlled stimulation, increasing to 2nd")
Field_calc_param.element_order=2
if MPI.comm_world.rank==1:
#print("Computing field with superposition on mesh with ",mesh_sol.num_cells(), " elements")
print(len(Domains.fi)," computations are required for the iteration")
contacts_with_current=[x for x in Domains.fi if x != 0.0] #0.0 are grounded contacts
phi_r_floating=np.zeros((len(contacts_with_current),len(contacts_with_current)-1),float) #stores real potential field in the virtual floating contacts (where current is actually assigned)
J_real_current_contacts=np.zeros(len(contacts_with_current),float) #currents computed on the contacts when we solve "one active contact vs ground" system (other contacts are floating)
contact_amplitude=np.zeros(len(contacts_with_current),float) #stores assigned amplitudes of the currents
    fl_ind=np.zeros((len(contacts_with_current),len(contacts_with_current)-1),float)  # stores relative indices of floats (if three current contacts, it will store [[1,2],[0,2],[0,1]])
fl_contacts_rel_ind=np.arange(len(contacts_with_current))
if Field_calc_param.EQS_mode == 'EQS':
phi_i_floating=np.zeros((len(contacts_with_current),len(contacts_with_current)-1),float)
J_im_current_contacts=np.zeros(len(contacts_with_current),float)
glob_counter=0
for i in range(len(Domains.fi)):
for j in range(len(Domains.Float_on_lead)):
if Domains.Active_on_lead[i] == Domains.Float_on_lead[j]: # find the index of the floating conductor (in .med/.msh file) for the active contact (i)
# to solve "one active contact (i) vs ground" system, get potentials on the rest of the active contacts (which are put to floating condcutors), get current on the active contact
from Math_module_hybrid_floating import get_field_with_floats
if Field_calc_param.EQS_mode == 'EQS':
phi_r_floating[glob_counter,:],phi_i_floating[glob_counter,:],J_real_current_contacts[glob_counter],J_im_current_contacts[glob_counter]=get_field_with_floats(Field_calc_param.external_grounding,mesh_sol,i,Domains,subdomains,boundaries_sol,Field_calc_param.default_material,Field_calc_param.element_order,Field_calc_param.anisotropy,Field_calc_param.frequenc,Field_calc_param.EQS_mode,Solver_type,calc_with_MPI=True,kappa=kappa)
else:
phi_r_floating[glob_counter,:],__,J_real_current_contacts[glob_counter],__=get_field_with_floats(Field_calc_param.external_grounding,mesh_sol,i,Domains,subdomains,boundaries_sol,Field_calc_param.default_material,Field_calc_param.element_order,Field_calc_param.anisotropy,Field_calc_param.frequenc,Field_calc_param.EQS_mode,Solver_type,calc_with_MPI=True,kappa=kappa)
fl_ind[glob_counter,:]=fl_contacts_rel_ind[np.arange(len(fl_contacts_rel_ind))!=glob_counter] # if three current contacts, it will store [[1,2][0,2],[0,1]]
contact_amplitude[glob_counter]=Domains.fi[i]
glob_counter=glob_counter+1
glob_counter=0
V_r_BC_for_current=np.zeros(len(contacts_with_current),float) #real potential for the contact to match the given current
V_im_BC_for_current=np.zeros(len(contacts_with_current),float) # only for the EQS formulation
for i in range(V_r_BC_for_current.shape[0]):
floating_ind=np.argwhere(fl_ind==i)
contact_amplitude_others=contact_amplitude[np.arange(len(contact_amplitude))!=i]
if Field_calc_param.EQS_mode == 'EQS':
phi_float_vector=(phi_r_floating[floating_ind[:,0],floating_ind[:,1]]+1j*phi_i_floating[floating_ind[:,0],floating_ind[:,1]]) #vector of values of the floating potentials at the contact
J_others_vector=(J_real_current_contacts[np.arange(len(J_real_current_contacts))!=i]+1j*J_im_current_contacts[np.arange(len(J_im_current_contacts))!=i])
V_r_BC_for_current[i]=np.real(contact_amplitude[i]*contact_amplitude[i]/(J_real_current_contacts[i]+1j*J_im_current_contacts[i])+np.sum(phi_float_vector*contact_amplitude_others/J_others_vector))
V_im_BC_for_current[i]=np.imag(contact_amplitude[i]*contact_amplitude[i]/(J_real_current_contacts[i]+1j*J_im_current_contacts[i])+np.sum(phi_float_vector*contact_amplitude_others/J_others_vector))
else:
phi_float_vector=(phi_r_floating[floating_ind[:,0],floating_ind[:,1]]) #vector of values of the floating potentials at the contact
J_others_vector=(J_real_current_contacts[np.arange(len(J_real_current_contacts))!=i])
V_r_BC_for_current[i]=np.real(contact_amplitude[i]*contact_amplitude[i]/(J_real_current_contacts[i])+np.sum(phi_float_vector*contact_amplitude_others/J_others_vector))
    # not an elegant way, but kept for maximum transparency
if Field_calc_param.EQS_mode == 'EQS':
scaled_phi=np.complex(1.0,0.0)*np.zeros(len(Domains.fi),float)
for i in range(len(Domains.fi)):
if Domains.fi[i]==0.0:
scaled_phi[i]=0.0+1j*0.0
else:
scaled_phi[i]=V_r_BC_for_current[glob_counter]+1j*V_im_BC_for_current[glob_counter]
glob_counter=glob_counter+1
else:
scaled_phi=np.zeros(len(Domains.fi),float)
for i in range(len(Domains.fi)):
if Domains.fi[i]==0.0:
scaled_phi[i]=0.0
else:
scaled_phi[i]=V_r_BC_for_current[glob_counter]
glob_counter=glob_counter+1
glob_counter=0
#the results are stored in h5 file, check get_solutions above
from Math_module_hybrid_floating import get_field_with_scaled_BC
get_field_with_scaled_BC(Field_calc_param.external_grounding,mesh_sol,Domains,scaled_phi,subdomains,boundaries_sol,Field_calc_param.default_material,Field_calc_param.element_order,Field_calc_param.EQS_mode,Field_calc_param.anisotropy,Field_calc_param.frequenc,Solver_type,calc_with_MPI=True,kappa=kappa)
minutes=int((tm.time() - start_math)/60)
secnds=int(tm.time() - start_math)-minutes*60
if MPI.comm_world.rank==1:
print("--- Field with superposition was calculated in ",minutes," min ",secnds," s ")
print("__________________________________")
return True
if __name__ == '__main__':
with open(os.environ['PATIENTDIR']+'/Meshes/Mesh_ind.file', "rb") as f:
Domains = pickle.load(f)
with open(os.environ['PATIENTDIR']+'/Results_adaptive/Field_calc_param.file', "rb") as f:
Field_calc_param = pickle.load(f)
mesh = Mesh()
hdf = HDF5File(mesh.mpi_comm(), os.environ['PATIENTDIR']+"/Results_adaptive/Mesh_to_solve.h5", "r")
hdf.read(mesh, "/mesh", False)
subdomains = MeshFunction("size_t", mesh, 3)
hdf.read(subdomains, "/subdomains")
boundaries = MeshFunction("size_t", mesh, 2)
hdf.read(boundaries, "/boundaries")
V0_r=FunctionSpace(mesh,'DG',0)
V0_i=FunctionSpace(mesh,'DG',0)
kappa_r=Function(V0_r)
kappa_i=Function(V0_i)
hdf.read(kappa_r, "/kappa_r")
kappa=[kappa_r]
if Field_calc_param.EQS_mode=='EQS':
hdf.read(kappa_i, "/kappa_i")
kappa=[kappa_r,kappa_i]
#anisotropy will be read in at the site
hdf.close()
compute_field_with_superposition(mesh,Domains,subdomains,boundaries,kappa,Field_calc_param)
|
andreashorn/lead_dbs
|
ext_libs/OSS-DBS/OSS_platform/Math_module_floating_MPI.py
|
Python
|
gpl-3.0
| 14,099
|
[
"NEURON"
] |
e13e50c743b31766f7762191f2ad03d3d55e589587ff98abc4b6b49895014ea9
|
"""
Courseware page.
"""
import re
from bok_choy.page_object import PageObject, unguarded
from bok_choy.promise import EmptyPromise
from selenium.webdriver.common.action_chains import ActionChains
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.bookmarks import BookmarksPage
from common.test.acceptance.pages.lms.completion import CompletionOnViewMixin
from common.test.acceptance.pages.lms.course_page import CoursePage
class CoursewarePage(CoursePage, CompletionOnViewMixin):
"""
Course info.
"""
url_path = "courseware/"
xblock_component_selector = '.vert .xblock'
# TODO: TNL-6546: Remove sidebar selectors
section_selector = '.chapter'
subsection_selector = '.chapter-content-container a'
def __init__(self, browser, course_id):
super(CoursewarePage, self).__init__(browser, course_id)
self.nav = CourseNavPage(browser, self)
def is_browser_on_page(self):
return self.q(css='.course-content').present
# TODO: TNL-6546: Remove and find callers
@property
def chapter_count_in_navigation(self):
"""
Returns count of chapters available on LHS navigation.
"""
return len(self.q(css='nav.course-navigation a.chapter'))
# TODO: TNL-6546: Remove and find callers.
@property
def num_sections(self):
"""
Return the number of sections in the sidebar on the page
"""
return len(self.q(css=self.section_selector))
# TODO: TNL-6546: Remove and find callers.
@property
def num_subsections(self):
"""
Return the number of subsections in the sidebar on the page, including in collapsed sections
"""
return len(self.q(css=self.subsection_selector))
@property
def xblock_components(self):
"""
Return the xblock components within the unit on the page.
"""
return self.q(css=self.xblock_component_selector)
@property
def num_xblock_components(self):
"""
Return the number of rendered xblocks within the unit on the page
"""
return len(self.xblock_components)
def xblock_component_type(self, index=0):
"""
Extract rendered xblock component type.
Returns:
str: xblock module type
index: which xblock to query, where the index is the vertical display within the page
(default is 0)
"""
return self.q(css=self.xblock_component_selector).attrs('data-block-type')[index]
def xblock_component_html_content(self, index=0):
"""
Extract rendered xblock component html content.
Returns:
str: xblock module html content
index: which xblock to query, where the index is the vertical display within the page
(default is 0)
"""
# When Student Notes feature is enabled, it looks for the content inside
# `.edx-notes-wrapper-content` element (Otherwise, you will get an
# additional html related to Student Notes).
element = self.q(css='{} .edx-notes-wrapper-content'.format(self.xblock_component_selector))
if element.first:
return element.attrs('innerHTML')[index].strip()
else:
return self.q(css=self.xblock_component_selector).attrs('innerHTML')[index].strip()
def verify_tooltips_displayed(self):
"""
Verify that all sequence navigation bar tooltips are being displayed upon mouse hover.
If a tooltip does not appear, raise a BrokenPromise.
"""
for index, tab in enumerate(self.q(css='#sequence-list > li')):
ActionChains(self.browser).move_to_element(tab).perform()
self.wait_for_element_visibility(
'#tab_{index} > .sequence-tooltip'.format(index=index),
'Tab {index} should appear'.format(index=index)
)
@property
def course_license(self):
"""
Returns the course license text, if present. Else returns None.
"""
element = self.q(css="#content .container-footer .course-license")
if element.is_present():
return element.text[0]
return None
def go_to_sequential_position(self, sequential_position):
"""
Within a section/subsection navigate to the sequential position specified by `sequential_position`.
Arguments:
sequential_position (int): position in sequential bar
"""
def is_at_new_position():
"""
Returns whether the specified tab has become active. It is defensive
against the case where the page is still being loaded.
"""
active_tab = self._active_sequence_tab
try:
return active_tab and int(active_tab.attrs('data-element')[0]) == sequential_position
except IndexError:
return False
sequential_position_css = '#sequence-list #tab_{0}'.format(sequential_position - 1)
self.q(css=sequential_position_css).first.click()
EmptyPromise(is_at_new_position, "Position navigation fulfilled").fulfill()
@property
def sequential_position(self):
"""
Returns the position of the active tab in the sequence.
"""
tab_id = self._active_sequence_tab.attrs('id')[0]
return int(tab_id.split('_')[1])
@property
def _active_sequence_tab(self):
return self.q(css='#sequence-list .nav-item.active')
@property
def is_next_button_enabled(self):
return not self.q(css='.sequence-nav > .sequence-nav-button.button-next.disabled').is_present()
@property
def is_previous_button_enabled(self):
return not self.q(css='.sequence-nav > .sequence-nav-button.button-previous.disabled').is_present()
def click_next_button_on_top(self):
self._click_navigation_button('sequence-nav', 'button-next')
def click_next_button_on_bottom(self):
self._click_navigation_button('sequence-bottom', 'button-next')
def click_previous_button_on_top(self):
self._click_navigation_button('sequence-nav', 'button-previous')
def click_previous_button_on_bottom(self):
self._click_navigation_button('sequence-bottom', 'button-previous')
def _click_navigation_button(self, top_or_bottom_class, next_or_previous_class):
"""
Clicks the navigation button, given the respective CSS classes.
"""
previous_tab_id = self._active_sequence_tab.attrs('data-id')[0]
def is_at_new_tab_id():
"""
Returns whether the active tab has changed. It is defensive
against the case where the page is still being loaded.
"""
active_tab = self._active_sequence_tab
try:
return active_tab and previous_tab_id != active_tab.attrs('data-id')[0]
except IndexError:
return False
self.q(
css='.{} > .sequence-nav-button.{}'.format(top_or_bottom_class, next_or_previous_class)
).first.click()
EmptyPromise(is_at_new_tab_id, "Button navigation fulfilled").fulfill()
@property
def can_start_proctored_exam(self):
"""
Returns True if the timed/proctored exam timer bar is visible on the courseware.
"""
return self.q(css='button.start-timed-exam[data-start-immediately="false"]').is_present()
def start_timed_exam(self):
"""
clicks the start this timed exam link
"""
self.q(css=".xblock-student_view .timed-exam .start-timed-exam").first.click()
self.wait_for_element_presence(".proctored_exam_status .exam-timer", "Timer bar")
def stop_timed_exam(self):
"""
clicks the stop this timed exam link
"""
self.q(css=".proctored_exam_status button.exam-button-turn-in-exam").first.click()
self.wait_for_element_absence(".proctored_exam_status .exam-button-turn-in-exam", "End Exam Button gone")
self.wait_for_element_presence("button[name='submit-proctored-exam']", "Submit Exam Button")
self.q(css="button[name='submit-proctored-exam']").first.click()
self.wait_for_element_absence(".proctored_exam_status .exam-timer", "Timer bar")
def start_proctored_exam(self):
"""
clicks the start this timed exam link
"""
self.q(css='button.start-timed-exam[data-start-immediately="false"]').first.click()
# Wait for the unique exam code to appear.
# self.wait_for_element_presence(".proctored-exam-code", "unique exam code")
def has_submitted_exam_message(self):
"""
Returns whether the "you have submitted your exam" message is present.
This being true implies "the exam contents and results are hidden".
"""
return self.q(css="div.proctored-exam.completed").visible
def content_hidden_past_due_date(self):
"""
Returns whether the "the due date for this ___ has passed" message is present.
___ is the type of the hidden content, and defaults to subsection.
This being true implies "the ___ contents are hidden because their due date has passed".
"""
message = "this assignment is no longer available"
if self.q(css="div.seq_content").is_present():
return False
for html in self.q(css="div.hidden-content").html:
if message in html:
return True
return False
@property
def entrance_exam_message_selector(self):
"""
Return the entrance exam status message selector on the top of courseware page.
"""
return self.q(css='#content .container section.course-content .sequential-status-message')
def has_entrance_exam_message(self):
"""
Returns boolean indicating presence entrance exam status message container div.
"""
return self.entrance_exam_message_selector.is_present()
def has_passed_message(self):
"""
Returns boolean indicating presence of passed message.
"""
return self.entrance_exam_message_selector.is_present() \
and "You have passed the entrance exam" in self.entrance_exam_message_selector.text[0]
def has_banner(self):
"""
Returns boolean indicating presence of banner
"""
return self.q(css='.pattern-library-shim').is_present()
@property
def is_timer_bar_present(self):
"""
Returns True if the timed/proctored exam timer bar is visible on the courseware.
"""
return self.q(css=".proctored_exam_status .exam-timer").is_present()
def active_usage_id(self):
""" Returns the usage id of active sequence item """
get_active = lambda el: 'active' in el.get_attribute('class')
attribute_value = lambda el: el.get_attribute('data-id')
return self.q(css='#sequence-list .nav-item').filter(get_active).map(attribute_value).results[0]
def unit_title_visible(self):
""" Check if unit title is visible """
return self.q(css='.unit-title').visible
def bookmark_button_visible(self):
""" Check if bookmark button is visible """
EmptyPromise(lambda: self.q(css='.bookmark-button').visible, "Bookmark button visible").fulfill()
return True
@property
def bookmark_button_state(self):
""" Return `bookmarked` if button is in bookmarked state else '' """
return 'bookmarked' if self.q(css='.bookmark-button.bookmarked').present else ''
@property
def bookmark_icon_visible(self):
""" Check if bookmark icon is visible on active sequence nav item """
return self.q(css='.active .bookmark-icon').visible
def click_bookmark_unit_button(self):
""" Bookmark a unit by clicking on Bookmark button """
previous_state = self.bookmark_button_state
self.q(css='.bookmark-button').first.click()
EmptyPromise(lambda: self.bookmark_button_state != previous_state, "Bookmark button toggled").fulfill()
# TODO: TNL-6546: Remove this helper function
def click_bookmarks_button(self):
""" Click on Bookmarks button """
self.q(css='.bookmarks-list-button').first.click()
bookmarks_page = BookmarksPage(self.browser, self.course_id)
bookmarks_page.visit()
def is_gating_banner_visible(self):
"""
Check if the gated banner for locked content is visible.
"""
return self.q(css='.problem-header').is_present() \
and self.q(css='.btn-brand').text[0] == u'Go To Prerequisite Section' \
and self.q(css='.problem-header').text[0] == u'Content Locked'
@property
def is_word_cloud_rendered(self):
"""
Check for word cloud fields presence
"""
return self.q(css='.input-cloud').visible
def input_word_cloud(self, answer_word):
"""
Fill the word cloud fields
Args:
answer_word(str): An answer words to be filled in the field
"""
self.wait_for_element_visibility('.input-cloud', "Word cloud fields are visible")
css = '.input_cloud_section label:nth-child({}) .input-cloud'
for index in range(1, len(self.q(css='.input-cloud')) + 1):
self.q(css=css.format(index)).fill(answer_word + str(index))
def save_word_cloud(self):
"""
Click save button
"""
self.q(css='.input_cloud_section .action button.save').click()
self.wait_for_ajax()
@property
def word_cloud_answer_list(self):
"""
Get saved words
Returns:
list: Return empty when no answer words are present
list: Return populated when answer words are present
"""
self.wait_for_element_presence('.your_words', "Answer list is present")
if self.q(css='.your_words strong').present:
return self.q(css='.your_words strong').text
else:
return self.q(css='.your_words').text[0]
class CoursewareSequentialTabPage(CoursePage):
"""
Courseware Sequential page
"""
def __init__(self, browser, course_id, chapter, subsection, position):
super(CoursewareSequentialTabPage, self).__init__(browser, course_id)
self.url_path = "courseware/{}/{}/{}".format(chapter, subsection, position)
def is_browser_on_page(self):
return self.q(css='nav.sequence-list-wrapper').present
def get_selected_tab_content(self):
"""
return the body of the sequential currently selected
"""
return self.q(css='#seq_content .xblock').text[0]
class CourseNavPage(PageObject):
"""
Handles navigation on the courseware pages, including sequence navigation and
breadcrumbs.
"""
url = None
def __init__(self, browser, parent_page):
super(CourseNavPage, self).__init__(browser)
self.parent_page = parent_page
# TODO: TNL-6546: Remove the following
self.course_outline_page = False
def is_browser_on_page(self):
return self.parent_page.is_browser_on_page
@property
def breadcrumb_section_title(self):
"""
Returns the section's title from the breadcrumb, or None if one is not found.
"""
label = self.q(css='.breadcrumbs .nav-item-chapter').text
return label[0].strip() if label else None
@property
def breadcrumb_subsection_title(self):
"""
Returns the subsection's title from the breadcrumb, or None if one is not found
"""
label = self.q(css='.breadcrumbs .nav-item-section').text
return label[0].strip() if label else None
@property
def breadcrumb_unit_title(self):
"""
Returns the unit's title from the breadcrumb, or None if one is not found
"""
label = self.q(css='.breadcrumbs .nav-item-sequence').text
return label[0].strip() if label else None
# TODO: TNL-6546: Remove method, outline no longer on courseware page
@property
def sections(self):
"""
Return a dictionary representation of sections and subsections.
Example:
{
'Introduction': ['Course Overview'],
'Week 1': ['Lesson 1', 'Lesson 2', 'Homework']
'Final Exam': ['Final Exam']
}
You can use these titles in `go_to_section` to navigate to the section.
"""
# Dict to store the result
nav_dict = dict()
section_titles = self._section_titles()
# Get the section titles for each chapter
for sec_index, sec_title in enumerate(section_titles):
            # Add one to convert list index (starts at 0) to CSS index (starts at 1)
            subsection_titles = self._subsection_titles(sec_index + 1)
            if len(subsection_titles) < 1:
                self.warning("Could not find subsections for '{0}'".format(sec_title))
            else:
                nav_dict[sec_title] = subsection_titles
return nav_dict
@property
def sequence_items(self):
"""
Return a list of sequence items on the page.
Sequence items are one level below subsections in the course nav.
Example return value:
['Chemical Bonds Video', 'Practice Problems', 'Homework']
"""
seq_css = 'ol#sequence-list>li>.nav-item>.sequence-tooltip'
return self.q(css=seq_css).map(self._clean_seq_titles).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def go_to_section(self, section_title, subsection_title):
"""
Go to the section in the courseware.
Every section must have at least one subsection, so specify
both the section and subsection title.
Example:
go_to_section("Week 1", "Lesson 1")
"""
# For test stability, disable JQuery animations (opening / closing menus)
self.browser.execute_script("jQuery.fx.off = true;")
# Get the section by index
try:
sec_index = self._section_titles().index(section_title)
except ValueError:
self.warning("Could not find section '{0}'".format(section_title))
return
# Click the section to ensure it's open (no harm in clicking twice if it's already open)
# Add one to convert from list index to CSS index
section_css = '.course-navigation .chapter:nth-of-type({0})'.format(sec_index + 1)
self.q(css=section_css).first.click()
# Get the subsection by index
try:
subsec_index = self._subsection_titles(sec_index + 1).index(subsection_title)
except ValueError:
msg = "Could not find subsection '{0}' in section '{1}'".format(subsection_title, section_title)
self.warning(msg)
return
# Convert list indices (start at zero) to CSS indices (start at 1)
subsection_css = (
".course-navigation .chapter-content-container:nth-of-type({0}) "
".menu-item:nth-of-type({1})"
).format(sec_index + 1, subsec_index + 1)
# Click the subsection and ensure that the page finishes reloading
self.q(css=subsection_css).first.click()
self._on_section_promise(section_title, subsection_title).fulfill()
def go_to_vertical(self, vertical_title):
"""
Within a section/subsection, navigate to the vertical with `vertical_title`.
"""
# Get the index of the item in the sequence
all_items = self.sequence_items
try:
seq_index = all_items.index(vertical_title)
except ValueError:
msg = "Could not find sequential '{0}'. Available sequentials: [{1}]".format(
vertical_title, ", ".join(all_items)
)
self.warning(msg)
else:
# Click on the sequence item at the correct index
# Convert the list index (starts at 0) to a CSS index (starts at 1)
seq_css = "ol#sequence-list>li:nth-of-type({0})>.nav-item".format(seq_index + 1)
self.q(css=seq_css).first.click()
# Click triggers an ajax event
self.wait_for_ajax()
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _section_titles(self):
"""
Return a list of all section titles on the page.
"""
chapter_css = '.course-navigation .chapter .group-heading'
return self.q(css=chapter_css).map(lambda el: el.text.strip()).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _subsection_titles(self, section_index):
"""
Return a list of all subsection titles on the page
for the section at index `section_index` (starts at 1).
"""
# Retrieve the subsection title for the section
# Add one to the list index to get the CSS index, which starts at one
subsection_css = (
".course-navigation .chapter-content-container:nth-of-type({0}) "
".menu-item a p:nth-of-type(1)"
).format(section_index)
# If the element is visible, we can get its text directly
# Otherwise, we need to get the HTML
# It *would* make sense to always get the HTML, but unfortunately
# the open tab has some child <span> tags that we don't want.
return self.q(
css=subsection_css
).map(
lambda el: el.text.strip().split('\n')[0] if el.is_displayed() else el.get_attribute('innerHTML').strip()
).results
# TODO: TNL-6546: Remove method, outline no longer on courseware page
def _on_section_promise(self, section_title, subsection_title):
"""
Return a `Promise` that is fulfilled when the user is on
the correct section and subsection.
"""
desc = "currently at section '{0}' and subsection '{1}'".format(section_title, subsection_title)
return EmptyPromise(
lambda: self.is_on_section(section_title, subsection_title), desc
)
def go_to_outline(self):
"""
Navigates using breadcrumb to the course outline on the course home page.
Returns CourseHomePage page object.
"""
# To avoid circular dependency, importing inside the function
from common.test.acceptance.pages.lms.course_home import CourseHomePage
course_home_page = CourseHomePage(self.browser, self.parent_page.course_id)
self.q(css='.nav-item-course').click()
course_home_page.wait_for_page()
return course_home_page
@unguarded
def is_on_section(self, section_title, subsection_title):
"""
Return a boolean indicating whether the user is on the section and subsection
with the specified titles.
"""
return self.breadcrumb_section_title == section_title and self.breadcrumb_subsection_title == subsection_title
# Regular expression to remove HTML span tags from a string
REMOVE_SPAN_TAG_RE = re.compile(r'</span>(.+)<span')
def _clean_seq_titles(self, element):
"""
Clean HTML of sequence titles, stripping out span tags and returning the first line.
"""
return self.REMOVE_SPAN_TAG_RE.search(element.get_attribute('innerHTML')).groups()[0].strip()
# TODO: TNL-6546: Remove. This is no longer needed.
@property
def active_subsection_url(self):
"""
return the url of the active subsection in the left nav
"""
return self.q(css='.chapter-content-container .menu-item.active a').attrs('href')[0]
# TODO: TNL-6546: Remove all references to self.course_outline_page
# TODO: TNL-6546: Remove the following function
def visit_course_outline_page(self):
# use course_outline_page version of the nav
self.course_outline_page = True
# reload the same page with the course_outline_page flag
self.browser.get(self.browser.current_url + "&course_experience.course_outline_page=1")
self.wait_for_page()
class RenderXBlockPage(PageObject, CompletionOnViewMixin):
"""
render_xblock page.
"""
xblock_component_selector = '.xblock'
def __init__(self, browser, block_id):
super(RenderXBlockPage, self).__init__(browser)
self.block_id = block_id
@property
def url(self):
"""
Construct a URL to the page within the course.
"""
return BASE_URL + "/xblock/" + self.block_id
def is_browser_on_page(self):
return self.q(css='.course-content').present
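# Hedged usage sketch (the course id is a placeholder, not a real course):
# driving the sequence navigation from an acceptance test.
def _example_navigate_courseware(browser):
    """
    Visit the courseware of a placeholder course and step to the second unit.
    Not part of the page-object API; shown only to illustrate the flow.
    """
    page = CoursewarePage(browser, 'course-v1:Test+T101+2018')
    page.visit()
    page.go_to_sequential_position(2)
    return page.sequential_position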
|
a-parhom/edx-platform
|
common/test/acceptance/pages/lms/courseware.py
|
Python
|
agpl-3.0
| 24,820
|
[
"VisIt"
] |
d3b96d4dd37c0cb1792ed056d38766c137e83d33864c6cd832b6987c33380b85
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Plotter
#################
This is supposed to be a collection of classes and functions to aid in plotting
'''
from bokeh.plotting import output_notebook, show, figure
from bokeh.models.ranges import Range1d
import numpy as np
def lorentzian(freq, x, fwhm, inten=None):
'''
Plot lorentzian lineshapes
Args:
freq (np.ndarray): Frequencies where the peaks will be located
x (np.ndarray): X-axis data points
fwhm (float): Full-width at half maximum
inten (np.ndarray): Intensities of the peaks
Returns:
y (np.ndarray): Y values of the lorentzian lineshapes
'''
y = np.zeros(len(x))
if inten is None:
for fdx in freq:
y += 1/(2*np.pi)*fwhm/((fdx-x)**2+(0.5*fwhm)**2)
else:
for fdx, idx in zip(freq, inten):
y += 1/(2*np.pi)*idx*fwhm/((fdx-x)**2+(0.5*fwhm)**2)
return y
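# Hedged usage sketch for lorentzian() (all numbers illustrative): two peaks
# of different intensity broadened over a common grid.
def _example_lorentzian():
    x = np.linspace(0, 2000, 2001)
    return lorentzian(np.array([500.0, 1500.0]), x, 15.0, inten=np.array([1.0, 0.5]))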
def gaussian(freq, x, fwhm, inten=None):
'''
Plot gaussian lineshapes
Args:
freq (np.ndarray): Frequencies where the peaks will be located
x (np.ndarray): X-axis data points
fwhm (float): Full-width at half maximum
inten (np.ndarray): Intensities of the peaks
Returns:
y (np.ndarray): Y values of the gaussian lineshapes
'''
y = np.zeros(len(x))
sigma = fwhm/(np.sqrt(8*np.log(2)))
if inten is None:
for fdx in freq:
y += 1/(sigma*np.sqrt(2*np.pi))*np.exp(-(x-fdx)**2/(2*sigma**2))
else:
for idx, fdx in zip(inten, freq):
y += 1/(sigma*np.sqrt(2*np.pi))*idx*np.exp(-(x-fdx)**2/(2*sigma**2))
return y
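# Hedged sanity check (grid and fwhm illustrative): each normalized gaussian
# above carries unit area, so integrating one unit-intensity peak gives ~1.0.
def _example_gaussian_area():
    x = np.linspace(-200.0, 200.0, 40001)
    y = gaussian(np.array([0.0]), x, 15.0)
    return np.trapz(y, x)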
class Plot:
'''
Class that has a collection of methods to make plotting easier. Some of the bokeh functions
require importing specific functions like 'show' to display the figure. We want to make
this easier by defining methods like show so we can just import the class and it takes
care of everything.
'''
def show(self):
# method just to have simple show function like in matplotlib
show(self.fig)
def set_xrange(self, xmin, xmax):
# set the xrange
# makes it simple to flip the xaxis by giving the max value as the
# xmin and the min value as the xmax
self.fig.x_range = Range1d(xmin, xmax)
def set_yrange(self, ymin, ymax):
# set the yrange
self.fig.y_range = Range1d(ymin, ymax)
def __init__(self, *args, **kwargs):
        # this worries me a bit; I'm not sure if this is the proper way to do this
output_notebook()
# set the title
title = kwargs.pop('title', '')
# set the plot area parameters
plot_width = kwargs.pop('plot_width', 500)
plot_height = kwargs.pop('plot_height', 500)
# set the tools to be used
tools = kwargs.pop('tools', 'hover, crosshair, pan, wheel_zoom, box_zoom, reset, save,')
# create the figure
self.fig = figure(title=title, plot_height=plot_height, plot_width=plot_width, tools=tools)
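# Hedged usage sketch (numbers illustrative; assumes a Jupyter notebook, since
# the constructor calls output_notebook()): combining the lineshape helpers
# with the Plot wrapper, flipping the x-axis as set_xrange allows.
def _example_plot_spectrum():
    x = np.linspace(0, 2000, 2001)
    y = lorentzian(np.array([500.0, 1500.0]), x, 15.0)
    p = Plot(title='demo spectrum', plot_width=400, plot_height=300)
    p.fig.line(x, y)
    p.set_xrange(2000, 0)
    return p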
# a matplotlib example
# maybe we can make some conditional so you can use a bokeh plot or a matplotlib plot
# might be useful if we just want to display the plot right on the screen as opposed
# to having the plot on the web browser
#
#import numpy as np
#import matplotlib.pyplot as plt
#from matplotlib import ticker, rc
#
#class Plot:
# def lorentz(freq, x, fwhm, inten=None):
# y = np.zeros(len(x))
# if inten is None:
# for fdx in freq:
# y += 1/(2*np.pi)*fwhm/((fdx-x)**2+(0.5*fwhm)**2)
# else:
# for fdx, idx in zip(freq, inten):
# y += 1/(2*np.pi)*idx*fwhm/((fdx-x)**2+(0.5*fwhm)**2)
# return y
#
# def __init__(self, *args, **kwargs):
# title = kwargs.pop('title', '')
# xlabel = kwargs.pop('xlabel', '')
# ylabel = kwargs.pop('ylabel', '')
# marker = kwargs.pop('marker', '')
# line = kwargs.pop('line', '-')
# figsize = kwargs.pop('figsize', (8,8))
# dpi = kwargs.pop('dpi', 50)
# xrange = kwargs.pop('xrange', None)
# yrange = kwargs.pop('yrange', None)
# fwhm = kwargs.pop('fwhm', 15)
# res = kwargs.pop('res', 1)
# grid = kwargs.pop('grid', False)
# legend = kwargs.pop('legend', True)
# invert_x = kwargs.pop('invert_x', False)
# font = kwargs.pop('font', 10)
# lorentz = kwargs.pop('lorentz', True)
# self.fig = plt.figure(figsize=figsize, dpi=dpi)
# rc('font', size=font)
|
exa-analytics/atomic
|
exatomic/plotter/plot.py
|
Python
|
apache-2.0
| 4,657
|
[
"Gaussian"
] |
6c98f522997cf191d7dec40ecee0aa3dac7a40f8cd9576c3d5a6d4eec79f0b50
|
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.mail import send_mail, mail_managers, EmailMessage
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from contest.models import *
from datetime import datetime
def get_entries(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True)
    if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entries.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_entries_table(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest)
    if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entry_table.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_winners(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True).order_by('-vote_count')
return render_to_response('contest/winners.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_rules(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
return render_to_response('contest/rules.html', {'contest': contest}, context_instance=RequestContext(request))
def get_entry(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
return render_to_response('contest/entry.html', {'contest': entry.contest, 'entry': entry}, context_instance=RequestContext(request))
#@login_required
def add_entry(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
if request.method == 'POST':
form = EntryForm(request.POST)
form.contest = contest_id
if form.is_valid():
data = {
#"submitter": request.user.username,
"submit_date": datetime.now(),
"org_name": request.POST.get("org_name"),
"org_url": request.POST.get("org_url"),
"contact_person": request.POST.get("contact_person"),
"contact_phone": request.POST.get("contact_phone"),
"contact_email": request.POST.get("contact_email"),
"data_set": request.POST.get("data_set"),
"data_use": request.POST.get("data_use"),
"data_mission": request.POST.get("data_mission")
}
subject = 'OpenDataPhilly - Contest Submission'
user_email = request.POST.get("contact_email")
text_content = render_to_string('contest/submit_email.txt', data)
text_content_copy = render_to_string('contest/submit_email_copy.txt', data)
mail_managers(subject, text_content)
msg = EmailMessage(subject, text_content_copy, to=[user_email])
msg.send()
return render_to_response('contest/thanks.html', {'contest': contest}, context_instance=RequestContext(request))
else:
form = EntryForm()
return render_to_response('contest/submit_entry.html', {'contest': contest, 'form': form}, context_instance=RequestContext(request))
@login_required
def add_vote(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
contest = entry.contest
user = User.objects.get(username=request.user)
if contest.user_can_vote(user):
new_vote = Vote(user=user, entry=entry)
new_vote.save()
entry.vote_count = entry.vote_set.count()
entry.save()
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>Thank you for your vote! You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
return redirect('/contest/?sort=vote_count')
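# Illustrative URLconf sketch (not part of the original file): these views
# could be wired up along these lines in the app's urls.py, using the
# old-style Django 1.x patterns that match the render_to_response-era API
# above (module path, names and regexes here are assumptions):
#
#     from django.conf.urls.defaults import patterns, url
#
#     urlpatterns = patterns('contest.views',
#         url(r'^$', 'get_entries'),
#         url(r'^winners/$', 'get_winners'),
#         url(r'^entry/(?P<entry_id>\d+)/$', 'get_entry'),
#         url(r'^vote/(?P<entry_id>\d+)/$', 'add_vote'),
#     )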
|
copelco/Durham-Open-Data-Catalog
|
OpenDataCatalog/contest/views.py
|
Python
|
mit
| 5,451
|
[
"VisIt"
] |
7ecbdf61304e07ee35e2022b990f8177c6dfa27de5774db7ea7105376440dc97
|
import os
from ase import Atoms, Atom
from ase.io import write
from ase.calculators.jacapo import Jacapo
atoms = Atoms([Atom('H',[0,0,0])],
cell=(2,2,2),
pbc=True)
calc = Jacapo('Jacapo-test.nc',
pw=200,
nbands=2,
kpts=(1,1,1),
spinpol=False,
dipole=False,
symmetry=False,
ft=0.01)
atoms.set_calculator(calc)
print(atoms.get_potential_energy())
write('Jacapo-test.traj', atoms)
os.system('rm -f Jacapo-test.nc Jacapo-test.txt Jacapo-test.traj')
|
grhawk/ASE
|
tools/ase/test/jacapo/jacapo.py
|
Python
|
gpl-2.0
| 575
|
[
"ASE"
] |
62a05c656081d0b9dc3fb7f964253c2417f568fc1313dc416f193010e69a950a
|
#! /usr/bin/env python
from MDAnalysis import *
import numpy
import math
import sys
my_traj = sys.argv[1]
u = Universe("init.pdb",my_traj)
v = Universe("init.pdb")
end = my_traj.find('.pdb')
fout_name = my_traj[0:end] + '_dist_kink.dat'
a1 = u.selectAtoms("segid A and resid 62:78")
b1 = u.selectAtoms("segid B and resid 51:79")
f = open(fout_name,'w')
for ts in u.trajectory:
distance1 = numpy.linalg.norm(a1.centerOfMass() - b1.centerOfMass())
f.write('%7.3f\n' % distance1)
f.close()
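# Illustrative invocation (inferred from the sys.argv usage above):
#
#     python dist_mhcii_kink.py trajectory.pdb
#
# which writes one centre-of-mass distance per frame to trajectory_dist_kink.dat.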
|
demharters/git_scripts
|
dist_mhcii_kink.py
|
Python
|
apache-2.0
| 512
|
[
"MDAnalysis"
] |
973416491fd3c8e61152d907fb3fa14002debce2a2f2794b21561bd1739df815
|
from __future__ import division, print_function, unicode_literals
__author__ = 'setten'
import os
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.structure import Structure
from pymatgen.matproj.rest import MPRester, MPRestError
from pymatgen.io.gwwrapper.datastructures import GWSpecs, GWConvergenceData, get_spec
from pymatgen.io.gwwrapper.codeinterfaces import AbinitInterface, AbstractCodeInterface, VaspInterface, get_code_interface
from pymatgen.io.gwwrapper.GWworkflows import GWG0W0VaspInputSet, SingleAbinitGWWorkFlow
from pymatgen.io.gwwrapper.helpers import refine_structure, clean, load_ps, read_extra_abivars, read_grid_from_file
from pymatgen.io.gwwrapper.helpers import expand
#test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",'test_files')
# skip tests that need pseudopotentials unless ABINIT_PS_EXT is set
have_abinit_ps_ext = 'ABINIT_PS_EXT' in os.environ
structure_dict = {'lattice': {'a': 3.866974622849504,
'gamma': 60.0000000241681,
'c': 3.86697462,
'b': 3.866974623775052,
'matrix': [[3.34889826, 0.0, 1.93348731], [1.11629942, 3.15737156, 1.93348731], [0.0, 0.0, 3.86697462]],
'volume': 40.888291888494884,
'alpha': 60.000000032293386,
'beta': 60.00000002437586},
'sites': [{'properties': {u'coordination_no': 5, u'forces': [0.0, 0.0, 0.0]},
'abc': [0.875, 0.875, 0.875],
'xyz': [3.9070479700000003, 2.762700115, 6.767205585],
'species': [{'occu': 1, 'element': 'Si'}], 'label': 'Si'},
{'properties': {u'coordination_no': 5, u'forces': [0.0, 0.0, 0.0]},
'abc': [0.125, 0.125, 0.125], 'xyz': [0.55814971, 0.394671445, 0.966743655],
'species': [{'occu': 1, 'element': 'Si'}], 'label': 'Si'}],
'@class': 'Structure', '@module': 'pymatgen.core.structure'}
structure = Structure.from_dict(structure_dict)
class GWTestHelpers(PymatgenTest):
#def test_refine_structure(self):
# test = refine_structure(structure)
# self.assertIsInstance(test, Structure)
def test_clean(self):
string = 'MddmmdDD '
self.assertEqual(clean(string), 'mddmmddd')
def test_read_extra_abivars(self):
vars_out = {'ecut': 40}
f = open('extra_abivars', 'w')
f.write(str(vars_out))
f.close()
vars_in = read_extra_abivars()
self.assertEqual(vars_out, vars_in)
os.remove('extra_abivars')
@unittest.skipIf(not have_abinit_ps_ext, "Requires ABINIT_PS_EXT env variable")
def test_expand(self):
spec = get_spec('GW')
tests = SingleAbinitGWWorkFlow(structure, spec).convs
tests_out = {'nscf_nbands': {'test_range': (50,),
'control': 'gap', 'method': 'set_bands', 'level': 'nscf'},
'ecut': {'test_range': (28, 32, 36, 40, 44),
'control': 'e_ks_max', 'method': 'direct', 'level': 'scf'},
'ecuteps': {'test_range': (4, 8, 12, 16, 20),
'control': 'gap', 'method': 'direct', 'level': 'sigma'}}
self.assertEqual(expand(tests, 1), tests_out)
spec.data['code'] = 'VASP'
spec.update_code_interface()
tests = GWG0W0VaspInputSet(structure, spec).convs
tests_out = {'ENCUTGW': {'test_range': (200, 400, 600, 800), 'control': 'gap', 'method': 'incar_settings'}}
self.assertEqual(expand(tests, 1), tests_out)
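# Illustrative invocation (an assumption, not part of the original file):
# with pymatgen importable, these tests can be run through unittest, e.g.
#
#     python -m unittest pymatgen.io.gwwrapper.tests.test_helpers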
|
yanikou19/pymatgen
|
pymatgen/io/gwwrapper/tests/test_helpers.py
|
Python
|
mit
| 3,782
|
[
"VASP",
"pymatgen"
] |
c711d3c731c921cf6f6de68033a88b83185656ccfe0380738a8259bbae80034e
|
from __future__ import division, print_function
import numpy as np
from bct.utils import BCTParamError, binarize, get_rng
from bct.utils import pick_four_unique_nodes_quickly
from .clustering import number_of_components
def latmio_dir_connected(R, itr, D=None, seed=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
n = len(R)
ind_rp = rng.permutation(n) # random permutation of nodes
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_dir(R, itr, D=None, seed=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
n = len(R)
ind_rp = rng.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff
def latmio_und_connected(R, itr, D=None, seed=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
n = len(R)
ind_rp = rng.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
rewire = True
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
def latmio_und(R, itr, D=None, seed=None):
'''
This function "latticizes" an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
R : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
n = len(R)
ind_rp = rng.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1) / 2))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts:
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])]
return Rlatt, R, ind_rp, eff
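# Illustrative usage sketch (not part of the original module): latticize a
# small random undirected binary network, built here with this module's own
# makerandCIJ_und (argument values are assumptions for illustration):
#
#     >>> CIJ = makerandCIJ_und(16, 40, seed=1)
#     >>> W = CIJ + CIJ.T   # makerandCIJ_und fills the upper triangle only
#     >>> Rlatt, Rrp, ind_rp, eff = latmio_und(W, itr=5, seed=1)
#     >>> Rlatt.sum() == W.sum()   # edge swaps preserve the edge count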
def makeevenCIJ(n, k, sz_cl, seed=None):
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
A warning is generated if all modules contain more edges than K.
Cluster size is 2^sz_cl;
'''
rng = get_rng(seed)
# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1
# make a stupid little template
t = np.ones((2, 2)) * 2
# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
print("Warning: n must be a power of 2")
n = Nlvl
# create hierarchical template
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))
# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
print("Warning: K is too small, output matrix contains clusters only")
return CIJp
a, b = np.where(np.logical_not(CIJp + np.eye(n)))
    # assign rem_k randomly distributed connections
rp = rng.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
CIJp[ai, bi] = 1
return np.array(CIJp, dtype=int)
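# Illustrative usage sketch (not part of the original module): 64 nodes in
# fully connected modules linked by evenly spread extra edges (argument
# values are assumptions for illustration):
#
#     >>> CIJ = makeevenCIJ(64, 1000, 3, seed=1)
#     >>> CIJ.shape   # (64, 64), with CIJ.sum() == 1000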
def makefractalCIJ(mx_lvl, E, sz_cl, seed=None):
'''
This function generates a directed network with a hierarchical modular
organization. All modules are fully connected and connection density
decays as 1/(E^n), with n = index of hierarchical level.
Parameters
----------
mx_lvl : int
number of hierarchical levels, N = 2^mx_lvl
E : int
connection density fall off per level
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
K : int
number of connections present in output CIJ
'''
rng = get_rng(seed)
# make a stupid little template
t = np.ones((2, 2)) * 2
# compute N and cluster size
n = 2**mx_lvl
sz_cl -= 1
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
ee = mx_lvl - CIJ - sz_cl
ee = (ee > 0) * ee
prob = (1 / E**ee) * (np.ones((s, s)) - np.eye(s))
CIJ = (prob > rng.random_sample((n, n)))
# count connections
k = np.sum(CIJ)
return np.array(CIJ, dtype=int), k
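# Illustrative usage sketch (not part of the original module): a hierarchical
# modular network on 2**6 = 64 nodes whose connection density decays as
# 1/(E**n) per level (argument values are assumptions for illustration):
#
#     >>> CIJ, k = makefractalCIJ(6, 2.5, 3, seed=1)
#     >>> CIJ.shape   # (64, 64); k is the realized number of connections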
def makerandCIJdegreesfixed(inv, outv, seed=None):
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise.
'''
rng = get_rng(seed)
n = len(inv)
k = np.sum(inv)
in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0
for i in range(n):
in_inv[i_in:i_in + inv[i]] = i
out_inv[i_out:i_out + outv[i]] = i
i_in += inv[i]
i_out += outv[i]
CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[rng.permutation(k)]))
# create CIJ and check for double edges and self connections
for i in range(k):
if CIJ[edges[0, i], edges[1, i]]:
tried = set()
while True:
if len(tried) == k:
raise BCTParamError('Could not resolve the given '
'in and out vectors')
switch = rng.randint(k)
while switch in tried:
switch = rng.randint(k)
if not (CIJ[edges[0, i], edges[1, switch]] or
CIJ[edges[0, switch], edges[1, i]]):
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
if switch < i:
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
t = edges[1, i]
edges[1, i] = edges[1, switch]
edges[1, switch] = t
break
tried.add(switch)
else:
CIJ[edges[0, i], edges[1, i]] = 1
CIJ -= np.eye(n)
return CIJ
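# Illustrative usage sketch (not part of the original module): a directed
# network in which every node has in- and out-degree 2 (the vectors are
# assumptions for illustration; sum(inv) must equal sum(outv)). Note the
# edge arrays above are built as floats, which only older numpy accepts as
# indices; on recent numpy they would need an integer dtype:
#
#     >>> inv = outv = np.full(10, 2, dtype=int)
#     >>> CIJ = makerandCIJdegreesfixed(inv, outv, seed=1)
#     >>> (CIJ.sum(axis=0) == inv).all()   # in-degrees match the request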
def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
def makerandCIJ_und(n, k, seed=None):
'''
This function generates an undirected random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
undirected random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.triu(np.logical_not(np.eye(n))).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
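# Illustrative usage sketch (not part of the original module): the undirected
# variant places its K edges in the upper triangle only, so symmetrize before
# handing the result to the undirected algorithms above:
#
#     >>> CIJ = makerandCIJ_und(32, 100, seed=1)
#     >>> W = CIJ + CIJ.T   # symmetric, zero diagonal, 100 edges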
def makeringlatticeCIJ(n, k, seed=None):
'''
This function generates a directed lattice network with toroidal
    boundary conditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
rng = get_rng(seed)
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = rng.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ
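# Illustrative usage sketch (not part of the original module): a directed ring
# lattice with exactly K edges, in/out-degree approximately K/N (values are
# assumptions for illustration):
#
#     >>> CIJ = makeringlatticeCIJ(20, 80, seed=1)
#     >>> CIJ.sum()   # 80.0; edges hug the diagonal, wrapping at the ends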
def maketoeplitzCIJ(n, k, s, seed=None):
'''
This function generates a directed network with a Gaussian drop-off in
edge density with increasing distance from the main diagonal. There are
    no toroidal boundary conditions (i.e. no ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
s : float
standard deviation of toeplitz
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
from scipy import linalg, stats
pf = stats.norm.pdf(range(1, n), .5, s)
template = linalg.toeplitz(np.append((0,), pf), r=np.append((0,), pf))
template *= (k / np.sum(template))
CIJ = np.zeros((n, n))
itr = 0
while np.sum(CIJ) != k:
CIJ = (rng.random_sample((n, n)) < template)
itr += 1
if itr > 10000:
raise BCTParamError('Infinite loop was caught generating toeplitz '
'matrix. This means the matrix could not be resolved with the '
'specified parameters.')
return CIJ
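# Illustrative usage sketch (not part of the original module): edge density
# decays as a Gaussian with distance from the main diagonal; the rejection
# loop above retries until exactly K edges land (values are assumptions):
#
#     >>> CIJ = maketoeplitzCIJ(30, 120, 4.0, seed=1)
#     >>> int(CIJ.sum())   # 120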
def null_model_dir_sign(W, bin_swaps=5, wei_freq=.1, seed=None):
'''
    This function randomizes a directed network with positive and
    negative weights, while preserving the degree and strength
    distributions. This function calls randmio_dir_signed.m
Parameters
----------
W : NxN np.ndarray
directed weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
        frequency of weight sorting in weighted randomization. 0 <= wei_freq <= 1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps may
enable a more random binary organization, and higher values of wei_freq
may enable a more accurate conservation of strength sequences.
R are the correlation coefficients between positive and negative
in-strength and out-strength sequences of input and output connection
matrices and are used to evaluate the accuracy with which strengths
were preserved. Note that correlation coefficients may be a rough
measure of strength-sequence accuracy and one could implement more
formal tests (such as the Kolmogorov-Smirnov test) if desired.
'''
rng = get_rng(seed)
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
    Ap = (W > 0)  # positive adjmat
    An = (W < 0)  # negative adjmat
    if np.size(np.where(Ap.flat)) < (n * (n - 1)):
        W_r, eff = randmio_dir_signed(W, bin_swaps, seed=rng)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
Si = np.sum(W * Acur, axis=0) # positive in-strength
So = np.sum(W * Acur, axis=1) # positive out-strength
Wv = np.sort(W[Acur].flat) # sorted weights vector
i, j = np.where(A_rcur)
Lij, = np.where(A_rcur.flat) # weights indices
P = np.outer(So, Si)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = rng.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
                    # readjust expected weight probability for i[o],j[o];
                    # P = np.outer(So, Si), so row i[o] scales with the
                    # out-strength and column j[o] with the in-strength
                    f = 1 - Wv[r] / So[i[o]]
                    P[i[o], :] *= f
                    f = 1 - Wv[r] / Si[j[o]]
                    P[:, j[o]] *= f
                    # readjust out-strength of i[o]
                    So[i[o]] -= Wv[r]
                    # readjust in-strength of j[o]
                    Si[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
                Wv = np.delete(Wv, R)  # the weights consumed were Wv[r] for r in R
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
def null_model_und_sign(W, bin_swaps=5, wei_freq=.1, seed=None):
'''
This function randomizes an undirected network with positive and
negative weights, while preserving the degree and strength
    distributions. This function calls randmio_und_signed.m
Parameters
----------
W : NxN np.ndarray
undirected weighted connection matrix
bin_swaps : int
average number of swaps in each edge binary randomization. Default
value is 5. 0 swaps implies no binary randomization.
wei_freq : float
        frequency of weight sorting in weighted randomization. 0 <= wei_freq <= 1.
wei_freq == 1 implies that weights are sorted at each step.
wei_freq == 0.1 implies that weights sorted each 10th step (faster,
default value)
wei_freq == 0 implies no sorting of weights (not recommended)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
W0 : NxN np.ndarray
randomized weighted connection matrix
R : 4-tuple of floats
Correlation coefficients between strength sequences of input and
output connection matrices, rpos_in, rpos_out, rneg_in, rneg_out
Notes
-----
The value of bin_swaps is ignored when binary topology is fully
connected (e.g. when the network has no negative weights).
Randomization may be better (and execution time will be slower) for
higher values of bin_swaps and wei_freq. Higher values of bin_swaps
may enable a more random binary organization, and higher values of
wei_freq may enable a more accurate conservation of strength
sequences.
R are the correlation coefficients between positive and negative
strength sequences of input and output connection matrices and are
used to evaluate the accuracy with which strengths were preserved.
Note that correlation coefficients may be a rough measure of
strength-sequence accuracy and one could implement more formal tests
(such as the Kolmogorov-Smirnov test) if desired.
'''
rng = get_rng(seed)
if not np.all(W == W.T):
raise BCTParamError("Input must be undirected")
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
Ap = (W > 0) # positive adjmat
An = (W < 0) # negative adjmat
if np.size(np.where(Ap.flat)) < (n * (n - 1)):
W_r, eff = randmio_und_signed(W, bin_swaps, seed=rng)
Ap_r = W_r > 0
An_r = W_r < 0
else:
Ap_r = Ap
An_r = An
W0 = np.zeros((n, n))
for s in (1, -1):
if s == 1:
Acur = Ap
A_rcur = Ap_r
else:
Acur = An
A_rcur = An_r
S = np.sum(W * Acur, axis=0) # strengths
Wv = np.sort(W[np.where(np.triu(Acur))]) # sorted weights vector
i, j = np.where(np.triu(A_rcur))
Lij, = np.where(np.triu(A_rcur).flat) # weights indices
P = np.outer(S, S)
if wei_freq == 0: # get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij]) # assign corresponding sorted
W0.flat[Lij[Oind]] = s * Wv # weight at this index
else:
wsize = np.size(Wv)
wei_period = np.round(1 / wei_freq) # convert frequency to period
lq = np.arange(wsize, 0, -wei_period, dtype=int)
for m in lq: # iteratively explore at this period
# get indices of Lij that sort P
Oind = np.argsort(P.flat[Lij])
R = rng.permutation(m)[:np.min((m, wei_period))]
for q, r in enumerate(R):
# choose random index of sorted expected weight
o = Oind[r]
W0.flat[Lij[o]] = s * Wv[r] # assign corresponding weight
# readjust expected weighted probability for i[o],j[o]
f = 1 - Wv[r] / S[i[o]]
P[i[o], :] *= f
P[:, i[o]] *= f
f = 1 - Wv[r] / S[j[o]]
P[j[o], :] *= f
P[:, j[o]] *= f
# readjust strength of i[o]
S[i[o]] -= Wv[r]
# readjust strength of j[o]
S[j[o]] -= Wv[r]
O = Oind[R]
# remove current indices from further consideration
Lij = np.delete(Lij, O)
i = np.delete(i, O)
j = np.delete(j, O)
Wv = np.delete(Wv, R)
W0 = W0 + W0.T
rpos_in = np.corrcoef(np.sum(W * (W > 0), axis=0),
np.sum(W0 * (W0 > 0), axis=0))
rpos_ou = np.corrcoef(np.sum(W * (W > 0), axis=1),
np.sum(W0 * (W0 > 0), axis=1))
rneg_in = np.corrcoef(np.sum(-W * (W < 0), axis=0),
np.sum(-W0 * (W0 < 0), axis=0))
rneg_ou = np.corrcoef(np.sum(-W * (W < 0), axis=1),
np.sum(-W0 * (W0 < 0), axis=1))
return W0, (rpos_in[0, 1], rpos_ou[0, 1], rneg_in[0, 1], rneg_ou[0, 1])
def randmio_dir_connected(R, itr, seed=None):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions. The
function also ensures that the randomized network maintains
connectedness, the ability for every node to reach every other node in
the network. The input network for this function must be connected.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (np.any((R[a, c], R[d, b], R[d, c])) and
np.any((R[c, a], R[b, d], R[b, a]))):
P = R[(a, c), :].copy()
P[0, b] = 0
P[0, d] = 1
P[1, d] = 0
P[1, b] = 1
PN = P.copy()
PN[0, a] = 1
PN[1, c] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
PN += P
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(PN[0, (b, c)]) and np.any(PN[1, (d, a)]):
break
# end connectedness testing
if rewire: # reassign edges
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d # reassign edge indices
j[e2] = b
eff += 1
break
att += 1
return R, eff
def randmio_dir(R, itr, seed=None):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
                j.setflags(write=True)
                j[e1] = d
                j[e2] = b  # reassign edge indices
eff += 1
break
att += 1
return R, eff
def randmio_und_connected(R, itr, seed=None):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks. The function also ensures that the
randomized network maintains connectedness, the ability for every node
to reach every other node in the network. The input network for this
function must be connected.
NOTE the changes to the BCT matlab function of the same name
made in the Jan 2016 release
have not been propagated to this function because of substantially
decreased time efficiency in the implementation. Expect these changes
to be merged eventually.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
if number_of_components(R) > 1:
raise BCTParamError("Input is not connected")
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
rewire = True
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
# connectedness condition
if not (R[a, c] or R[b, d]):
P = R[(a, d), :].copy()
P[0, b] = 0
P[1, c] = 0
PN = P.copy()
PN[:, d] = 1
PN[:, a] = 1
while True:
P[0, :] = np.any(R[P[0, :] != 0, :], axis=0)
P[1, :] = np.any(R[P[1, :] != 0, :], axis=0)
P *= np.logical_not(PN)
if not np.all(np.any(P, axis=1)):
rewire = False
break
elif np.any(P[:, (b, c)]):
break
PN += P
# end connectedness testing
if rewire:
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
def randmio_dir_signed(R, itr, seed=None):
'''
This function randomizes a directed weighted network with positively
and negatively signed connections, while preserving the positive and
negative degree distributions. In weighted networks by default the
function preserves the out-degree strength but not the in-strength
distributions
Parameters
---------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
itr *= n * (n - 1)
#maximal number of rewiring attempts per iter
max_attempts = n
#actual number of successful rewirings
eff = 0
#print(itr)
for it in range(int(itr)):
#print(it)
att = 0
while att <= max_attempts:
#select four distinct vertices
a, b, c, d = pick_four_unique_nodes_quickly(n, rng)
#a, b, c, d = rng.choice(n, 4)
#a, b, c, d = rng.permutation(4)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#print(np.sign(r0_ab), np.sign(r0_ad))
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = r0_ab
R[a, b] = r0_ad
R[c, b] = r0_cd
R[c, d] = r0_cb
eff += 1
break
att += 1
#print(eff)
return R, eff
def randmio_und(R, itr, seed=None):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1, e2 = rng.randint(k, size=(2,))
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff
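# Illustrative usage sketch (not part of the original module): randomize an
# undirected binary network while preserving every node's degree (the input
# here is an assumption for illustration):
#
#     >>> CIJ = makerandCIJ_und(16, 40, seed=1)
#     >>> W = CIJ + CIJ.T
#     >>> W_r, eff = randmio_und(W, itr=10, seed=1)
#     >>> (W_r.sum(axis=0) == W.sum(axis=0)).all()   # degree sequence intact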
def randmio_und_signed(R, itr, seed=None):
'''
This function randomizes an undirected weighted network with positive
and negative weights, while simultaneously preserving the degree
distribution of positive and negative weights. The function does not
preserve the strength distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
    R : NxN np.ndarray
        randomized network
    eff : int
        number of actual rewirings carried out
    '''
rng = get_rng(seed)
R = R.copy()
n = len(R)
itr *= int(n * (n -1) / 2)
max_attempts = int(np.round(n / 2))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts:
a, b, c, d = pick_four_unique_nodes_quickly(n, rng)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = R[d, a] = r0_ab
R[a, b] = R[b, a] = r0_ad
R[c, b] = R[b, c] = r0_cd
R[c, d] = R[d, c] = r0_cb
eff += 1
break
att += 1
return R, eff
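# Illustrative usage sketch (not part of the original module): sign-respecting
# rewiring of a signed undirected network (the input weights here are an
# assumption for illustration):
#
#     >>> rs = np.random.RandomState(1)
#     >>> W = np.triu(rs.uniform(-1, 1, (20, 20)), 1)
#     >>> W = W + W.T   # signed, symmetric, zero diagonal
#     >>> W_r, eff = randmio_und_signed(W, itr=5, seed=1)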
def randomize_graph_partial_und(A, B, maxswap, seed=None):
'''
A = RANDOMIZE_GRAPH_PARTIAL_UND(A,B,MAXSWAP) takes adjacency matrices A
and B and attempts to randomize matrix A by performing MAXSWAP
rewirings. The rewirings will avoid any spots where matrix B is
nonzero.
Parameters
----------
A : NxN np.ndarray
undirected adjacency matrix to randomize
B : NxN np.ndarray
mask; edges to avoid
maxswap : int
number of rewirings
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
A : NxN np.ndarray
randomized matrix
Notes
-----
1. Graph may become disconnected as a result of rewiring. Always
important to check.
2. A can be weighted, though the weighted degree sequence will not be
preserved.
3. A must be undirected.
'''
rng = get_rng(seed)
A = A.copy()
i, j = np.where(np.triu(A, 1))
i.setflags(write=True)
j.setflags(write=True)
m = len(i)
nswap = 0
while nswap < maxswap:
while True:
e1, e2 = rng.randint(m, size=(2,))
while e1 == e2:
e2 = rng.randint(m)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if rng.random_sample() > .5:
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (A[a, d] or A[c, b] or B[a, d] or B[c, b]): # avoid specified ixes
A[a, d] = A[a, b]
A[a, b] = 0
A[d, a] = A[b, a]
A[b, a] = 0
A[c, b] = A[c, d]
A[c, d] = 0
A[b, c] = A[d, c]
A[d, c] = 0
j[e1] = d
j[e2] = b # reassign edge indices
nswap += 1
return A
def randomizer_bin_und(R, alpha, seed=None):
'''
This function randomizes a binary undirected network, while preserving
the degree distribution. The function directly searches for rewirable
edge pairs (rather than trying to rewire edge pairs at random), and
hence avoids long loops and works especially well in dense matrices.
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
alpha : float
fraction of edges to rewire
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
'''
rng = get_rng(seed)
R = binarize(R, copy=True) # binarize
if not np.all(R == R.T):
raise BCTParamError(
'randomizer_bin_und only takes undirected matrices')
ax = len(R)
nr_poss_edges = (np.dot(ax, ax) - ax) / 2 # find maximum possible edges
savediag = np.diag(R)
np.fill_diagonal(R, np.inf) # replace diagonal with high value
# if there are more edges than non-edges, invert the matrix to reduce
# computation time. "invert" means swap meaning of 0 and 1, not matrix
# inversion
i, j = np.where(np.triu(R, 1))
k = len(i)
if k > nr_poss_edges / 2:
swap = True
R = np.logical_not(R)
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
else:
swap = False
# exclude fully connected nodes
fullnodes = np.where((np.sum(np.triu(R, 1), axis=0) +
np.sum(np.triu(R, 1), axis=1).T) == (ax - 1))
if np.size(fullnodes):
R[fullnodes, :] = 0
R[:, fullnodes] = 0
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
if k == 0 or k >= (nr_poss_edges - 1):
raise BCTParamError("No possible randomization")
for it in range(k):
if rng.random_sample() > alpha:
continue # rewire alpha% of edges
a = i[it]
b = j[it] # it is the chosen edge from a<->b
alliholes, = np.where(R[:, a] == 0) # find where each end can connect
alljholes, = np.where(R[:, b] == 0)
# we can only use edges with connection to neither node
i_intersect = np.intersect1d(alliholes, alljholes)
# find which of these nodes are connected
ii, jj = np.where(R[np.ix_(i_intersect, i_intersect)])
# if there is an edge to switch
if np.size(ii):
# choose one randomly
nummates = np.size(ii)
mate = rng.randint(nummates)
# randomly orient the second edge
if rng.random_sample() > .5:
c = i_intersect[ii[mate]]
d = i_intersect[jj[mate]]
else:
d = i_intersect[ii[mate]]
c = i_intersect[jj[mate]]
# swap the edges
R[a, b] = 0
R[c, d] = 0
R[b, a] = 0
R[d, c] = 0
R[a, c] = 1
R[b, d] = 1
R[c, a] = 1
R[d, b] = 1
# update the edge index (this is inefficient)
for m in range(k):
if i[m] == d and j[m] == c:
i.setflags(write=True)
j.setflags(write=True)
i[it] = c
j[m] = b
elif i[m] == c and j[m] == d:
i.setflags(write=True)
j.setflags(write=True)
j[it] = c
i[m] = b
# restore fullnodes
if np.size(fullnodes):
R[fullnodes, :] = 1
R[:, fullnodes] = 1
# restore inversion
if swap:
R = np.logical_not(R)
# restore diagonal
np.fill_diagonal(R, 0)
R += savediag
return np.array(R, dtype=int)
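# Illustrative usage sketch (not part of the original module): rewire roughly
# half of the edges of a binary undirected network (values are assumptions):
#
#     >>> CIJ = makerandCIJ_und(16, 60, seed=1)
#     >>> W = CIJ + CIJ.T
#     >>> W_r = randomizer_bin_und(W, 0.5, seed=1)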
|
clbarnes/bctpy
|
bct/algorithms/reference.py
|
Python
|
gpl-3.0
| 58,967
|
[
"Gaussian"
] |
fed9adb0271e6be00f82bbb04a69043dedbc4364735b064f29e96bba1474fb39
|
# LIBTBX_SET_DISPATCHER_NAME dials.import
from __future__ import annotations
import logging
import pickle
from collections import namedtuple
import dxtbx.model.compare as compare
from dxtbx.imageset import ImageGrid, ImageSequence
from dxtbx.model.experiment_list import (
Experiment,
ExperimentList,
ExperimentListFactory,
)
from libtbx.phil import parse
from dials.util import Sorry, show_mail_handle_errors
from dials.util.multi_dataset_handling import generate_experiment_identifiers
from dials.util.options import flatten_experiments
logger = logging.getLogger("dials.command_line.import")
def _pickle_load(fh):
return pickle.load(fh, encoding="bytes")
help_message = """
This program is used to import image data files into a format that can be used
within dials. The program looks at the metadata for each image along with the
filenames to determine the relationship between sets of images. Once all the
images have been analysed, an experiments object is written to file which specifies
the relationship between files. For example if two sets of images which belong
to two rotation scans have been given, two image sequences will be saved. Images to
be processed are specified as command line arguments. Sometimes, there is a
maximum number of arguments that can be given on the command line and the number
of files may exceed this. In this case image filenames can be input on stdin
as shown in the examples below. Alternatively a template can be specified using
the template= parameter where the consecutive digits representing the image
numbers in the filenames are replaced with '#' characters.
The geometry can be set manually, either by using the reference_geometry=
parameter to specify an experiment list .expt file containing
the reference geometry, by using the mosflm_beam_centre= parameter to set
the Mosflm beam centre, or by specifying each variable to be overridden
using various geometry parameters.
Examples::
dials.import /data/directory-containing-images/
dials.import image_*.cbf
dials.import image_1_*.cbf image_2_*.cbf
dials.import directory/with/images
dials.import template=image_1_####.cbf
dials.import directory=directory/with/images
find . -name "image_*.cbf" | dials.import
dials.import << EOF
image_1.cbf
image_2.cbf
EOF
"""
# Create the phil parameters
phil_scope = parse(
"""
output {
experiments = imported.expt
.type = str
.help = "The output JSON or pickle file"
log = 'dials.import.log'
.type = str
.help = "The log filename"
compact = False
.type = bool
.help = "For JSON output use compact representation"
}
identifier_type = *uuid timestamp None
.type = choice
.help = "Type of unique identifier to generate."
input {
ignore_unhandled = True
.type = bool
.help = "Ignore unhandled input (e.g. log files)"
template = None
.type = str
.help = "The image sequence template"
.multiple = True
directory = None
.type = str
.help = "A directory with images"
.multiple = True
reference_geometry = None
.type = path
.help = "Experimental geometry from this models.expt "
"will override the geometry from the "
"image headers."
check_reference_geometry = True
.type = bool
.expert_level = 2
.help = "If True, assert the reference geometry is similar to"
"the image geometry"
use_beam_reference = True
.type = bool
.expert_level = 2
.help = "If True, the beam from reference_geometry will override "
"the beam from the image headers."
use_gonio_reference = True
.type = bool
.expert_level = 2
.help = "If True, the goniometer from reference_geometry will override "
"the goniometer from the image headers."
use_detector_reference = True
.type = bool
.expert_level = 2
.help = "If True, the detector from reference_geometry will override "
"the detector from the image headers."
allow_multiple_sequences = True
.type = bool
.help = "If False, raise an error if multiple sequences are found"
as_grid_scan = False
.type = bool
.help = "Import as grid scan"
grid_size = None
.type = ints(size=2)
.help = "If importing as a grid scan set the size"
}
include scope dials.util.options.format_phil_scope
include scope dials.util.options.geometry_phil_scope
lookup {
mask = None
.type = str
.help = "Apply a mask to the imported data"
gain = None
.type = str
.help = "Apply a gain to the imported data"
pedestal = None
.type = str
.help = "Apply a pedestal to the imported data"
dx = None
.type = str
.help = "Apply an x geometry offset"
"If both dx and dy are set then"
"OffsetParallaxCorrectedPxMmStrategy will be used"
dy = None
.type = str
.help = "Apply an y geometry offset"
"If both dx and dy are set then"
"OffsetParallaxCorrectedPxMmStrategy will be used"
}
""",
process_includes=True,
)
def _extract_or_read_imagesets(params):
"""
Return a list of ImageSets, importing them via alternative means if necessary.
The "Alternative Means" means via params.input.template or .directory,
if the images to import haven't been specified directly.
Args:
params: The phil.scope_extract from dials.import
Returns: A list of ImageSet objects
"""
# Get the experiments
experiments = flatten_experiments(params.input.experiments)
# Check we have some filenames
if len(experiments) == 0:
# FIXME Should probably make this smarter since it requires editing here
# and in dials.import phil scope
try:
format_kwargs = {
"dynamic_shadowing": params.format.dynamic_shadowing,
"multi_panel": params.format.multi_panel,
}
except AttributeError:
format_kwargs = None
# Check if a template has been set and print help if not, otherwise try to
# import the images based on the template input
if len(params.input.template) > 0:
experiments = ExperimentListFactory.from_templates(
params.input.template,
image_range=params.geometry.scan.image_range,
format_kwargs=format_kwargs,
)
if len(experiments) == 0:
raise Sorry(
"No experiments found matching template %s"
% params.input.experiments
)
elif len(params.input.directory) > 0:
experiments = ExperimentListFactory.from_filenames(
params.input.directory, format_kwargs=format_kwargs
)
if len(experiments) == 0:
raise Sorry(
"No experiments found in directories %s" % params.input.directory
)
else:
raise Sorry("No experiments found")
# TODO (Nick): This looks redundant as the experiments are immediately discarded.
# verify this, and remove if it is.
if params.identifier_type:
generate_experiment_identifiers(experiments, params.identifier_type)
# Get a list of all imagesets
imageset_list = experiments.imagesets()
# Return the experiments
return imageset_list
class ReferenceGeometryUpdater:
"""
A class to replace beam + detector with a reference
"""
def __init__(self, params):
"""
Load the reference geometry
"""
self.params = params
self.reference = self.load_reference_geometry(params)
def __call__(self, imageset):
"""
Replace with the reference geometry
"""
if self.params.input.check_reference_geometry:
# Check static detector items are the same
assert self.reference.detector.is_similar_to(
imageset.get_detector(), static_only=True
), "Reference detector model does not match input detector model"
# Set beam and detector
if self.params.input.use_beam_reference:
imageset.set_beam(self.reference.beam)
if self.params.input.use_detector_reference:
imageset.set_detector(self.reference.detector)
if self.params.input.use_gonio_reference:
imageset.set_goniometer(self.reference.goniometer)
return imageset
def load_reference_geometry(self, params):
"""
Load a reference geometry file
"""
# Load reference geometry
reference_detector = None
reference_beam = None
reference_goniometer = None
if params.input.reference_geometry is not None:
from dxtbx.serialize import load
experiments = load.experiment_list(
params.input.reference_geometry, check_format=False
)
assert experiments, "Could not import reference geometry"
assert len(experiments.detectors()) >= 1
assert len(experiments.beams()) >= 1
if len(experiments.detectors()) > 1:
raise Sorry(
"The reference geometry file contains %d detector definitions, but only a single definition is allowed."
% len(experiments.detectors())
)
if len(experiments.beams()) > 1:
raise Sorry(
"The reference geometry file contains %d beam definitions, but only a single definition is allowed."
% len(experiments.beams())
)
reference_detector = experiments.detectors()[0]
reference_beam = experiments.beams()[0]
reference_goniometer = experiments.goniometers()[0]
Reference = namedtuple("Reference", ["detector", "beam", "goniometer"])
return Reference(
detector=reference_detector,
beam=reference_beam,
goniometer=reference_goniometer,
)
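# Hedged usage sketch (variable names are illustrative): once ``params`` has
# input.reference_geometry set, the updater is applied per imageset:
#
#   updater = ReferenceGeometryUpdater(params)
#   imageset = updater(imageset)  # swaps in reference beam/detector/goniometer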
class ManualGeometryUpdater:
"""
A class to update the geometry manually
"""
def __init__(self, params):
"""
Save the params
"""
self.params = params
def __call__(self, imageset):
"""
Override the parameters
"""
from copy import deepcopy
from dxtbx.imageset import ImageSequence, ImageSetFactory
from dxtbx.model import (
BeamFactory,
DetectorFactory,
GoniometerFactory,
ScanFactory,
)
if self.params.geometry.convert_sequences_to_stills:
imageset = ImageSetFactory.imageset_from_anyset(imageset)
for j in imageset.indices():
imageset.set_scan(None, j)
imageset.set_goniometer(None, j)
if not isinstance(imageset, ImageSequence):
if self.params.geometry.convert_stills_to_sequences:
imageset = self.convert_stills_to_sequence(imageset)
if isinstance(imageset, ImageSequence):
beam = BeamFactory.from_phil(self.params.geometry, imageset.get_beam())
detector = DetectorFactory.from_phil(
self.params.geometry, imageset.get_detector(), beam
)
goniometer = GoniometerFactory.from_phil(
self.params.geometry, imageset.get_goniometer()
)
scan = ScanFactory.from_phil(
self.params.geometry, deepcopy(imageset.get_scan())
)
i0, i1 = scan.get_array_range()
j0, j1 = imageset.get_scan().get_array_range()
if i0 < j0 or i1 > j1:
imageset = self.extrapolate_imageset(
imageset=imageset,
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
)
else:
imageset.set_beam(beam)
imageset.set_detector(detector)
imageset.set_goniometer(goniometer)
imageset.set_scan(scan)
else:
for i in range(len(imageset)):
beam = BeamFactory.from_phil(self.params.geometry, imageset.get_beam(i))
detector = DetectorFactory.from_phil(
self.params.geometry, imageset.get_detector(i), beam
)
goniometer = GoniometerFactory.from_phil(
self.params.geometry, imageset.get_goniometer(i)
)
scan = ScanFactory.from_phil(self.params.geometry, imageset.get_scan(i))
imageset.set_beam(beam, i)
imageset.set_detector(detector, i)
imageset.set_goniometer(goniometer, i)
imageset.set_scan(scan, i)
return imageset
def extrapolate_imageset(
self, imageset=None, beam=None, detector=None, goniometer=None, scan=None
):
from dxtbx.imageset import ImageSetFactory
first, last = scan.get_image_range()
sequence = ImageSetFactory.make_sequence(
template=imageset.get_template(),
indices=list(range(first, last + 1)),
format_class=imageset.get_format_class(),
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
format_kwargs=imageset.params(),
)
return sequence
def convert_stills_to_sequence(self, imageset):
from dxtbx.model import Scan
assert self.params.geometry.scan.oscillation is not None
beam = imageset.get_beam(index=0)
detector = imageset.get_detector(index=0)
goniometer = imageset.get_goniometer(index=0)
for i in range(1, len(imageset)):
b_i = imageset.get_beam(i)
d_i = imageset.get_detector(i)
g_i = imageset.get_goniometer(i)
assert (beam is None and b_i is None) or beam.is_similar_to(
imageset.get_beam(index=i),
wavelength_tolerance=self.params.input.tolerance.beam.wavelength,
direction_tolerance=self.params.input.tolerance.beam.direction,
polarization_normal_tolerance=self.params.input.tolerance.beam.polarization_normal,
polarization_fraction_tolerance=self.params.input.tolerance.beam.polarization_fraction,
)
assert (detector is None and d_i is None) or detector.is_similar_to(
imageset.get_detector(index=i),
fast_axis_tolerance=self.params.input.tolerance.detector.fast_axis,
slow_axis_tolerance=self.params.input.tolerance.detector.slow_axis,
origin_tolerance=self.params.input.tolerance.detector.origin,
)
assert (goniometer is None and g_i is None) or goniometer.is_similar_to(
imageset.get_goniometer(index=i),
rotation_axis_tolerance=self.params.input.tolerance.goniometer.rotation_axis,
fixed_rotation_tolerance=self.params.input.tolerance.goniometer.fixed_rotation,
setting_rotation_tolerance=self.params.input.tolerance.goniometer.setting_rotation,
)
oscillation = self.params.geometry.scan.oscillation
from dxtbx.imageset import ImageSetFactory
from dxtbx.sequence_filenames import template_regex_from_list
template, indices = template_regex_from_list(imageset.paths())
image_range = (min(indices), max(indices))
assert (image_range[1] + 1 - image_range[0]) == len(indices)
scan = Scan(image_range=image_range, oscillation=oscillation)
if template is None:
paths = [imageset.get_path(i) for i in range(len(imageset))]
assert len(set(paths)) == 1
template = paths[0]
new_sequence = ImageSetFactory.make_sequence(
template=template,
indices=indices,
format_class=imageset.reader().get_format_class(),
beam=beam,
detector=detector,
goniometer=goniometer,
scan=scan,
)
return new_sequence
class MetaDataUpdater:
"""
A class to manage updating the experiments metadata
"""
def __init__(self, params):
"""
Init the class
"""
from dials.util.options import geometry_phil_scope
self.params = params
# Create the geometry updater
self.update_geometry = []
update_order = []
# First add reference geometry if present
if self.params.input.reference_geometry is not None:
self.update_geometry.append(ReferenceGeometryUpdater(self.params))
update_order.append("Reference geometry")
# Then add manual geometry
working_phil = geometry_phil_scope.format(self.params)
diff_phil = geometry_phil_scope.fetch_diff(source=working_phil)
if diff_phil.as_str() != "":
self.update_geometry.append(ManualGeometryUpdater(self.params))
update_order.append("Manual geometry")
if len(update_order) > 0:
logger.info("")
logger.info("Applying input geometry in the following order:")
for i, item in enumerate(update_order, start=1):
logger.info(" %d. %s", i, item)
logger.info("")
def __call__(self, imageset_list):
"""
Transform the metadata
"""
# Import the lookup data
lookup = self.import_lookup_data(self.params)
# Convert all to ImageGrid
if self.params.input.as_grid_scan:
imageset_list = self.convert_to_grid_scan(imageset_list, self.params)
# Create the experiments
experiments = ExperimentList()
# Loop through imagesets
for imageset in imageset_list:
# Set the external lookups
imageset = self.update_lookup(imageset, lookup)
# Update the geometry
for updater in self.update_geometry:
imageset = updater(imageset)
# Check beam and detector are present
if imageset.get_beam() is None or imageset.get_detector() is None:
raise Sorry(
"""
Imageset contains no beam or detector model. This means you will be
unable to process your data.
Possible causes of this error are:
- A problem reading the images with one of the dxtbx format classes
- A lack of header information in the file itself.
You can override this by specifying the metadata as geometry parameters
"""
)
# Check if dx and dy are set
if [
imageset.external_lookup.dx.filename,
imageset.external_lookup.dy.filename,
].count(None) == 0:
imageset.update_detector_px_mm_data()
elif [
imageset.external_lookup.dx.filename,
imageset.external_lookup.dy.filename,
].count(None) == 1:
raise Sorry(
"""
Only 1 offset map is set. Need to set both dx and dy.
"""
)
# Append to new imageset list
if isinstance(imageset, ImageSequence):
if imageset.get_scan().is_still():
# make lots of experiments all pointing at one
# image set
# check if user has overridden the input - if yes, recall that these
# are 1-based (human) image numbers and that the range is inclusive
if self.params.geometry.scan.image_range:
user_start, user_end = self.params.geometry.scan.image_range
offset = imageset.get_scan().get_array_range()[0]
start, end = user_start - 1, user_end
else:
start, end = imageset.get_scan().get_array_range()
offset = 0
for j in range(start, end):
subset = imageset[j - offset : j - offset + 1]
experiments.append(
Experiment(
imageset=imageset,
beam=imageset.get_beam(),
detector=imageset.get_detector(),
goniometer=imageset.get_goniometer(),
scan=subset.get_scan(),
crystal=None,
)
)
else:
# have just one experiment
experiments.append(
Experiment(
imageset=imageset,
beam=imageset.get_beam(),
detector=imageset.get_detector(),
goniometer=imageset.get_goniometer(),
scan=imageset.get_scan(),
crystal=None,
)
)
else:
for i in range(len(imageset)):
experiments.append(
Experiment(
imageset=imageset[i : i + 1],
beam=imageset.get_beam(i),
detector=imageset.get_detector(i),
goniometer=imageset.get_goniometer(i),
scan=imageset.get_scan(i),
crystal=None,
)
)
if self.params.identifier_type:
generate_experiment_identifiers(experiments, self.params.identifier_type)
# Return the experiments
return experiments
def update_lookup(self, imageset, lookup):
from dxtbx.format.image import ImageBool, ImageDouble
if lookup.size is not None:
d = imageset.get_detector()
assert len(lookup.size) == len(d), "Incompatible size"
for s, p in zip(lookup.size, d):
assert s == p.get_image_size()[::-1], "Incompatible size"
if lookup.mask.filename is not None:
imageset.external_lookup.mask.filename = lookup.mask.filename
imageset.external_lookup.mask.data = ImageBool(lookup.mask.data)
if lookup.gain.filename is not None:
imageset.external_lookup.gain.filename = lookup.gain.filename
imageset.external_lookup.gain.data = ImageDouble(lookup.gain.data)
if lookup.dark.filename is not None:
imageset.external_lookup.pedestal.filename = lookup.dark.filename
imageset.external_lookup.pedestal.data = ImageDouble(lookup.dark.data)
if lookup.dx.filename is not None:
imageset.external_lookup.dx.filename = lookup.dx.filename
imageset.external_lookup.dx.data = ImageDouble(lookup.dx.data)
if lookup.dy.filename is not None:
imageset.external_lookup.dy.filename = lookup.dy.filename
imageset.external_lookup.dy.data = ImageDouble(lookup.dy.data)
return imageset
def import_lookup_data(self, params):
"""
Get the lookup data
"""
# Check the lookup inputs
mask_filename = None
gain_filename = None
dark_filename = None
dx_filename = None
dy_filename = None
mask = None
gain = None
dark = None
dx = None
dy = None
lookup_size = None
if params.lookup.mask is not None:
mask_filename = params.lookup.mask
with open(mask_filename, "rb") as fh:
mask = _pickle_load(fh)
if not isinstance(mask, tuple):
mask = (mask,)
lookup_size = [m.all() for m in mask]
if params.lookup.gain is not None:
gain_filename = params.lookup.gain
with open(gain_filename, "rb") as fh:
gain = _pickle_load(fh)
if not isinstance(gain, tuple):
gain = (gain,)
if lookup_size is None:
lookup_size = [g.all() for g in gain]
else:
assert len(gain) == len(lookup_size), "Incompatible size"
for s, g in zip(lookup_size, gain):
assert s == g.all(), "Incompatible size"
if params.lookup.pedestal is not None:
dark_filename = params.lookup.pedestal
with open(dark_filename, "rb") as fh:
dark = _pickle_load(fh)
if not isinstance(dark, tuple):
dark = (dark,)
if lookup_size is None:
lookup_size = [d.all() for d in dark]
else:
assert len(dark) == len(lookup_size), "Incompatible size"
for s, d in zip(lookup_size, dark):
assert s == d.all(), "Incompatible size"
if params.lookup.dx is not None:
dx_filename = params.lookup.dx
with open(dx_filename, "rb") as fh:
dx = _pickle_load(fh)
if not isinstance(dx, tuple):
dx = (dx,)
if lookup_size is None:
lookup_size = [d.all() for d in dx]
else:
assert len(dx) == len(lookup_size), "Incompatible size"
for s, d in zip(lookup_size, dx):
assert s == d.all(), "Incompatible size"
if params.lookup.dy is not None:
dy_filename = params.lookup.dy
with open(dx_filename, "rb") as fh:
dy = _pickle_load(fh)
if not isinstance(dy, tuple):
dy = (dy,)
if lookup_size is None:
lookup_size = [d.all() for d in dy]
else:
assert len(dy) == len(lookup_size), "Incompatible size"
for s, d in zip(lookup_size, dy):
assert s == d.all(), "Incompatible size"
Lookup = namedtuple("Lookup", ["size", "mask", "gain", "dark", "dx", "dy"])
Item = namedtuple("Item", ["data", "filename"])
return Lookup(
size=lookup_size,
mask=Item(data=mask, filename=mask_filename),
gain=Item(data=gain, filename=gain_filename),
dark=Item(data=dark, filename=dark_filename),
dx=Item(data=dx, filename=dx_filename),
dy=Item(data=dy, filename=dy_filename),
)
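# Hedged sketch of producing a file that import_lookup_data() can consume; the
# panel shape and output path are assumptions. The loader expects a pickled
# flex array (or a tuple of them, one per panel) whose .all() gives the panel
# shape:
#
#   import pickle
#   from dials.array_family import flex
#   mask = flex.bool(flex.grid(2527, 2463), True)  # hypothetical panel shape
#   with open("mask.pickle", "wb") as fh:
#       pickle.dump((mask,), fh)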
def convert_to_grid_scan(self, imageset_list, params):
"""
Convert the imagesets to grid scans
"""
if params.input.grid_size is None:
raise Sorry("The input.grid_size parameter is required")
result = []
for imageset in imageset_list:
result.append(
ImageGrid.from_imageset(imageset.as_imageset(), params.input.grid_size)
)
return result
class ImageImporter:
"""Class to parse the command line options."""
def __init__(self, phil=phil_scope):
"""Set the expected options."""
from dials.util.options import ArgumentParser
# Create the option parser
usage = "dials.import [options] /path/to/image/files"
self.parser = ArgumentParser(
usage=usage,
sort_options=True,
phil=phil,
read_experiments_from_images=True,
epilog=help_message,
)
def import_image(self, args=None):
"""Parse the options."""
# Parse the command line arguments in two passes to set up logging early
params, options = self.parser.parse_args(
args=args, show_diff_phil=False, quick_parse=True
)
# Configure logging, if this is the main process
if __name__ == "__main__":
from dials.util import log
log.config(verbosity=options.verbose, logfile=params.output.log)
from dials.util.version import dials_version
logger.info(dials_version())
# Parse the command line arguments completely
if params.input.ignore_unhandled:
params, options, unhandled = self.parser.parse_args(
args=args, show_diff_phil=False, return_unhandled=True
)
# Remove any False values from unhandled (eliminate empty strings)
unhandled = [x for x in unhandled if x]
else:
params, options = self.parser.parse_args(args=args, show_diff_phil=False)
unhandled = None
# Log the diff phil
diff_phil = self.parser.diff_phil.as_str()
if diff_phil:
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
# Print a warning if something unhandled
if unhandled:
msg = "Unable to handle the following arguments:\n"
msg += "\n".join([" %s" % a for a in unhandled])
msg += "\n"
logger.warning(msg)
# Print help if no input
if len(params.input.experiments) == 0 and not (
params.input.template or params.input.directory
):
self.parser.print_help()
return
# Re-extract the imagesets to rebuild experiments from
imagesets = _extract_or_read_imagesets(params)
metadata_updater = MetaDataUpdater(params)
experiments = metadata_updater(imagesets)
# Compute some numbers
num_sweeps = 0
num_still_sequences = 0
num_stills = 0
num_images = 0
# importing a lot of experiments all pointing at one imageset should
# work gracefully
counted_imagesets = []
for e in experiments:
if e.imageset in counted_imagesets:
continue
if isinstance(e.imageset, ImageSequence):
if e.imageset.get_scan().is_still():
num_still_sequences += 1
else:
num_sweeps += 1
else:
num_stills += 1
num_images += len(e.imageset)
counted_imagesets.append(e.imageset)
format_list = {str(e.imageset.get_format_class()) for e in experiments}
# Print out some bulk info
logger.info("-" * 80)
for f in format_list:
logger.info(" format: %s", f)
logger.info(" num images: %d", num_images)
logger.info(" sequences:")
logger.info(" still: %d", num_still_sequences)
logger.info(" sweep: %d", num_sweeps)
logger.info(" num stills: %d", num_stills)
# Print out info for all experiments
for experiment in experiments:
# Print some experiment info - override the output of image range
# if appropriate
image_range = params.geometry.scan.image_range
if isinstance(experiment.imageset, ImageSequence):
imageset_type = "sequence"
else:
imageset_type = "stills"
logger.debug("-" * 80)
logger.debug(" format: %s", str(experiment.imageset.get_format_class()))
logger.debug(" imageset type: %s", imageset_type)
if image_range is None:
logger.debug(" num images: %d", len(experiment.imageset))
else:
logger.debug(" num images: %d", image_range[1] - image_range[0] + 1)
logger.debug("")
logger.debug(experiment.imageset.get_beam())
logger.debug(experiment.imageset.get_goniometer())
logger.debug(experiment.imageset.get_detector())
logger.debug(experiment.imageset.get_scan())
# Only allow a single sequence
if params.input.allow_multiple_sequences is False:
self.assert_single_sequence(experiments, params)
# Write the experiments to file
self.write_experiments(experiments, params)
def write_experiments(self, experiments, params):
"""
Output the experiments to file.
"""
if params.output.experiments:
logger.info("-" * 80)
logger.info("Writing experiments to %s", params.output.experiments)
experiments.as_file(
params.output.experiments, compact=params.output.compact
)
def assert_single_sequence(self, experiments, params):
"""
Print an error message if more than 1 sequence
"""
sequences = [
e.imageset for e in experiments if isinstance(e.imageset, ImageSequence)
]
if len(sequences) > 1:
# Print some info about multiple sequences
self.diagnose_multiple_sequences(sequences, params)
# Raise exception
raise Sorry(
"""
More than 1 sequence was found. Two things may be happening here:
1. There really is more than 1 sequence. If you expected this to be the
case, set the parameter allow_multiple_sequences=True. If you don't
expect this, then check the input to dials.import.
2. There may be something wrong with your image headers (for example,
the rotation ranges of each image may not match up). You should
investigate what went wrong, but you can force dials.import to treat
your images as a single sequence by using the template=image_####.cbf
parameter (see help).
"""
)
def diagnose_multiple_sequences(self, sequences, params):
"""
Print a diff between sequences.
"""
logger.info("")
for i in range(1, len(sequences)):
logger.info("=" * 80)
logger.info("Diff between sequence %d and %d", i - 1, i)
logger.info("")
self.print_sequence_diff(sequences[i - 1], sequences[i], params)
logger.info("=" * 80)
logger.info("")
@staticmethod
def print_sequence_diff(sequence1, sequence2, params):
"""
Print a diff between sequences.
"""
logger.info(
compare.sequence_diff(
sequence1, sequence2, tolerance=params.input.tolerance
)
)
@show_mail_handle_errors()
def run(args=None):
importer = ImageImporter()
importer.import_image(args)
if __name__ == "__main__":
run()
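# Hedged programmatic invocation sketch; the argument strings are hypothetical
# but mirror the command line "dials.import [options] /path/to/image/files":
#
#   run(["/data/images/image_0001.cbf", "output.experiments=imported.expt"])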
|
dials/dials
|
command_line/dials_import.py
|
Python
|
bsd-3-clause
| 34,891
|
[
"CRYSTAL"
] |
c1c34b7f3521c21e36a887c7c281ca41c27bd99b0af70b575c0f5db3b29ab4b4
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from skbio import DistanceMatrix
from skbio.stats.distance import bioenv
from skbio.stats.distance._bioenv import _scale
from skbio.util import get_data_path
class BIOENVTests(TestCase):
"""Results were verified with R 3.0.2 and vegan 2.0-10 (vegan::bioenv)."""
def setUp(self):
# The test dataset used here is a subset of the Lauber et al. 2009
# "88 Soils" dataset. It has been altered to exercise various aspects
# of the code, including (but not limited to):
#
# - order of distance matrix IDs and IDs in data frame (metadata) are
# not exactly the same
# - data frame has an extra sample that is not in the distance matrix
# - this extra sample has non-numeric and missing values in some of its
# cells
#
# Additional variations of the distance matrix and data frame are used
# to test different orderings of rows/columns, extra non-numeric data
# frame columns, etc.
#
# This dataset is also useful because it is non-trivial in size (6
# samples, 11 environment variables) and it includes positive/negative
# floats and integers in the data frame.
self.dm = DistanceMatrix.read(get_data_path('dm.txt'))
# Reordered rows and columns (i.e., different ID order). Still
# conceptually the same distance matrix.
self.dm_reordered = DistanceMatrix.read(
get_data_path('dm_reordered.txt'))
self.df = pd.read_csv(get_data_path('df.txt'), sep='\t', index_col=0)
# Similar to the above data frame, except that it has an extra
# non-numeric column, and some of the other rows and columns have been
# reordered.
self.df_extra_column = pd.read_csv(
get_data_path('df_extra_column.txt'), sep='\t', index_col=0)
# All columns in the original data frame (these are all numeric
# columns).
self.cols = self.df.columns.tolist()
# This second dataset is derived from vegan::bioenv's example dataset
# (varespec and varechem). The original dataset includes a site x
# species table (e.g., OTU table) and a data frame of environmental
# variables. Since the bioenv function defined here accepts a distance
# matrix, we use a Bray-Curtis distance matrix that is derived from the
# site x species table (this matches what is done by vegan::bioenv when
# provided an OTU table, using their default distance measure). The
# data frame only includes the numeric environmental variables we're
# interested in for these tests: log(N), P, K, Ca, pH, Al
self.dm_vegan = DistanceMatrix.read(
get_data_path('bioenv_dm_vegan.txt'))
self.df_vegan = pd.read_csv(
get_data_path('bioenv_df_vegan.txt'), sep='\t',
converters={0: str})
self.df_vegan.set_index('#SampleID', inplace=True)
# Load expected results.
self.exp_results = pd.read_csv(get_data_path('exp_results.txt'),
sep='\t', index_col=0)
self.exp_results_single_column = pd.read_csv(
get_data_path('exp_results_single_column.txt'), sep='\t',
index_col=0)
self.exp_results_different_column_order = pd.read_csv(
get_data_path('exp_results_different_column_order.txt'), sep='\t',
index_col=0)
self.exp_results_vegan = pd.read_csv(
get_data_path('bioenv_exp_results_vegan.txt'), sep='\t',
index_col=0)
def test_bioenv_all_columns_implicit(self):
# Test with all columns in data frame (implicitly).
obs = bioenv(self.dm, self.df)
assert_frame_equal(obs, self.exp_results)
# Should get the same results if order of rows/cols in distance matrix
# is changed.
obs = bioenv(self.dm_reordered, self.df)
assert_frame_equal(obs, self.exp_results)
def test_bioenv_all_columns_explicit(self):
# Test with all columns being specified.
obs = bioenv(self.dm, self.df, columns=self.cols)
assert_frame_equal(obs, self.exp_results)
# Test against a data frame that has an extra non-numeric column and
# some of the rows and columns reordered (we should get the same
# result since we're specifying the same columns in the same order).
obs = bioenv(self.dm, self.df_extra_column, columns=self.cols)
assert_frame_equal(obs, self.exp_results)
def test_bioenv_single_column(self):
obs = bioenv(self.dm, self.df, columns=['PH'])
assert_frame_equal(obs, self.exp_results_single_column)
def test_bioenv_different_column_order(self):
# Specifying columns in a different order will change the row labels in
# the results data frame as the column subsets will be reordered, but
# the actual results (e.g., correlation coefficients) shouldn't change.
obs = bioenv(self.dm, self.df, columns=self.cols[::-1])
assert_frame_equal(obs, self.exp_results_different_column_order)
def test_bioenv_no_side_effects(self):
# Deep copies of both primary inputs.
dm_copy = self.dm.copy()
df_copy = self.df.copy(deep=True)
bioenv(self.dm, self.df)
# Make sure we haven't modified the primary input in some way (e.g.,
# with scaling, type conversions, etc.).
self.assertEqual(self.dm, dm_copy)
assert_frame_equal(self.df, df_copy)
def test_bioenv_vegan_example(self):
# The correlation coefficient in the first row of the
# results (rho=0.2516) is different from the correlation coefficient
# computed by vegan (rho=0.2513). This seems to occur due to
# differences in numerical precision when calculating the Euclidean
# distances, which affects the rank calculations in Spearman
# (specifically, dealing with ties). The ranked distances end up being
# slightly different between vegan and our implementation because some
# distances are treated as ties in vegan but treated as distinct values
# in our implementation. This explains the difference in rho values. I
# verified that using Pearson correlation instead of Spearman on the
# same distances yields *very* similar results. Thus, the discrepancy
# seems to stem from differences when computing ranks/ties.
obs = bioenv(self.dm_vegan, self.df_vegan)
assert_frame_equal(obs, self.exp_results_vegan)
def test_bioenv_no_distance_matrix(self):
with self.assertRaises(TypeError):
bioenv('breh', self.df)
def test_bioenv_no_data_frame(self):
with self.assertRaises(TypeError):
bioenv(self.dm, None)
def test_bioenv_duplicate_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=self.cols + ['PH'])
def test_bioenv_no_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=[])
def test_bioenv_missing_columns(self):
with self.assertRaises(ValueError):
bioenv(self.dm, self.df, columns=self.cols + ['brofist'])
def test_bioenv_missing_distance_matrix_ids(self):
df = self.df[1:]
with self.assertRaises(ValueError):
bioenv(self.dm, df)
def test_bioenv_nans(self):
df = self.df.replace(53.9, np.nan)
with self.assertRaises(ValueError):
bioenv(self.dm, df)
def test_bioenv_nonnumeric_columns(self):
df = self.df.replace(2400, 'no cog yay')
with self.assertRaises(TypeError):
bioenv(self.dm, df)
with self.assertRaises(TypeError):
bioenv(self.dm, self.df_extra_column)
def test_scale_single_column(self):
df = pd.DataFrame([[1], [0], [2]], index=['A', 'B', 'C'],
columns=['foo'])
exp = pd.DataFrame([[0.0], [-1.0], [1.0]], index=['A', 'B', 'C'],
columns=['foo'])
obs = _scale(df)
assert_frame_equal(obs, exp)
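# Worked check of the expected values above: the column is [1, 0, 2], so the
# mean is 1.0 and the sample standard deviation (ddof=1) is
# sqrt((0**2 + (-1)**2 + 1**2) / 2) = 1.0, giving
# [(1-1)/1, (0-1)/1, (2-1)/1] = [0.0, -1.0, 1.0].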
def test_scale_multiple_columns(self):
# Floats and ints, including positives and negatives.
df = pd.DataFrame([[7.0, 400, -1],
[8.0, 530, -5],
[7.5, 450, 1],
[8.5, 810, -4]],
index=['A', 'B', 'C', 'D'],
columns=['pH', 'Elevation', 'negatives'])
exp = pd.DataFrame([[-1.161895, -0.805979, 0.453921],
[0.387298, -0.095625, -0.998625],
[-0.387298, -0.532766, 1.180194],
[1.161895, 1.434369, -0.635489]],
index=['A', 'B', 'C', 'D'],
columns=['pH', 'Elevation', 'negatives'])
obs = _scale(df)
assert_frame_equal(obs, exp)
def test_scale_no_variance(self):
df = pd.DataFrame([[-7.0, -1.2], [6.2, -1.2], [2.9, -1.2]],
index=['A', 'B', 'C'], columns=['foo', 'bar'])
with self.assertRaises(ValueError):
_scale(df)
if __name__ == '__main__':
main()
|
Kleptobismol/scikit-bio
|
skbio/stats/distance/tests/test_bioenv.py
|
Python
|
bsd-3-clause
| 9,846
|
[
"scikit-bio"
] |
26cc695dfa811a98255979fb9173a7baf0f4c6dd040c0d5bef241cca412b4d6c
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose representation training base code."""
import os
import time
from absl import flags
from absl import logging
import tensorflow as tf
from poem.core import data_utils
from poem.core import pipeline_utils
from poem.cv_mim import algorithms
from poem.cv_mim import pipelines
from poem.cv_mim import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('log_dir_path', None, 'Path to save checkpoints and logs.')
flags.mark_flag_as_required('log_dir_path')
flags.DEFINE_string('input_table', None,
'Comma-separated list of input tf.Example table patterns.')
flags.mark_flag_as_required('input_table')
flags.DEFINE_string('keypoint_profile_name_2d', 'LEGACY_2DCOCO13',
'Profile name for input 2D keypoints.')
flags.DEFINE_string(
'keypoint_profile_name_3d', 'LEGACY_3DH36M17',
'Profile name for input 3D keypoints '
'(H36M: LEGACY_3DH36M17; NTU-RGBD: 3DSTD13).')
# See `common_module.SUPPORTED_TRAINING_MODEL_INPUT_KEYPOINT_TYPES`.
flags.DEFINE_string(
'model_input_keypoint_type', '2D_INPUT_AND_3D_PROJECTION',
'Type of model input keypoints (H36M: 2D_INPUT_AND_3D_PROJECTION; NTU-RGBD:'
' 2D_INPUT).')
flags.DEFINE_enum('algorithm_type', 'DISENTANGLE',
algorithms.SUPPORTED_ALGORITHM_TYPES,
'Type of the algorithm used for training.')
flags.DEFINE_enum('fusion_op_type', 'MOE', algorithms.SUPPORTED_FUSION_OP_TYPES,
'Type of the fusion operation for encoder.')
flags.DEFINE_integer('pose_embedding_dim', 32,
'Dimension of the pose embedding.')
flags.DEFINE_integer('view_embedding_dim', 32,
'Dimension of the view embedding.')
flags.DEFINE_enum('embedder_type', 'POINT', ['POINT', 'GAUSSIAN'],
'Type of the embedder.')
flags.DEFINE_float('view_loss_weight', 5.0, 'Weight of view loss.')
flags.DEFINE_float('regularization_loss_weight', 1.0,
'Weight of (KL-divergence) regularization loss.')
flags.DEFINE_float('disentangle_loss_weight', 0.5,
'Weight of disentanglement loss.')
flags.DEFINE_integer(
'shuffle_buffer_size', 2097152,
'Input shuffle buffer size. A large number benefits shuffling quality.')
flags.DEFINE_float('learning_rate', 2e-2, 'Initial learning rate.')
flags.DEFINE_integer('batch_size', 256, 'Batch size for training.')
flags.DEFINE_integer('num_iterations', 5000000,
'Number of training iterations.')
flags.DEFINE_boolean('compile', True,
'Compiles functions for faster tf training.')
logging.set_verbosity('info')
logging.set_stderrthreshold('info')
def _validate(common_module):
"""Validates training configurations."""
# Validate flags.
validate_flag = common_module.validate
validate_flag(FLAGS.model_input_keypoint_type,
common_module.SUPPORTED_TRAINING_MODEL_INPUT_KEYPOINT_TYPES)
def run(input_dataset_class, common_module, keypoint_profiles_module,
input_example_parser_creator, keypoint_preprocessor_3d):
"""Runs training pipeline.
Args:
input_dataset_class: An input dataset class that matches input table type.
common_module: A Python module that defines common flags and constants.
keypoint_profiles_module: A Python module that defines keypoint profiles.
input_example_parser_creator: A function handle for creating data parser
function. If None, uses the default parser creator.
keypoint_preprocessor_3d: A function handle for preprocessing raw 3D
keypoints.
"""
_validate(common_module)
log_dir_path = FLAGS.log_dir_path
pipeline_utils.create_dir_and_save_flags(flags, log_dir_path,
'all_flags.train.json')
# Setup summary writer.
summary_writer = tf.summary.create_file_writer(
os.path.join(log_dir_path, 'train_logs'), flush_millis=10000)
# Setup configuration.
keypoint_profile_2d = keypoint_profiles_module.create_keypoint_profile_or_die(
FLAGS.keypoint_profile_name_2d)
keypoint_profile_3d = keypoint_profiles_module.create_keypoint_profile_or_die(
FLAGS.keypoint_profile_name_3d)
model = algorithms.get_algorithm(
algorithm_type=FLAGS.algorithm_type,
pose_embedding_dim=FLAGS.pose_embedding_dim,
view_embedding_dim=FLAGS.view_embedding_dim,
fusion_op_type=FLAGS.fusion_op_type,
view_loss_weight=FLAGS.view_loss_weight,
regularization_loss_weight=FLAGS.regularization_loss_weight,
disentangle_loss_weight=FLAGS.disentangle_loss_weight,
embedder_type=FLAGS.embedder_type)
optimizers = algorithms.get_optimizers(
algorithm_type=FLAGS.algorithm_type, learning_rate=FLAGS.learning_rate)
global_step = optimizers['encoder_optimizer'].iterations
ckpt_manager, _, _ = utils.create_checkpoint(
log_dir_path, **optimizers, model=model, global_step=global_step)
# Setup the training dataset.
dataset = pipelines.create_dataset_from_tables(
[FLAGS.input_table], [FLAGS.batch_size],
num_instances_per_record=2,
shuffle=True,
num_epochs=None,
drop_remainder=True,
keypoint_names_2d=keypoint_profile_2d.keypoint_names,
keypoint_names_3d=keypoint_profile_3d.keypoint_names,
shuffle_buffer_size=FLAGS.shuffle_buffer_size,
dataset_class=input_dataset_class,
input_example_parser_creator=input_example_parser_creator)
def train_one_iteration(inputs):
"""Trains the model for one iteration.
Args:
inputs: A dictionary for training inputs.
Returns:
The training loss for this iteration.
"""
_, side_outputs = pipelines.create_model_input(
inputs, FLAGS.model_input_keypoint_type, keypoint_profile_2d,
keypoint_profile_3d)
keypoints_2d = side_outputs[common_module.KEY_PREPROCESSED_KEYPOINTS_2D]
keypoints_3d, _ = keypoint_preprocessor_3d(
inputs[common_module.KEY_KEYPOINTS_3D],
keypoint_profile_3d,
normalize_keypoints_3d=True)
keypoints_2d, keypoints_3d = data_utils.shuffle_batches(
[keypoints_2d, keypoints_3d])
return model.train((keypoints_2d, keypoints_3d), **optimizers)
if FLAGS.compile:
train_one_iteration = tf.function(train_one_iteration)
record_every_n_steps = min(100, FLAGS.num_iterations)
save_ckpt_every_n_steps = min(10000, FLAGS.num_iterations)
with summary_writer.as_default():
with tf.summary.record_if(global_step % record_every_n_steps == 0):
start = time.time()
for inputs in dataset:
if global_step >= FLAGS.num_iterations:
break
model_losses = train_one_iteration(inputs)
duration = time.time() - start
start = time.time()
for tag, losses in model_losses.items():
for name, loss in losses.items():
tf.summary.scalar(
'train/{}/{}'.format(tag, name), loss, step=global_step)
for tag, optimizer in optimizers.items():
tf.summary.scalar(
'train/{}_learning_rate'.format(tag),
optimizer.lr,
step=global_step)
tf.summary.scalar('train/batch_time', duration, step=global_step)
tf.summary.scalar('global_step/sec', 1 / duration, step=global_step)
if global_step % record_every_n_steps == 0:
logging.info('Iter[{}/{}], {:.6f}s/iter, loss: {:.4f}'.format(
global_step.numpy(), FLAGS.num_iterations, duration,
model_losses['encoder']['total_loss'].numpy()))
# Save checkpoint.
if global_step % save_ckpt_every_n_steps == 0:
ckpt_manager.save(checkpoint_number=global_step)
logging.info('Checkpoint saved at step %d.', global_step.numpy())
|
google-research/google-research
|
poem/cv_mim/train_base.py
|
Python
|
apache-2.0
| 8,343
|
[
"Gaussian",
"MOE"
] |
f608d7b4fbc76575a8efa6f1ca932f8857f87945558530f0eb07ed22647e3cec
|
""" 2-input XOR example using Izhikevich's spiking neuron model. """
from __future__ import print_function
import os
import matplotlib.pyplot as plt
from matplotlib import patches
from neatsociety import population, iznn, visualize
from neatsociety.config import Config
# Network inputs and expected outputs.
xor_inputs = ((0, 0), (0, 1), (1, 0), (1, 1))
xor_outputs = (0, 1, 1, 0)
# Maximum amount of simulated time (in milliseconds) to wait for the network to produce an output.
max_time = 50.0
# Parameters for "fast spiking" Izhikevitch neurons, simulation time step 0.25 millisecond.
iz_params = [0.1, 0.2, -65.0, 2.0, 0.25]
def compute_output(t0, t1):
'''Compute the network's output based on the "time to first spike" of the two output neurons.'''
if t0 is None or t1 is None:
# If one of the output neurons failed to fire within the allotted time,
# give a response which produces a large error.
return -1.0
else:
# If the output neurons fire within 1.0 milliseconds of each other,
# the output is 1, and if they fire more than 11 milliseconds apart,
# the output is 0, with linear interpolation between 1 and 11 milliseconds.
response = 1.1 - 0.1 * abs(t0 - t1)
return max(0.0, min(1.0, response))
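# Worked example: if the output neurons first spike at t0 = 10.0 ms and
# t1 = 14.0 ms, then |t0 - t1| = 4.0 ms and the response is
# 1.1 - 0.1 * 4.0 = 0.7, which falls between the clipped extremes of 0 and 1.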
def simulate(genome):
# Create a network of Izhikevich neurons based on the given genome.
net = iznn.create_phenotype(genome, *iz_params)
dt = iz_params[-1]
sum_square_error = 0.0
simulated = []
for inputData, outputData in zip(xor_inputs, xor_outputs):
neuron_data = {}
for i, n in net.neurons.items():
neuron_data[i] = []
# Reset the network, apply the XOR inputs, and run for the maximum allowed time.
net.reset()
net.set_inputs(inputData)
t0 = None
t1 = None
v0 = None
v1 = None
num_steps = int(max_time / dt)
for j in range(num_steps):
t = dt * j
output = net.advance()
# Capture the time and neuron membrane potential for later use if desired.
for i, n in net.neurons.items():
neuron_data[i].append((t, n.v))
# Remember time and value of the first output spikes from each neuron.
if t0 is None and output[0] > 0:
t0, v0 = neuron_data[net.outputs[0]][-2]
if t1 is None and output[1] > 0:
t1, v1 = neuron_data[net.outputs[1]][-2]
response = compute_output(t0, t1)
sum_square_error += (response - outputData) ** 2
simulated.append((inputData, outputData, t0, t1, v0, v1, neuron_data))
return sum_square_error, simulated
def eval_fitness(genomes):
for genome in genomes:
sum_square_error, simulated = simulate(genome)
genome.fitness = 1 - sum_square_error
def run():
# Load the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config = Config(os.path.join(local_dir, 'xor2_config'))
# For this network, we use two output neurons and use the difference between
# the "time to first spike" to determine the network response. There are
# probably a great many different choices one could make for an output encoding,
# and this choice may not be the best for tackling a real problem.
config.output_nodes = 2
pop = population.Population(config)
pop.run(eval_fitness, 200)
print('Number of evaluations: {0}'.format(pop.total_evaluations))
# Visualize the winner network and plot statistics.
winner = pop.statistics.best_genome()
node_names = {0: 'A', 1: 'B', 2: 'Out1', 3: 'Out2'}
visualize.draw_net(winner, view=True, node_names=node_names)
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
# Verify network output against training data.
print('\nBest network output:')
net = iznn.create_phenotype(winner, *iz_params)
sum_square_error, simulated = simulate(winner)
# Create a plot of the traces out to the max time for each set of inputs.
plt.figure(figsize=(12, 12))
for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
response = compute_output(t0, t1)
print("{0!r} expected {1:.3f} got {2:.3f}".format(inputData, outputData, response))
axes = plt.subplot(4, 1, r + 1)
plt.title("Traces for XOR input {{{0:.1f}, {1:.1f}}}".format(*inputData), fontsize=12)
for i, s in neuron_data.items():
if i in net.outputs:
t, v = zip(*s)
plt.plot(t, v, "-", label="neuron {0:d}".format(i))
# Circle the first peak of each output.
circle0 = patches.Ellipse((t0, v0), 1.0, 10.0, color='r', fill=False)
circle1 = patches.Ellipse((t1, v1), 1.0, 10.0, color='r', fill=False)
axes.add_artist(circle0)
axes.add_artist(circle1)
plt.ylabel("Potential (mv)", fontsize=10)
plt.ylim(-100, 50)
plt.tick_params(labelsize=8)
plt.grid()
plt.xlabel("Time (in ms)", fontsize=10)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig("traces.png", dpi=90)
plt.show()
if __name__ == '__main__':
run()
|
machinebrains/neat-python
|
examples/xor/xor2_spiking.py
|
Python
|
bsd-3-clause
| 5,293
|
[
"NEURON"
] |
70a4153eb9bd4cc72b0d014d5193c906b0981edc11d63774a6e58456703a6779
|
#! /usr/bin/env python
# This version uses NEST's RandomConvergentConnect functions.
import cynest as nest
import cynest.raster_plot
import time
from numpy import exp
nest.ResetKernel()
startbuild = time.time()
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
# Parameters for asynchronous irregular firing
g = 5.0
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
order = 2500
NE = 4*order
NI = 1*order
N_neurons = NE+NI
N_rec = 50 # record from 50 neurons
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
# Initialize the parameters of the integrate and fire neuron
tauMem = 20.0
theta = 20.0
J = 0.1 # postsynaptic amplitude in mV
J_ex = J
J_in = -g*J_ex
nu_th = theta/(J*CE*tauMem)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
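# Worked numbers for the defaults above: NE = 4 * 2500 = 10000, so
# CE = int(0.1 * 10000) = 1000 excitatory synapses per neuron. The threshold
# rate is nu_th = 20.0 / (0.1 * 1000 * 20.0) = 0.01 spikes/ms (10 Hz), hence
# nu_ex = 2.0 * 0.01 = 0.02 spikes/ms, and the Poisson generator rate is
# p_rate = 1000.0 * 0.02 * 1000 = 20000.0 Hz.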
nest.SetKernelStatus({"resolution": dt, "print_time": True})
print("Building network")
neuron_params= {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
nest.SetDefaults("iaf_psc_delta", neuron_params)
nodes_ex=nest.Create("iaf_psc_delta",NE)
nodes_in=nest.Create("iaf_psc_delta",NI)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
noise=nest.Create("poisson_generator")
espikes=nest.Create("spike_detector")
ispikes=nest.Create("spike_detector")
nest.SetStatus([espikes],[{"label": "brunel-py-ex",
"withtime": True,
"withgid": True}])
nest.SetStatus([ispikes],[{"label": "brunel-py-in",
"withtime": True,
"withgid": True}])
print("Connecting devices.")
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
nest.DivergentConnect(noise,nodes_ex,model="excitatory")
nest.DivergentConnect(noise,nodes_in,model="excitatory")
nest.ConvergentConnect(list(range(1,N_rec+1)),espikes,model="excitatory")
nest.ConvergentConnect(list(range(NE+1,NE+1+N_rec)),ispikes,model="excitatory")
print("Connecting network.")
# Connect the recurrent network: each target neuron receives CE random
# excitatory and CI random inhibitory inputs via RandomConvergentConnect.
# The first call wires the excitatory population, the second the inhibitory.
print("Excitatory connections")
nest.RandomConvergentConnect(nodes_ex, nodes_ex+nodes_in, CE,model="excitatory")
print("Inhibitory connections")
nest.RandomConvergentConnect(nodes_in, nodes_ex+nodes_in, CI,model="inhibitory")
endbuild=time.time()
print("Simulating.")
nest.Simulate(simtime)
endsimulate= time.time()
events_ex = nest.GetStatus(espikes,"n_events")[0]
rate_ex = events_ex/simtime*1000.0/N_rec
events_in = nest.GetStatus(ispikes,"n_events")[0]
rate_in = events_in/simtime*1000.0/N_rec
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
print("Brunel network simulation (Python)")
print("Number of neurons :", N_neurons)
print("Number of synapses:", num_synapses)
print(" Exitatory :", int(CE*N_neurons)+N_neurons)
print(" Inhibitory :", int(CI*N_neurons))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
nest.raster_plot.from_device(espikes, hist=True)
nest.raster_plot.show()
|
QJonny/CyNest
|
cynest/examples/brunel-delta-nest.py
|
Python
|
gpl-2.0
| 3,818
|
[
"NEURON"
] |
e0b853fc7554ce37fe17c865ed7989f4b02730ae9ea5e1ee39331467d169b8fa
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
gBaseLocalSiteSection = "/LocalSite"
def gridEnv():
"""
Return location of gridenv file to get a UI environment
"""
return gConfig.getValue(cfgPath(gBaseLocalSiteSection, "GridEnv"), "")
|
ic-hep/DIRAC
|
src/DIRAC/ConfigurationSystem/Client/Helpers/Local.py
|
Python
|
gpl-3.0
| 428
|
[
"DIRAC"
] |
b6f49e6f640ba0636205ea9de6604d5055ff3b6b02c4cc4050327d49d7e1c44f
|
#!/usr/bin/env python
import logging
import json
import time
import multiprocessing
import Queue
from cbtestlib.membase.api.rest_client import RestConnection
from cbtestlib.membase.api.exception import ServerUnavailableException
from tabula.table import Table
from tabula.section import Section
from seriesly import Seriesly
import seriesly.exceptions
from metadata.visit import retrieve_meta
from paint import TABULA_CONV_FUNCS, TABULA_DECO_FUNCS
from server import Server
from mc_source import MemcachedSource
from mc_collector import MemcachedCollector
from json_handler import JsonHandler
from seriesly_handler import SerieslyHandler
from data_helper import DataHelper
# configuration for each tabula.section
SECTION_CONFIG = {"settings": {"id": 0,
"show_row_hdrs": False,
"show_col_hdrs": False,
"show_col_hdr_in_cell": True},
"storage": {"id": 1,
"show_row_hdrs": False,
"show_col_hdrs": False,
"show_col_hdr_in_cell": True},
"buckets": {"id": 2,
"show_row_hdrs": False,
"show_col_hdrs": True,
"show_col_hdr_in_cell": False},
"nodes": {"id": 3,
"show_row_hdrs": False,
"show_col_hdrs": True,
"show_col_hdr_in_cell": False},
"Memory Stats": {"id": 4,
"show_row_hdrs": False,
"show_col_hdrs": True,
"show_col_hdr_in_cell": False}}
tbl = Table("cbtop", sep=" ")
cur_row = {} # {sec_nam: row name}
mc_jobs = multiprocessing.Queue(1)
mc_stats = multiprocessing.Queue(20)
store = None
class SerieslyStore(object):
def __init__(self, host, dbslow, dbfast):
self.slow = {}
self.fast = {}
self.dbslow = dbslow
self.dbfast = dbfast
self.seriesly = Seriesly(host=host)
try:
dbs = self.seriesly.list_dbs()
except seriesly.exceptions.ConnectionError, e:
logging.error("unable to connect to seriesly server: %s" % e)
return
if dbslow not in dbs:
self.seriesly.create_db(dbslow)
if dbfast not in dbs:
self.seriesly.create_db(dbfast)
def clear(self):
self.slow = {}
self.fast = {}
def add_fast(self, key, val):
self.fast[key] = val
def add_slow(self, key, val):
self.slow[key] = val
def persist(self):
try:
if self.slow:
self.seriesly[self.dbslow].append(self.slow)
if self.fast:
self.seriesly[self.dbfast].append(self.fast)
except seriesly.exceptions.ConnectionError, e:
logging.error("unable to connect to seriesly server: %s" % e)
return False
return True
def _show_stats(key, val, meta_inf):
"""
Show stats on the ascii table
"""
if not tbl or not isinstance(tbl, Table):
return False
if not meta_inf or not "section" in meta_inf:
logging.debug("unable to show data: key %s, val %s, invalid meta info"
% (key, val))
return False
# OK, we don't deal with unicode for now
sec_nam = str(meta_inf["section"])
val = str(val)
section = tbl.get_section(sec_nam)
if not section:
if sec_nam in SECTION_CONFIG:
config = SECTION_CONFIG[sec_nam]
section = Section(sec_nam, config["id"],
conv_funcs=TABULA_CONV_FUNCS,
deco_funcs=TABULA_DECO_FUNCS)
section.config(config["show_row_hdrs"],
config["show_col_hdrs"],
config["show_col_hdr_in_cell"])
else:
return False
tbl.add_section(section)
if "col" in meta_inf:
col = str(meta_inf["col"])
else:
col = str(key)
if "new_row" in meta_inf:
# create a new row using the col name
section.add_cell(val, col, val, "S50", meta=meta_inf)
cur_row[sec_nam] = val
return True
if not sec_nam in cur_row:
logging.debug("stats %s is not associated with a section" % key)
return True
row = cur_row[sec_nam]
section.add_cell(row, col, val, type="S50", meta=meta_inf)
return True
def show_all_stats(stats, meta):
if not isinstance(stats, dict) or not isinstance(meta, dict):
logging.error("failed to show all stats : invalid data")
return False
for key, val in stats.iteritems():
if not key in meta:
continue
_show_stats(key, val, meta[key])
def store_fast(root, parents, data, meta, coll,
key, val, meta_val, meta_inf, level):
"""Store time-series data into fast-changing database"""
store.add_fast(key, val)
return _show_stats(key, val, meta_inf)
def store_slow(root, parents, data, meta, coll,
key, val, meta_val, meta_inf, level):
"""Store time-series data into slow-changing database"""
store.add_slow(key, val)
return _show_stats(key, val, meta_inf)
def url_before(context, path):
return context, path
def url_after(context, path, root):
pass
def retrieve_data(context, path):
"""Retrieve json data from a couchbase server through REST calls"""
# TODO: use cbtestlib
server = Server(context.get("host", "127.0.0.1"))
rest = RestConnection(server)
api = rest.baseUrl + path
try:
status, content, header = rest._http_request(api) # TODO: expose
except ServerUnavailableException, e:
logging.error("unable to retrieve data from %s: %s" % (server, e))
return retrieve_meta(context, path)
if status:
return json.loads(content)
return retrieve_meta(context, path)
def collect_mc_stats(root, parents, data, meta, coll,
key, val, meta_val, meta_inf, level=0):
"""
Collect memcached stats
Dump time series data a json snapshot
"""
if not isinstance(val, list):
logging.error(
"unable to collect mc stats: val must be a list - %s" % val)
return False
try:
stats, meta = mc_stats.get(block=False)
show_all_stats(stats, meta)
except Queue.Empty:
pass
try:
mc_jobs.put([root, parents, val], block=False)
return True
except Queue.Full:
logging.debug("unable to collect mcstats : queue is full")
return False
def mc_worker(jobs, stats, ctl, store, timeout=5):
logging.info("mc_worker started")
while ctl["run_ok"]:
try:
root, parents, val = jobs.get(block=True, timeout=timeout)
except Queue.Empty:
logging.debug("mc_worker hasn't received jobs for %s seconds"
% timeout)
continue
start = time.time()
for server in val:
try:
ip, port = server.split(":")
except (ValueError, AttributeError), e:
logging.error("unable to collect mc stats from %s : %s"
% (server, e))
continue
mc_server = Server(ip)
# get bucket name from root and parent nodes
bucket = DataHelper.get_bucket(root, parents)
# initialize memcached source
mc_source = MemcachedSource(mc_server, bucket)
# initialize handlers to dump data json doc
j_handler = JsonHandler()
s_handler = SerieslyHandler(store)
# collect data from source and emit to handlers
mc_coll = MemcachedCollector([mc_source], [j_handler, s_handler])
mc_coll.collect()
mc_coll.emit()
stats.put([mc_source.fast, mc_source.meta], block=True)
stats.put([mc_source.slow, mc_source.meta], block=True)
delta = time.time() - start
logging.debug("collected mc stats from %s, took %s seconds"
% (val, delta))
if delta < timeout:
logging.debug("mc_worker sleep for %s seconds" % (timeout - delta))
time.sleep(timeout - delta)
logging.info("mc_worker stopped")
|
couchbaselabs/cbtop
|
libcbtop/visit_cb.py
|
Python
|
apache-2.0
| 8,502
|
[
"VisIt"
] |
80656020fb4e69b24d727edd52151ad06ce0b1173caa43315d91e525ee0faa52
|
from hyperopt import fmin, tpe, hp
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
n_steps = 1000
platform_name = "CUDA"
precision = "mixed"
#collision_rate = 5.0 / unit.picoseconds
collision_rate = None
sysname = "src"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
positions, boxes, state = lb_loader.equilibrate(testsystem, temperature, langevin_timestep, steps=equil_steps, minimize=True, use_hmc=False, precision=precision, platform_name=platform_name)
max_evals = 250
steps_per_hmc = hp.quniform("steps_per_hmc", 8, 50, 1)
timestep = hp.uniform("timestep", 0.05, 1.25)
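# hp.quniform(label, low, high, q) samples uniformly on [low, high] and rounds
# to a multiple of q, so steps_per_hmc is an integer-valued draw from 8..50;
# hp.uniform draws a continuous timestep in [0.05, 1.25] (interpreted as
# femtoseconds once multiplied by u.femtoseconds below).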
def inner_objective(args):
steps_per_hmc, timestep = args
print("*" * 80)
print("steps=%d, timestep=%f, extra_chances=%d" % (steps_per_hmc, timestep, 0))
current_timestep = timestep * u.femtoseconds
steps_per_hmc = int(steps_per_hmc)
integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc=steps_per_hmc, timestep=current_timestep, collision_rate=collision_rate)
simulation = lb_loader.build(testsystem, integrator, temperature, precision=precision, platform_name=platform_name)
simulation.integrator.step(n_steps)
return integrator, simulation # Have to pass simulation to keep it from being garbage collected
def objective(args):
integrator, simulation = inner_objective(args)
print("eff_ns_per_day=%f, eff_dt=%f" % (integrator.effective_ns_per_day, integrator.effective_timestep / u.femtoseconds))
return -1.0 * integrator.effective_ns_per_day
space = [steps_per_hmc, timestep]
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=max_evals, verbose=1)
integrator, simulation = inner_objective((best["steps_per_hmc"], best["timestep"]))
print(best)
print(integrator.effective_ns_per_day, integrator.effective_timestep)
"""
In [5]: integrator
Out[5]: <openmmtools.hmc_integrators.ghmc.GHMCIntegrator; proxy of <Swig Object of type 'OpenMM::CustomIntegrator *' at 0x7fc090051ea0> >
In [6]: integrator.acceptance_rate
Out[6]: 0.279
In [7]: integrator.effective_ns_per_day
Out[7]: 4.033138027747178
In [8]: integrator.timestep
Out[8]: Quantity(value=0.5779193519733394, unit=femtosecond)
In [9]: integrator.steps_per_hmc
Out[9]: 9
"""
|
kyleabeauchamp/HMCNotes
|
code/optimize/test_optimize_hyper_ghmc.py
|
Python
|
gpl-2.0
| 2,473
|
[
"OpenMM"
] |
0274afe98e36b784baa7a5a4e66e8c8aa97a36d0c2e7120bce1e3a04a4bc35b8
|
import warnings
import operator
import sys
import functools as ft
from functools import reduce
import numpy as np
import xarray as xr
import pandas as pd
import dask.array as dsar
from dask import delayed
import scipy.signal as sps
import scipy.linalg as spl
from .detrend import detrend as _detrend
__all__ = [
"fft",
"ifft",
"dft",
"idft",
"power_spectrum",
"cross_spectrum",
"cross_phase",
"isotropize",
"isotropic_power_spectrum",
"isotropic_cross_spectrum",
"isotropic_powerspectrum",
"isotropic_crossspectrum",
"fit_loglog",
]
def _fft_module(da):
if da.chunks:
return dsar.fft
else:
return np.fft
def _apply_window(da, dims, window_type="hann"):
"""Creating windows in dimensions dims."""
if window_type == True:
window_type = "hann"
warnings.warn(
"Please provide the name of window adhering to scipy.signal.windows. The boolean option will be deprecated in future releases.",
FutureWarning,
)
elif window_type not in [
"hann",
"hamming",
"kaiser",
"tukey",
"parzen",
"taylor",
"boxcar",
"barthann",
"bartlett",
"blackman",
"blackmanharris",
"bohman",
"chebwin",
"cosine",
"dpss",
"exponential",
"flattop",
"gaussian",
"general_cosine",
"general_gaussian",
"general_hamming",
"triang",
"nuttall",
]:
raise NotImplementedError(
f"Window type {window_type} not supported. Please adhere to scipy.signal.windows for naming convention."
)
if dims is None:
dims = list(da.dims)
else:
if isinstance(dims, str):
dims = [dims]
scipy_win_func = getattr(sps.windows, window_type)
if da.chunks:
def dask_win_func(n, sym=False):
return dsar.from_delayed(
delayed(scipy_win_func, pure=True)(n, sym=sym), (n,), float
)
win_func = dask_win_func
else:
win_func = scipy_win_func
windows = [
xr.DataArray(
win_func(len(da[d]), sym=False), dims=da[d].dims, coords=da[d].coords
)
for d in dims
]
return reduce(operator.mul, windows[::-1]), da * reduce(operator.mul, windows[::-1])
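# Worked example of the periodic (sym=False) Hann window used above:
# scipy.signal.windows.hann(4, sym=False) evaluates
# w[n] = 0.5 - 0.5*cos(2*pi*n/4) for n = 0..3, giving [0.0, 0.5, 1.0, 0.5].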
def _stack_chunks(da, dim, suffix="_segment"):
"""Reshape a DataArray so there is only one chunk along dimension `dim`"""
data = da.data
attr = da.attrs
newdims = []
newcoords = {}
newshape = []
for d in da.dims:
if d in dim:
axis_num = da.get_axis_num(d)
if np.diff(da.chunks[axis_num]).sum() != 0:
raise ValueError("Chunk lengths need to be the same.")
n = len(da[d])
chunklen = da.chunks[axis_num][0]
coord_rs = da[d].data.reshape((int(n / chunklen), int(chunklen)))
newdims.append(d + suffix)
newdims.append(d)
newshape.append(int(n / chunklen))
newshape.append(int(chunklen))
newcoords[d + suffix] = range(int(n / chunklen))
newcoords[d] = coord_rs[0]
else:
newdims.append(d)
newshape.append(len(da[d]))
newcoords[d] = da[d].data
da = xr.DataArray(
data.reshape(newshape), dims=newdims, coords=newcoords, attrs=attr
)
return da
def _freq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
# Discard negative frequencies from transform along last axis to be
# consistent with np.fft.rfftn
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(np.fft.rfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
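# Worked example: np.fft.fftfreq(4, d=1.0) returns [0.0, 0.25, -0.5, -0.25];
# with shift=True, np.fft.fftshift reorders this to [-0.5, -0.25, 0.0, 0.25],
# placing the zero frequency at the centre of the spectrum.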
def _ifreq(N, delta_x, real, shift):
# calculate frequencies from coordinates
# coordinates are always loaded eagerly, so we use numpy
if real is None:
fftfreq = [np.fft.fftfreq] * len(N)
else:
irfftfreq = lambda Nx, dx: np.fft.fftfreq(
2 * (Nx - 1), dx
) # Not in standard numpy !
fftfreq = [np.fft.fftfreq] * (len(N) - 1)
fftfreq.append(irfftfreq)
k = [fftfreq(Nx, dx) for (fftfreq, Nx, dx) in zip(fftfreq, N, delta_x)]
if shift:
k = [np.fft.fftshift(l) for l in k]
return k
def _new_dims_and_coords(da, dim, wavenm, prefix):
# set up new dimensions and coordinates for dataarray
swap_dims = dict()
new_coords = dict()
wavenm = dict(zip(dim, wavenm))
for d in dim:
k = wavenm[d]
new_name = prefix + d if d[: len(prefix)] != prefix else d[len(prefix) :]
new_dim = xr.DataArray(k, dims=new_name, coords={new_name: k}, name=new_name)
new_dim.attrs.update({"spacing": k[1] - k[0]})
new_coords[new_name] = new_dim
swap_dims[d] = new_name
return new_coords, swap_dims
def _diff_coord(coord):
"""Returns the difference as a xarray.DataArray."""
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(coord, ref_units, calendar)
coord = xr.DataArray(decoded_time, dims=coord.dims, coords=coord.coords)
return np.diff(coord)
elif pd.api.types.is_datetime64_dtype(v0):
return np.diff(coord).astype("timedelta64[s]").astype("f8")
else:
return np.diff(coord)
def _lag_coord(coord):
"""Returns the coordinate lag"""
v0 = coord.values[0]
calendar = getattr(v0, "calendar", None)
if coord[-1] > coord[0]:
coord_data = coord.data
else:
coord_data = np.flip(coord.data, axis=-1)
lag = coord_data[len(coord.data) // 2]
if calendar:
import cftime
ref_units = "seconds since 1800-01-01 00:00:00"
decoded_time = cftime.date2num(lag, ref_units, calendar)
return decoded_time
elif pd.api.types.is_datetime64_dtype(v0):
return lag.astype("timedelta64[s]").astype("f8").data
else:
return lag.data
def dft(
da, dim=None, true_phase=False, true_amplitude=False, **kwargs
): # pragma: no cover
"""
Deprecated function. See fft doc
"""
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use `fft` instead"
)
warnings.warn(msg, FutureWarning)
return fft(
da, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs
)
def idft(
daft, dim=None, true_phase=False, true_amplitude=False, **kwargs
): # pragma: no cover
"""
Deprecated function. See ifft doc
"""
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use `ifft` instead"
)
warnings.warn(msg, FutureWarning)
return ifft(
daft, dim=dim, true_phase=true_phase, true_amplitude=true_amplitude, **kwargs
)
def fft(
da,
spacing_tol=1e-3,
dim=None,
real_dim=None,
shift=True,
detrend=None,
window=None,
true_phase=False,
true_amplitude=False,
chunks_to_segments=False,
prefix="freq_",
**kwargs,
):
"""
Perform discrete Fourier transform of xarray data-array `da` along the
specified dimensions.
.. math::
daft = \mathbb{F}(da - \overline{da})
Parameters
----------
da : `xarray.DataArray`
The data to be transformed
spacing_tol: float, optional
Spacing tolerance. Fourier transform should not be applied to an uneven grid but
this restriction can be relaxed with this setting. Use caution.
dim : str or sequence of str, optional
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed. If the inputs are dask arrays, the
arrays must not be chunked along these dimensions.
real_dim : str, optional
Real Fourier transform will be taken along this dimension.
shift : bool, optional
Whether to shift the fft output. Default is `True`, unless `real_dim is not None`,
in which case shift is always set to `False`.
detrend : {None, 'constant', 'linear'}
If `constant`, the mean across the transform dimensions will be
subtracted before calculating the Fourier transform (FT).
If `linear`, the linear least-square fit will be subtracted before
the FT. For `linear`, only dims of length 1 and 2 are supported.
window : str, optional
Name of the window to apply to the data before the Fourier
transform is taken. A window will be applied to all the dimensions in
dim. Please follow `scipy.signal.windows`' naming convention.
true_phase : bool, optional
If set to False, the standard fft algorithm is applied to the signal without consideration of coordinates.
If set to True, coordinate locations are correctly taken into account to evaluate the Fourier Transform phase and
fftshift is applied to the input signal prior to fft (the fft algorithm intrinsically assumes that the input signal is on an fftshifted grid).
true_amplitude : bool, optional
If set to True, output is multiplied by the spacing of the transformed variables to match theoretical FT amplitude.
If set to False, amplitude regularisation by spacing is not applied (as in numpy.fft)
chunks_to_segments : bool, optional
Whether to treat dask chunks along the transform dimensions as separate segments (an FFT is taken per segment).
prefix : str
The prefix for the new transformed dimensions.
Returns
-------
daft : `xarray.DataArray`
The output of the Fourier transformation, with appropriate dimensions.
"""
if not true_phase and not true_amplitude:
msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.dft to preserve the theoretical phasing and amplitude of Fourier Transform. Consider using xrft.fft to ensure future compatibility with numpy.fft like behavior and to deactivate this warning."
warnings.warn(msg, FutureWarning)
if dim is None:
dim = list(da.dims)
else:
if isinstance(dim, str):
dim = [dim]
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.dft and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if real_dim is not None:
if real_dim not in da.dims:
raise ValueError(
"The dimension along which real FT is taken must be one of the existing dimensions."
)
else:
dim = [d for d in dim if d != real_dim] + [
real_dim
] # real dim has to be moved or added at the end !
if chunks_to_segments:
da = _stack_chunks(da, dim)
rawdims = da.dims # take care of segmented dimensions, if any
if real_dim is not None:
da = da.transpose(
*[d for d in da.dims if d not in [real_dim]] + [real_dim]
) # dimension for real transformed is moved at the end
fftm = _fft_module(da)
if real_dim is None:
fft_fn = fftm.fftn
else:
shift = False
fft_fn = fftm.rfftn
# the axes along which to take ffts
axis_num = [
da.get_axis_num(d) for d in dim
] # if there is a real dim , it has to be the last one
N = [da.shape[n] for n in axis_num]
# raise error if there are multiple coordinates attached to the dimension(s) over which the FFT is taken
for d in dim:
bad_coords = [
cname for cname in da.coords if cname != d and d in da[cname].dims
]
if bad_coords:
raise ValueError(
f"The input array contains coordinate variable(s) ({bad_coords}) whose dims include the transform dimension(s) `{d}`. "
f"Please drop these coordinates (`.drop({bad_coords}`) before invoking xrft."
)
# verify even spacing of input coordinates
delta_x = []
lag_x = []
for d in dim:
diff = _diff_coord(da[d])
delta = np.abs(diff[0])
lag = _lag_coord(da[d])
if not np.allclose(diff, diff[0], rtol=spacing_tol):
raise ValueError(
"Can't take Fourier transform because "
"coodinate %s is not evenly spaced" % d
)
if delta == 0.0:
raise ValueError(
"Can't take Fourier transform because spacing in coordinate %s is zero"
% d
)
delta_x.append(delta)
lag_x.append(lag)
if detrend is not None:
if detrend == "linear":
orig_dims = da.dims
da = _detrend(da, dim, detrend_type=detrend).transpose(*orig_dims)
else:
da = _detrend(da, dim, detrend_type=detrend)
if window is not None:
_, da = _apply_window(da, dim, window_type=window)
if true_phase:
reversed_axis = [
da.get_axis_num(d) for d in dim if da[d][-1] < da[d][0]
] # handling decreasing coordinates
f = fft_fn(
fftm.ifftshift(np.flip(da, axis=reversed_axis), axes=axis_num),
axes=axis_num,
)
else:
f = fft_fn(da.data, axes=axis_num)
if shift:
f = fftm.fftshift(f, axes=axis_num)
k = _freq(N, delta_x, real_dim, shift)
newcoords, swap_dims = _new_dims_and_coords(da, dim, k, prefix)
daft = xr.DataArray(
f, dims=da.dims, coords=dict([c for c in da.coords.items() if c[0] not in dim])
)
daft = daft.swap_dims(swap_dims).assign_coords(newcoords)
daft = daft.drop([d for d in dim if d in daft.coords])
updated_dims = [
daft.dims[i] for i in da.get_axis_num(dim)
] # List of transformed dimensions
if true_phase:
for up_dim, lag in zip(updated_dims, lag_x):
daft = daft * xr.DataArray(
np.exp(-1j * 2.0 * np.pi * newcoords[up_dim] * lag),
dims=up_dim,
coords={up_dim: newcoords[up_dim]},
) # taking advantage of xarray broadcasting and ordered coordinates
daft[up_dim].attrs.update({"direct_lag": lag.obj})
if true_amplitude:
daft = daft * np.prod(delta_x)
return daft.transpose(
*[swap_dims.get(d, d) for d in rawdims]
) # Do nothing if da was not transposed
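# A hedged usage sketch for fft (sample data assumed, not part of the
# library): transforming a pure sine along "x" yields a "freq_x"
# dimension; true_phase/true_amplitude make the output consistent with
# the continuous Fourier Transform.
# >>> x = np.arange(64)
# >>> da = xr.DataArray(np.sin(2 * np.pi * x / 8.0), dims="x", coords={"x": x})
# >>> daft = fft(da, dim="x", true_phase=True, true_amplitude=True)
# >>> daft.dims  # expected: ('freq_x',)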
def ifft(
daft,
spacing_tol=1e-3,
dim=None,
real_dim=None,
shift=True,
true_phase=False,
true_amplitude=False,
chunks_to_segments=False,
prefix="freq_",
lag=None,
**kwargs,
):
"""
Perform inverse discrete Fourier transform of xarray data-array `daft` along the
specified dimensions.
.. math::
da = \mathbb{F}^{-1}(daft)
Parameters
----------
daft : `xarray.DataArray`
The data to be transformed
spacing_tol: float, optional
Spacing tolerance. Fourier transform should not be applied to an uneven grid but
this restriction can be relaxed with this setting. Use caution.
dim : str or sequence of str, optional
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed.
real_dim : str, optional
Real Fourier transform will be taken along this dimension.
shift : bool, optional
Whether to shift the output. Default is `True`.
chunks_to_segments : bool, optional
Whether to treat dask chunks along the transform dimensions as separate segments (an FFT is taken per segment).
prefix : str
The prefix for the new transformed dimensions.
true_phase : bool, optional
If set to False, the standard ifft algorithm is applied to the signal without consideration of coordinate order.
If set to True, coordinates are correctly taken into account to evaluate the Inverse Fourier Transform phase and
fftshift is applied to the input signal prior to ifft (the ifft algorithm intrinsically assumes that the input signal is on an fftshifted grid).
true_amplitude : bool, optional
If set to True, output is divided by the spacing of the transformed variables to match theoretical IFT amplitude.
If set to False, amplitude regularisation by spacing is not applied (as in numpy.ifft)
lag : None, float or sequence of float and/or None, optional
Output coordinates of transformed dimensions will be shifted by the corresponding lag values, and correct signal phasing will be preserved if true_phase is set to True.
If lag is None (default), the 'direct_lag' attribute of each dimension is used (or set to zero if not found).
If defined, lag must have the same length as dim.
If lag is a sequence, a None element means that the 'direct_lag' attribute will be used for the corresponding dimension.
Manually set lag to zero to get output coordinates centered on zero.
Returns
-------
da : `xarray.DataArray`
The output of the Inverse Fourier transformation, with appropriate dimensions.
"""
if not true_phase and not true_amplitude:
msg = "Flags true_phase and true_amplitude will be set to True in future versions of xrft.idft to preserve the theoretical phasing and amplitude of Inverse Fourier Transform. Consider using xrft.ifft to ensure future compatibility with numpy.ifft like behavior and to deactivate this warning."
warnings.warn(msg, FutureWarning)
if dim is None:
dim = list(daft.dims)
else:
if isinstance(dim, str):
dim = [dim]
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.idft and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if real_dim is not None:
if real_dim not in daft.dims:
raise ValueError(
"The dimension along which real IFT is taken must be one of the existing dimensions."
)
else:
dim = [d for d in dim if d != real_dim] + [
real_dim
] # real dim has to be moved or added at the end !
if lag is None:
lag = [daft[d].attrs.get("direct_lag", 0.0) for d in dim]
msg = "Default idft's behaviour (lag=None) changed! Default value of lag was zero (centered output coordinates) and is now set to transformed coordinate's attribute: 'direct_lag'."
warnings.warn(msg, FutureWarning)
else:
if isinstance(lag, (int, float)):
lag = [lag]
if len(dim) != len(lag):
raise ValueError("dim and lag must have the same length.")
if not true_phase:
msg = "Setting lag with true_phase=False does not guarantee accurate idft."
warnings.warn(msg, Warning)
lag = [
daft[d].attrs.get("direct_lag") if l is None else l
for d, l in zip(dim, lag)
] # enable lag of the form [3.2, None, 7]
if true_phase:
for d, l in zip(dim, lag):
daft = daft * np.exp(1j * 2.0 * np.pi * daft[d] * l)
if chunks_to_segments:
daft = _stack_chunks(daft, dim)
rawdims = daft.dims # take care of segmented dimensions, if any
if real_dim is not None:
daft = daft.transpose(
*[d for d in daft.dims if d not in [real_dim]] + [real_dim]
) # dimension for real transformed is moved at the end
fftm = _fft_module(daft)
if real_dim is None:
fft_fn = fftm.ifftn
else:
fft_fn = fftm.irfftn
# the axes along which to take ffts
axis_num = [daft.get_axis_num(d) for d in dim]
N = [daft.shape[n] for n in axis_num]
# verify even spacing of input coordinates (it handles fftshifted grids)
delta_x = []
for d in dim:
diff = _diff_coord(daft[d])
delta = np.abs(diff[0])
l = _lag_coord(daft[d]) if d != real_dim else daft[d][0].data
if not np.allclose(
diff, delta, rtol=spacing_tol
): # means that input is not on regular increasing grid
reordered_coord = daft[d].copy()
reordered_coord = reordered_coord.sortby(d)
diff = _diff_coord(reordered_coord)
l = _lag_coord(reordered_coord)
if np.allclose(
diff, diff[0], rtol=spacing_tol
): # means that input is on fftshifted grid
daft = daft.sortby(d) # reordering the input
else:
raise ValueError(
"Can't take Fourier transform because "
"coodinate %s is not evenly spaced" % d
)
if np.abs(l) > spacing_tol:
raise ValueError(
"Inverse Fourier Transform can not be computed because coordinate %s is not centered on zero frequency"
% d
)
if delta == 0.0:
raise ValueError(
"Can't take Inverse Fourier transform because spacing in coordinate %s is zero"
% d
)
delta_x.append(delta)
axis_shift = [
daft.get_axis_num(d) for d in dim if d != real_dim
] # remove the real dim from the list
f = fftm.ifftshift(
daft.data, axes=axis_shift
) # Force to be on fftshift grid before Fourier Transform
f = fft_fn(f, axes=axis_num)
if not true_phase:
f = fftm.ifftshift(f, axes=axis_num)
if shift:
f = fftm.fftshift(f, axes=axis_num)
k = _ifreq(N, delta_x, real_dim, shift)
newcoords, swap_dims = _new_dims_and_coords(daft, dim, k, prefix)
da = xr.DataArray(
f,
dims=daft.dims,
coords=dict([c for c in daft.coords.items() if c[0] not in dim]),
)
da = da.swap_dims(swap_dims).assign_coords(newcoords)
da = da.drop([d for d in dim if d in da.coords])
with xr.set_options(
keep_attrs=True
): # This line ensures keeping spacing attribute in output coordinates
for d, l in zip(dim, lag):
tfd = swap_dims[d]
da = da.assign_coords({tfd: da[tfd] + l})
if true_amplitude:
da = da / np.prod([float(da[up_dim].spacing) for up_dim in swap_dims.values()])
return da.transpose(
*[swap_dims.get(d, d) for d in rawdims]
) # Do nothing if daft was not transposed
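# Round-trip sketch, continuing the fft example above (same
# assumptions): ifft with matching flags should invert fft, using the
# 'direct_lag' attribute to restore the original coordinates.
# >>> da_back = ifft(daft, dim="freq_x", true_phase=True, true_amplitude=True)
# >>> np.allclose(da_back.values, da.values)  # expected: True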
def power_spectrum(
da, dim=None, real_dim=None, scaling="density", window_correction=False, **kwargs
):
"""
Calculates the power spectrum of da.
.. math::
da' = da - \overline{da}
.. math::
ps = \mathbb{F}(da') {\mathbb{F}(da')}^*
Parameters
----------
da : `xarray.DataArray`
The data to be transformed
dim : str or sequence of str, optional
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed.
real_dim : str, optional
Real Fourier transform will be taken along this dimension.
scaling : str, optional
If 'density', it will normalize the output to power spectral density
If 'spectrum', it will normalize the output to power spectrum
window_correction : boolean
If True, it will correct for the energy reduction resulting from applying a non-uniform window.
This is the default behaviour of many tools for computing power spectrum (e.g. scipy.signal.welch and scipy.signal.periodogram).
If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (i.e. that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
If False, the spectrum gives a representation of the power in the windowed signal.
Note that when True, Parseval's theorem may only be approximately satisfied.
kwargs : dict : see xrft.fft for argument list
"""
if "density" in kwargs:
density = kwargs.pop("density")
msg = (
"density flag will be deprecated in future version of xrft.power_spectrum and replaced by scaling flag. "
+ 'density=True should be replaced by scaling="density" and '
+ "density=False will not be maintained.\nscaling flag is ignored !"
)
warnings.warn(msg, FutureWarning)
scaling = "density" if density else "false_density"
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.power_spectrum and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
kwargs.update(
{"true_amplitude": True, "true_phase": False}
) # true_phase does not matter in power_spectrum
daft = fft(da, dim=dim, real_dim=real_dim, **kwargs)
updated_dims = [
d for d in daft.dims if (d not in da.dims and "segment" not in d)
] # Transformed dimensions
ps = np.abs(daft) ** 2
if real_dim is not None:
real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
0
] # find transformed real dimension
f = np.full(ps.sizes[real], 2.0)
if len(da[real_dim]) % 2 == 0:
f[0], f[-1] = 1.0, 1.0
else:
f[0] = 1.0
ps = ps * xr.DataArray(f, dims=real, coords=ps[real].coords)
if scaling == "density":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
ps = ps / (windows ** 2).mean()
fs = np.prod([float(ps[d].spacing) for d in updated_dims])
ps *= fs
elif scaling == "spectrum":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da, dim, window_type=kwargs.get("window"))
ps = ps / windows.mean() ** 2
fs = np.prod([float(ps[d].spacing) for d in updated_dims])
ps *= fs ** 2
elif scaling == "false_density": # Corresponds to density=False
pass
else:
raise ValueError("Unknown {} scaling flag".format(scaling))
return ps
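# Usage sketch for power_spectrum (synthetic data, for illustration):
# the spectrum is real and non-negative, and with scaling="density"
# its integral over frequency approximates the variance of the
# detrended signal (Parseval's theorem).
# >>> da = xr.DataArray(np.random.randn(128), dims="x", coords={"x": np.arange(128)})
# >>> ps = power_spectrum(da, dim="x", detrend="constant")
# >>> bool((ps >= 0).all())  # expected: True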
def cross_spectrum(
da1,
da2,
dim=None,
real_dim=None,
scaling="density",
window_correction=False,
true_phase=False,
**kwargs,
):
"""
Calculates the cross spectra of da1 and da2.
.. math::
da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
.. math::
cs = \mathbb{F}(da1') {\mathbb{F}(da2')}^*
Parameters
----------
da1 : `xarray.DataArray`
The data to be transformed
da2 : `xarray.DataArray`
The data to be transformed
dim : str or sequence of str, optional
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed.
real_dim : str, optional
Real Fourier transform will be taken along this dimension.
scaling : str, optional
If 'density', it will normalize the output to power spectral density
If 'spectrum', it will normalize the output to power spectrum
window_correction : boolean
If True, it will correct for the energy reduction resulting from applying a non-uniform window.
This is the default behaviour of many tools for computing power spectrum (e.g. scipy.signal.welch and scipy.signal.periodogram).
If scaling = 'spectrum', correct the amplitude of peaks in the spectrum. This ensures, for example, that the peak in the one-sided power spectrum of a 10 Hz sine wave with RMS**2 = 10 has a magnitude of 10.
If scaling = 'density', correct for the energy (integral) of the spectrum. This ensures, for example, that the power spectral density integrates to the square of the RMS of the signal (i.e. that Parseval's theorem is satisfied). Note that in most cases, Parseval's theorem will only be approximately satisfied with this correction as it assumes that the signal being windowed is independent of the window. The correction becomes more accurate as the width of the window gets large in comparison with any noticeable period in the signal.
If False, the spectrum gives a representation of the power in the windowed signal.
Note that when True, Parseval's theorem may only be approximately satisfied.
kwargs : dict : see xrft.fft for argument list
"""
if not true_phase:
msg = (
"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_spectrum output. "
+ "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
+ "with numpy-like behavior where the coordinates are disregarded."
)
warnings.warn(msg, FutureWarning)
if "real" in kwargs:
real_dim = kwargs.get("real")
msg = "`real` flag will be deprecated in future version of xrft.cross_spectrum and replaced by `real_dim` flag."
warnings.warn(msg, FutureWarning)
if "density" in kwargs:
density = kwargs.pop("density")
msg = (
"density flag will be deprecated in future version of xrft.cross_spectrum and replaced by scaling flag. "
+ 'density=True should be replaced by scaling="density" and '
+ "density=False will not be maintained.\nscaling flag is ignored !"
)
warnings.warn(msg, FutureWarning)
scaling = "density" if density else "false_density"
kwargs.update({"true_amplitude": True})
daft1 = fft(da1, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
daft2 = fft(da2, dim=dim, real_dim=real_dim, true_phase=true_phase, **kwargs)
if daft1.dims != daft2.dims:
raise ValueError("The two datasets have different dimensions")
updated_dims = [
d for d in daft1.dims if (d not in da1.dims and "segment" not in d)
] # Transformed dimensions
cs = daft1 * np.conj(daft2)
if real_dim is not None:
real = [d for d in updated_dims if real_dim == d[-len(real_dim) :]][
0
] # find transformed real dimension
f = np.full(cs.sizes[real], 2.0)
if len(da1[real_dim]) % 2 == 0:
f[0], f[-1] = 1.0, 1.0
else:
f[0] = 1.0
cs = cs * xr.DataArray(f, dims=real, coords=cs[real].coords)
if scaling == "density":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
cs = cs / (windows ** 2).mean()
fs = np.prod([float(cs[d].spacing) for d in updated_dims])
cs *= fs
elif scaling == "spectrum":
if window_correction:
if kwargs.get("window") == None:
raise ValueError(
"window_correction can only be applied when windowing is turned on."
)
else:
windows, _ = _apply_window(da1, dim, window_type=kwargs.get("window"))
cs = cs / windows.mean() ** 2
fs = np.prod([float(cs[d].spacing) for d in updated_dims])
cs *= fs ** 2
elif scaling == "false_density": # Corresponds to density=False
pass
else:
raise ValueError("Unknown {} scaling flag".format(scaling))
return cs
def cross_phase(da1, da2, dim=None, true_phase=False, **kwargs):
"""
Calculates the cross-phase between da1 and da2.
Returned values are in [-pi, pi].
.. math::
da1' = da1 - \overline{da1};\ \ da2' = da2 - \overline{da2}
.. math::
cp = \text{Arg} [\mathbb{F}(da1')^*, \mathbb{F}(da2')]
Parameters
----------
da1 : `xarray.DataArray`
The data to be transformed
da2 : `xarray.DataArray`
The data to be transformed
kwargs : dict : see xrft.fft for argument list
"""
if not true_phase:
msg = (
"true_phase flag will be set to True in future version of xrft.dft possibly impacting cross_phase output. "
+ "Set explicitely true_phase = False in cross_spectrum arguments list to ensure future compatibility "
+ "with numpy-like behavior where the coordinates are disregarded."
)
warnings.warn(msg, FutureWarning)
cp = xr.ufuncs.angle(
cross_spectrum(da1, da2, dim=dim, true_phase=true_phase, **kwargs)
)
if da1.name and da2.name:
cp.name = "{}_{}_phase".format(da1.name, da2.name)
return cp
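# Sketch for cross_phase (synthetic data, for illustration): for a
# cosine and a sine at the same frequency, the phase at that frequency
# is +pi/2, since the sine lags the cosine by a quarter period.
# >>> x = np.arange(32)
# >>> a = xr.DataArray(np.cos(2 * np.pi * x / 8.0), dims="x", coords={"x": x})
# >>> b = xr.DataArray(np.sin(2 * np.pi * x / 8.0), dims="x", coords={"x": x})
# >>> cp = cross_phase(a, b, dim=["x"], true_phase=True)
# >>> float(cp.sel(freq_x=0.125, method="nearest"))  # expected: ~pi/2 (1.5708)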
def _binned_agg(
array: np.ndarray,
indices: np.ndarray,
num_bins: int,
*,
func,
fill_value,
dtype,
) -> np.ndarray:
"""NumPy helper function for aggregating over bins."""
try:
import numpy_groupies
except ImportError:
raise ImportError(
"This function requires the `numpy_groupies` package to be installed. Please install it with pip or conda."
)
mask = np.logical_not(np.isnan(indices))
int_indices = indices[mask].astype(int)
shape = array.shape[: -indices.ndim] + (num_bins,)
result = numpy_groupies.aggregate(
int_indices,
array[..., mask],
func=func,
size=num_bins,
fill_value=fill_value,
dtype=dtype,
axis=-1,
)
return result
def _groupby_bins_agg(
array: xr.DataArray,
group: xr.DataArray,
bins,
func="sum",
fill_value=0,
dtype=None,
**cut_kwargs,
) -> xr.DataArray:
"""Faster equivalent of Xarray's groupby_bins(...).sum()."""
# https://github.com/pydata/xarray/issues/4473
binned = pd.cut(np.ravel(group), bins, **cut_kwargs)
new_dim_name = group.name + "_bins"
indices = group.copy(data=binned.codes.reshape(group.shape))
result = xr.apply_ufunc(
_binned_agg,
array,
indices,
input_core_dims=[indices.dims, indices.dims],
output_core_dims=[[new_dim_name]],
output_dtypes=[array.dtype],
dask_gufunc_kwargs=dict(
allow_rechunk=True,
output_sizes={new_dim_name: binned.categories.size},
),
kwargs={
"num_bins": binned.categories.size,
"func": func,
"fill_value": fill_value,
"dtype": dtype,
},
dask="parallelized",
)
result.coords[new_dim_name] = binned.categories
return result
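# Sketch for _groupby_bins_agg (requires the optional numpy_groupies
# dependency; example data assumed): binned means over two bins.
# >>> g = xr.DataArray(np.array([0.1, 0.4, 0.6, 0.9]), dims="r", name="r")
# >>> v = xr.DataArray(np.array([1.0, 2.0, 3.0, 4.0]), dims="r")
# >>> _groupby_bins_agg(v, g, bins=[0.0, 0.5, 1.0], func="mean").values
# array([1.5, 3.5])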
def isotropize(ps, fftdim, nfactor=4, truncate=False):
"""
Isotropize a 2D power spectrum or cross spectrum
by taking an azimuthal average.
.. math::
\text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2
where :math:`N` is the number of azimuthal bins.
Parameters
----------
ps : `xarray.DataArray`
The power spectrum or cross spectrum to be isotropized.
fftdim : list
The fft dimensions over which the isotropization must be performed.
nfactor : int, optional
Ratio of number of bins to take the azimuthal averaging with the
data size. Default is 4.
truncate : bool, optional
If True, the spectrum will be truncated for wavenumbers larger than
the Nyquist wavenumber.
"""
# compute radial wavenumber bins
k = ps[fftdim[1]]
l = ps[fftdim[0]]
N = [k.size, l.size]
nbins = int(min(N) / nfactor)
freq_r = np.sqrt(k ** 2 + l ** 2).rename("freq_r")
kr = _groupby_bins_agg(freq_r, freq_r, bins=nbins, func="mean")
if truncate:
if k.max() > l.max():
kmax = l.max()
else:
kmax = k.max()
kr = kr.where(kr <= kmax)
else:
msg = (
"The flag `truncate` will be set to True by default in future version "
+ "in order to truncate the isotropic wavenumber larger than the "
+ "Nyquist wavenumber."
)
warnings.warn(msg, FutureWarning)
iso_ps = (
_groupby_bins_agg(ps, freq_r, bins=nbins, func="mean")
.rename({"freq_r_bins": "freq_r"})
.drop_vars("freq_r")
)
iso_ps.coords["freq_r"] = kr.data
if truncate:
return (iso_ps * iso_ps.freq_r).dropna("freq_r")
else:
return iso_ps * iso_ps.freq_r
def isotropic_powerspectrum(*args, **kwargs): # pragma: no cover
"""
Deprecated function. See isotropic_power_spectrum doc
"""
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use isotropic_power_spectrum instead"
)
warnings.warn(msg, Warning)
return isotropic_power_spectrum(*args, **kwargs)
def isotropic_power_spectrum(
da,
spacing_tol=1e-3,
dim=None,
shift=True,
detrend=None,
scaling="density",
window=None,
window_correction=False,
nfactor=4,
truncate=False,
**kwargs,
):
"""
Calculates the isotropic spectrum from the
two-dimensional power spectrum by taking the
azimuthal average.
.. math::
\text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2
where :math:`N` is the number of azimuthal bins.
Note: the method is not lazy and does trigger computations.
Parameters
----------
da : `xarray.DataArray`
The data to be transformed
spacing_tol: float, optional
Spacing tolerance. Fourier transform should not be applied to an uneven grid but
this restriction can be relaxed with this setting. Use caution.
dim : list, optional
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed.
shift : bool, optional
Whether to shift the fft output.
detrend : str, optional
If `constant`, the mean across the transform dimensions will be
subtracted before calculating the Fourier transform (FT).
If `linear`, the linear least-square fit will be subtracted before
the FT.
scaling : str, optional
If 'density', normalize the output to power spectral density; if 'spectrum', normalize to power spectrum.
window : str, optional
Name of the window to apply to the data before the Fourier
transform is taken. Please adhere to scipy.signal.windows for naming convention.
nfactor : int, optional
Ratio of number of bins to take the azimuthal averaging with the
data size. Default is 4.
truncate : bool, optional
If True, the spectrum will be truncated for wavenumbers larger than
the Nyquist wavenumber.
Returns
-------
iso_ps : `xarray.DataArray`
Isotropic power spectrum
"""
if "density" in kwargs:
density = kwargs.pop("density")
scaling = "density" if density else "false_density"
if dim is None:
dim = da.dims
if len(dim) != 2:
raise ValueError("The Fourier transform should be two dimensional")
ps = power_spectrum(
da,
spacing_tol=spacing_tol,
dim=dim,
shift=shift,
detrend=detrend,
scaling=scaling,
window_correction=window_correction,
window=window,
**kwargs,
)
fftdim = ["freq_" + d for d in dim]
return isotropize(ps, fftdim, nfactor=nfactor, truncate=truncate)
def isotropic_crossspectrum(*args, **kwargs): # pragma: no cover
"""
Deprecated function. See isotropic_cross_spectrum doc
"""
msg = (
"This function has been renamed and will disappear in the future."
+ " Please use isotropic_cross_spectrum instead"
)
warnings.warn(msg, Warning)
return isotropic_cross_spectrum(*args, **kwargs)
def isotropic_cross_spectrum(
da1,
da2,
spacing_tol=1e-3,
dim=None,
shift=True,
detrend=None,
scaling="density",
window=None,
window_correction=False,
nfactor=4,
truncate=False,
**kwargs,
):
"""
Calculates the isotropic spectrum from the
two-dimensional power spectrum by taking the
azimuthal average.
.. math::
\text{iso}_{cs} = k_r N^{-1} \sum_{N} (\mathbb{F}(da1') {\mathbb{F}(da2')}^*)
where :math:`N` is the number of azimuthal bins.
Note: the method is not lazy and does trigger computations.
Parameters
----------
da1 : `xarray.DataArray`
The data to be transformed
da2 : `xarray.DataArray`
The data to be transformed
spacing_tol: float (optional)
Spacing tolerance. Fourier transform should not be applied to an uneven grid but
this restriction can be relaxed with this setting. Use caution.
dim : list (optional)
The dimensions along which to take the transformation. If `None`, all
dimensions will be transformed.
shift : bool (optional)
Whether to shift the fft output.
detrend : str (optional)
If `constant`, the mean across the transform dimensions will be
subtracted before calculating the Fourier transform (FT).
If `linear`, the linear least-square fit will be subtracted before
the FT.
scaling : str (optional)
If 'density', normalize the output to power spectral density; if 'spectrum', normalize to power spectrum.
window : str (optional)
Name of the window to apply to the data before the Fourier
transform is taken. Please adhere to scipy.signal.windows for naming convention.
nfactor : int (optional)
Ratio of number of bins to take the azimuthal averaging with the
data size. Default is 4.
truncate : bool, optional
If True, the spectrum will be truncated for wavenumbers larger than
the Nyquist wavenumber.
Returns
-------
iso_cs : `xarray.DataArray`
Isotropic cross spectrum
"""
if "density" in kwargs:
density = kwargs.pop("density")
scaling = "density" if density else "false_density"
if dim is None:
dim = da1.dims
dim2 = da2.dims
if dim != dim2:
raise ValueError("The two datasets have different dimensions")
if len(dim) != 2:
raise ValueError("The Fourier transform should be two dimensional")
cs = cross_spectrum(
da1,
da2,
spacing_tol=spacing_tol,
dim=dim,
shift=shift,
detrend=detrend,
scaling=scaling,
window_correction=window_correction,
window=window,
**kwargs,
)
fftdim = ["freq_" + d for d in dim]
return isotropize(cs, fftdim, nfactor=nfactor, truncate=truncate)
def fit_loglog(x, y):
"""
Fit a line to isotropic spectra in log-log space
Parameters
----------
x : `numpy.array`
Coordinate of the data
y : `numpy.array`
data
Returns
-------
y_fit : `numpy.array`
The linear fit
a : float64
Slope of the fit
b : float64
Intercept of the fit
"""
# fit log vs log
p = np.polyfit(np.log2(x), np.log2(y), 1)
y_fit = 2 ** (np.log2(x) * p[0] + p[1])
return y_fit, p[0], p[1]
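# Sketch: fit_loglog recovers the exponent of a pure power law
# (synthetic data, for illustration).
# >>> x = np.array([1.0, 2.0, 4.0, 8.0])
# >>> _, slope, _ = fit_loglog(x, x ** -3)
# >>> slope  # expected: -3.0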
|
xgcm/xrft
|
xrft/xrft.py
|
Python
|
mit
| 44,138
|
[
"Gaussian"
] |
93565710a23f18696643181cd2cd756015bcfb4b5780e4b77124acf4943344da
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# A script to test the 2D image skeleton filter.
# Shapes are drawn on an image canvas, thinned with
# vtkImageSkeleton2D, and displayed.
# Image pipeline
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,339,0,339,0,0)
# background black
imageCanvas.SetDrawColor(0)
imageCanvas.FillBox(0,511,0,511)
# thick box
imageCanvas.SetDrawColor(255)
imageCanvas.FillBox(10,110,10,110)
imageCanvas.SetDrawColor(0)
imageCanvas.FillBox(30,90,30,90)
# put a stop sign in the box
imageCanvas.SetDrawColor(255)
imageCanvas.DrawSegment(52,80,68,80)
imageCanvas.DrawSegment(68,80,80,68)
imageCanvas.DrawSegment(80,68,80,52)
imageCanvas.DrawSegment(80,52,68,40)
imageCanvas.DrawSegment(68,40,52,40)
imageCanvas.DrawSegment(52,40,40,52)
imageCanvas.DrawSegment(40,52,40,68)
imageCanvas.DrawSegment(40,68,52,80)
imageCanvas.FillPixel(60,60)
# diamond
imageCanvas.SetDrawColor(255)
imageCanvas.FillTube(145,145,195,195,34)
imageCanvas.SetDrawColor(0)
imageCanvas.FillTube(165,165,175,175,7)
# H
imageCanvas.SetDrawColor(255)
imageCanvas.FillBox(230,250,230,330)
imageCanvas.FillBox(310,330,230,330)
imageCanvas.FillBox(230,330,270,290)
# circle
imageCanvas.SetDrawColor(255)
imageCanvas.DrawCircle(280,170,50.0)
# point as center of circle
imageCanvas.SetDrawColor(255)
imageCanvas.DrawPoint(280,170)
# lines +
imageCanvas.DrawSegment(60,120,60,220)
imageCanvas.DrawSegment(10,170,110,170)
# lines X
imageCanvas.DrawSegment(10,230,110,330)
imageCanvas.DrawSegment(110,230,10,330)
# sloped lines
imageCanvas.DrawSegment(120,230,220,230)
imageCanvas.DrawSegment(120,230,220,250)
imageCanvas.DrawSegment(120,230,220,270)
imageCanvas.DrawSegment(120,230,220,290)
imageCanvas.DrawSegment(120,230,220,310)
imageCanvas.DrawSegment(120,230,220,330)
imageCanvas.DrawSegment(120,230,200,330)
imageCanvas.DrawSegment(120,230,180,330)
imageCanvas.DrawSegment(120,230,160,330)
imageCanvas.DrawSegment(120,230,140,330)
imageCanvas.DrawSegment(120,230,120,330)
# double thickness lines +
imageCanvas.DrawSegment(120,60,220,60)
imageCanvas.DrawSegment(120,61,220,61)
imageCanvas.DrawSegment(170,10,170,110)
imageCanvas.DrawSegment(171,10,171,110)
# lines X
imageCanvas.DrawSegment(230,10,330,110)
imageCanvas.DrawSegment(231,10,331,110)
imageCanvas.DrawSegment(230,110,330,10)
imageCanvas.DrawSegment(231,110,331,10)
skeleton1 = vtk.vtkImageSkeleton2D()
#skeleton1 BypassOn
skeleton1.SetInputConnection(imageCanvas.GetOutputPort())
skeleton1.SetPrune(0)
skeleton1.SetNumberOfIterations(20)
skeleton1.ReleaseDataFlagOff()
clip = vtk.vtkImageClip()
clip.SetInputConnection(skeleton1.GetOutputPort())
clip.SetOutputWholeExtent(0,120,0,120,0,0)
magnify = vtk.vtkImageMagnify()
magnify.SetInputConnection(clip.GetOutputPort())
magnify.SetMagnificationFactors(5,5,1)
magnify.InterpolateOff()
magnify.ReleaseDataFlagOff()
viewer1 = vtk.vtkImageViewer()
viewer1.SetInputConnection(imageCanvas.GetOutputPort())
viewer1.SetColorWindow(5)
viewer1.SetColorLevel(1)
viewer = vtk.vtkImageViewer()
#viewer SetInputConnection [magnify GetOutputPort]
viewer.SetInputConnection(skeleton1.GetOutputPort())
viewer.SetColorWindow(5)
viewer.SetColorLevel(1)
viewer.Render()
# --- end of script --
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestSkeleton2D.py
|
Python
|
gpl-3.0
| 3,308
|
[
"VTK"
] |
dd41ca2309899febbc1365041732c3d8d8c00cc6ea8545dd0973ace48c430ac9
|
# -*- coding: utf-8 -*-
#
# hl_api_info.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to get information on NEST.
"""
from .hl_api_helper import *
import sys
import os
import webbrowser
@check_stack
def sysinfo():
"""Print information on the platform on which NEST was compiled."""
sr("sysinfo")
@check_stack
def version():
"""Return the NEST version.
Returns
-------
str:
The version of NEST.
"""
sr("statusdict [[ /kernelname /version ]] get")
return " ".join(spp())
@check_stack
def authors():
"""Print the authors of NEST."""
sr("authors")
@check_stack
def helpdesk():
"""Open the NEST helpdesk in browser.
Use the system default browser.
"""
if sys.version_info < (2, 7, 8):
print("The NEST Helpdesk is only available with Python 2.7.8 or "
"later. \n")
return
if 'NEST_DOC_DIR' not in os.environ:
print(
'NEST help needs to know where NEST is installed.'
'Please source nest_vars.sh or define NEST_DOC_DIR manually.')
return
helpfile = os.path.join(os.environ['NEST_DOC_DIR'], 'help',
'helpindex.html')
# Under Windows systems webbrowser.open is incomplete
# See <https://bugs.python.org/issue8232>
if sys.platform[:3] == "win":
os.startfile(helpfile)
# Under MacOS we need to ask for the browser explicitly.
# See <https://bugs.python.org/issue30392>.
elif sys.platform[:3] == "dar":
webbrowser.get('safari').open_new(helpfile)
else:
webbrowser.open_new(helpfile)
@check_stack
def help(obj=None, pager=None):
"""Show the help page for the given object using the given pager.
The default pager is more.
Parameters
----------
obj : object, optional
Object to display help for
pager : str, optional
Pager to use
"""
hlpobj = obj
if hlpobj is not None:
show_help_with_pager(hlpobj, pager)
else:
print("Type 'nest.helpdesk()' to access the online documentation "
"in a browser.")
print("Type 'nest.help(object)' to get help on a NEST object or "
"command.\n")
print("Type 'nest.Models()' to see a list of available models "
"in NEST.")
print("Type 'nest.authors()' for information about the makers "
"of NEST.")
print("Type 'nest.sysinfo()' to see details on the system "
"configuration.")
print("Type 'nest.version()' for information about the NEST "
"version.\n")
print("For more information visit http://www.nest-simulator.org.")
@check_stack
def get_argv():
"""Return argv as seen by NEST.
This is similar to Python sys.argv but might have changed after
MPI initialization.
Returns
-------
tuple:
Argv, as seen by NEST.
"""
sr('statusdict')
statusdict = spp()
return statusdict['argv']
@check_stack
def message(level, sender, text):
"""Print a message using NEST's message system.
Parameters
----------
level :
Level
sender :
Message sender
text : str
Text to be sent in the message
"""
sps(level)
sps(sender)
sps(text)
sr('message')
@check_stack
def SetStatus(nodes, params, val=None):
"""Set the parameters of nodes or connections to params.
If val is given, params has to be the name
of an attribute, which is set to val on the nodes/connections. val
can be a single value or a list of the same size as nodes.
Parameters
----------
nodes : list or tuple
Either a list of global ids of nodes, or a tuple of connection
handles as returned by GetConnections()
params : str or dict or list
Dictionary of parameters or list of dictionaries of parameters of
same length as nodes. If val is given, this has to be the name of
a model property as a str.
val : str, optional
If given, params has to be the name of a model property.
Raises
------
TypeError
If nodes is not a list of nodes or synapses, or if params has the
wrong type or length.
"""
if not is_coercible_to_sli_array(nodes):
raise TypeError("nodes must be a list of nodes or synapses")
# This was added to ensure that the function is a nop (instead of,
# for instance, raising an exception) when applied to an empty list,
# which is an artifact of the API operating on lists, rather than
# relying on language idioms, such as comprehensions
#
if len(nodes) == 0:
return
if val is not None and is_literal(params):
if is_iterable(val) and not isinstance(val, (uni_str, dict)):
params = [{params: x} for x in val]
else:
params = {params: val}
params = broadcast(params, len(nodes), (dict,), "params")
if len(nodes) != len(params):
raise TypeError(
"status dict must be a dict, or list of dicts of length 1 "
"or len(nodes)")
if is_sequence_of_connections(nodes):
pcd(nodes)
else:
sps(nodes)
sps(params)
sr('2 arraystore')
sr('Transpose { arrayload pop SetStatus } forall')
@check_stack
def GetStatus(nodes, keys=None):
"""Return the parameter dictionaries of nodes or connections.
If keys is given, a list of values is returned instead. keys may also be a
list, in which case the returned list contains lists of values.
Parameters
----------
nodes : list or tuple
Either a list of global ids of nodes, or a tuple of connection
handles as returned by GetConnections()
keys : str or list, optional
String or a list of strings naming model properties. GetStatus then
returns a single value or a list of values belonging to the keys
given.
Returns
-------
dict:
All parameters
type:
If keys is a string, the corresponding default parameter is returned
list:
If keys is a list of strings, a list of corresponding default parameters
is returned
Raises
------
TypeError
If nodes is not a list of nodes or synapses, or if keys is neither
a string nor an iterable.
"""
if not is_coercible_to_sli_array(nodes):
raise TypeError("nodes must be a list of nodes or synapses")
if len(nodes) == 0:
return nodes
if keys is None:
cmd = '{ GetStatus } Map'
elif is_literal(keys):
cmd = '{{ GetStatus /{0} get }} Map'.format(keys)
elif is_iterable(keys):
keys_str = " ".join("/{0}".format(x) for x in keys)
cmd = '{{ GetStatus }} Map {{ [ [ {0} ] ] get }} Map'.format(keys_str)
else:
raise TypeError("keys should be either a string or an iterable")
if is_sequence_of_connections(nodes):
pcd(nodes)
else:
sps(nodes)
sr(cmd)
return spp()
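# Illustrative usage sketch (assumes an interactive PyNEST session,
# i.e. `import nest`; the model name and values are examples only):
# >>> neurons = nest.Create("iaf_psc_alpha", 2)
# >>> nest.SetStatus(neurons, "V_m", -70.0)
# >>> nest.GetStatus(neurons, "V_m")  # expected: one value per neuron, e.g. (-70.0, -70.0)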
|
hesam-setareh/nest-simulator
|
pynest/nest/lib/hl_api_info.py
|
Python
|
gpl-2.0
| 7,504
|
[
"VisIt"
] |
6616dd5794d7d803daa338a16f035a1777c8f1e385db7ed12a98465a96b7e13c
|
#!/usr/bin/python
import h2o
def get_model_property(model, summary=True, validation_metrics=False,
mean_residual_deviance=False, columns=False, categorical_col_domains=False,
parameters=False, all_output=False):
"""
A convenience function to return a property from a model, since they
are held in dicts that are not introspectable.
Only one property will be returned: the first one on this list that
has a value of True.
Args:
model: An H2O model
summary: if True, returns it
validation_metrics: if True, returns it
mean_residual_deviance: if True, returns it
columns: if True, returns it
categorical_col_domains: if True, returns it
parameters: if True, returns it
all_output: if True, returns all of the above
Returns:
dict/list from json or value, depending on Args
"""
if summary:
return model._model_json['output']['model_summary']
elif validation_metrics:
return model._model_json['output']['validation_metrics']
elif mean_residual_deviance:
return model._model_json['output']['validation_metrics'].mean_residual_deviance()
elif columns:
return model._model_json['output']['names']
elif categorical_col_domains:
return model._model_json['output']['domains']
# ^ if this doesn't work maybe it's ['names']['domains']
elif parameters:
return model._model_json['parameters']
elif all_output:
return model._model_json['output']
else:
raise Exception('no valid arg is True')
def get_varimp(model):
"""
Returns a pandas dataframe of variable importances
Args:
model: an H2O model
Returns:
A pandas dataframe with columns 'variable', 'relative_importance',
'scaled_importance', 'pct', sorted by relative importance.
"""
import pandas as pd
df_varimp = pd.DataFrame(model.varimp(), columns = ['variable',
'relative_importance', 'scaled_importance', 'pct'])
df_varimp.sort_values('relative_importance', ascending=False,
inplace=True)
return df_varimp
def get_train_test_split(h2o_frame, cutoff):
"""
Splits h2oframe into train and test sets randomly.
Args:
h2o_frame: An h2o frame
cutoff: A float between 0 and 1
Returns:
A tuple of two h2o frames: the first holds roughly a `cutoff`
fraction of the rows, the second holds the remainder.
"""
assert 0 < cutoff < 1, "cutoff must be in range (0,1)"
r = h2o_frame.runif() # Random UNIForm numbers, one per row
return h2o_frame[r < cutoff], h2o_frame[r >= cutoff]
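# Usage sketch (assumes a running H2O cluster and an existing H2OFrame
# `df`; both are placeholders):
# >>> train, test = get_train_test_split(df, 0.8)
# >>> # train now holds roughly 80% of the rows, test the remainder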
def get_predictions(model, h2o_frame):
"""
Returns predictions from an h2o model based on values in an h2o frame.
Args:
model: an h2o model
h2o_frame: an h2o frame of feature values
Returns:
list of predicted values with same length as predictions h2o frame
"""
return list(model.predict(h2o_frame).as_data_frame()['predict'])
def print_H2OGradientBoostingEstimator_instantiation_args():
"""
Prints a helpful list of arguments for H2OGradientBoostingEstimator when
instantiated.
Args:
None
Returns:
None
"""
print("""H2OGradientBoostingEstimator args
model_id (str, optional) The unique id assigned to the resulting
model. If none is given, an id will automatically be generated.
distribution (str) The distribution function of the response. Must be
"AUTO", "bernoulli", "multinomial", "poisson", "gamma", "tweedie",
"laplace", "quantile" or "gaussian"
quantile_alpha (float) Quantile (only for Quantile regression, must be
between 0 and 1)
tweedie_power (float) Tweedie power (only for Tweedie distribution,
must be between 1 and 2)
ntrees (int) A non-negative integer that determines the number of
trees to grow.
max_depth (int) Maximum depth to grow the tree.
min_rows (int) Minimum number of rows to assign to terminal nodes.
learn_rate (float) Learning rate (from 0.0 to 1.0)
learn_rate_annealing (float) Multiply the learning rate by this factor
after every tree
sample_rate (float) Row sample rate per tree (from 0.0 to 1.0)
sample_rate_per_class (list) Row sample rate per tree per class (one
per class, from 0.0 to 1.0)
col_sample_rate (float) Column sample rate per split (from 0.0 to 1.0)
col_sample_rate_change_per_level (float) Relative change of the column
sampling rate for every level (from 0.0 to 2.0)
col_sample_rate_per_tree (float) Column sample rate per tree (from 0.0
to 1.0)
nbins (int) For numerical columns (real/int), build a histogram of (at
least) this many bins, then split at the best point.
nbins_top_level (int) For numerical columns (real/int), build a
histogram of (at most) this many bins at the root level, then decrease
by factor of two per level.
nbins_cats (int) For categorical columns (factors), build a histogram
of this many bins, then split at the best point. Higher values can
lead to more overfitting.
balance_classes (bool) logical, indicates whether or not to balance
training data class counts via over/under-sampling (for imbalanced
data)
class_sampling_factors (list) Desired over/under-sampling ratios per
class (in lexicographic order). If not specified, sampling factors
will be automatically computed to obtain class balance during
training. Requires balance_classes.
max_after_balance_size (float) Maximum relative size of the training
data after balancing class counts (can be less than 1.0). Ignored if
balance_classes is False, which is the default behavior.
seed (int) Seed for random numbers (affects sampling when
balance_classes=T)
build_tree_one_node (bool) Run on one node only; no network overhead
but fewer cpus used. Suitable for small datasets.
nfolds (int, optional) Number of folds for cross-validation. If nfolds
>= 2, then validation must remain empty.
fold_assignment (str) Cross-validation fold assignment scheme, if
fold_column is not specified. Must be "AUTO", "Random" or "Modulo"
keep_cross_validation_predictions (bool) Whether to keep the
predictions of the cross-validation models
keep_cross_validation_fold_assignment (bool) Whether to keep the
cross-validation fold assignment.
score_each_iteration (bool) Attempts to score each tree.
score_tree_interval (int) Score the model after every so many trees.
Disabled if set to 0.
stopping_rounds (int) Early stopping based on convergence of
stopping_metric. Stop if simple moving average of length k of the
stopping_metric does not improve (by stopping_tolerance) for
k=stopping_rounds scoring events. Can only trigger after at least 2k
scoring events. Use 0 to disable.
stopping_metric (str) Metric to use for convergence checking, only for
_stopping_rounds > 0. Can be one of "AUTO", "deviance", "logloss",
"MSE", "AUC", "r2", "misclassification".
stopping_tolerance (float) Relative tolerance for metric-based
stopping criterion (stop if relative improvement is not at least this
much)
min_split_improvement (float) Minimum relative improvement in squared
error reduction for a split to happen
random_split_points (boolean) Whether to use random split points for
histograms (to pick the best split from).
max_abs_leafnode_pred (float) Maximum absolute value of a leaf node
prediction.""")
# def print_H2OGradientBoostingEstimator_train_method_args():
# """
# Prints a helpful list of arguments for H2OGradientBoostingEstimator when
# .train() method is called.
# Args:
# None
# Returns:
# None
# """
# txt = ("""z
# x: A vector containing the names of the predictors to use while
# building the GBM model.
# y: A character string or index that represents the response variable
# in the model.
# training frame: An H2OFrame object containing the variables in the
# model.
# validation frame: An H2OFrame object containing the validation dataset
# used to construct confusion matrix. If blank, the training data is
# used by default.
# nfolds: Number of folds for cross-validation.
# ignore const cols: A boolean indicating if constant columns should be
# ignored. Default is True.
# ntrees: A non-negative integer that defines the number of trees. The
# default is 50.
# max depth: The user-defined tree depth. The default is 5.
# min rows: The minimum number of rows to assign to the terminal nodes.
# The default is 10.
# nbins: For numerical columns (real/int), build a histogram of at least
# the specified number of bins, then split at the best point The default
# is 20.
# nbins cats: For categorical columns (enum), build a histogram of the
# specified number of bins, then split at the best point. Higher values
# can lead to more overfitting. The default is 1024.
# seed: Seed containing random numbers that affects sampling.
# learn rate: An integer that defines the learning rate. The default is
# 0.1 and the range is 0.0 to 1.0.
# distribution: Enter AUTO, bernoulli, multinomial, gaussian, poisson,
# gamma or tweedie to select the distribution function. The default is
# AUTO.
# score each iteration: A boolean indicating whether to score during
# each iteration of model training. Default is false.
# fold assignment: Cross-validation fold assignment scheme, if fold
# column is not specified. The following options are supported: AUTO,
# Random, or Modulo.
# fold column: Column with cross-validation fold index assignment per
# observation.
# offset column: Specify the offset column. Note: Offsets are per-row
# bias values that are used during model training. For Gaussian
# distributions, they can be seen as simple corrections to the response
# (y) column. Instead of learning to predict the response (y-row), the
# model learns to predict the (row) offset of the response column. For
# other distributions, the offset corrections are applied in the
# linearized space before applying the inverse link function to get the
# actual response values.
# weights column: Specify the weights column. Note: Weights are per-row
# observation weights. This is typically the number of times a row is
# repeated, but non-integer values are supported as well. During
# training, rows with higher weights matter more, due to the larger loss
# function pre-factor.
# balance classes: Balance training data class counts via over or
# undersampling for imbalanced data. The default is FALSE.
# max confusion matrix size: Maximum size (number of classes) for
# confusion matrices to print in the H2O logs. Default is 20.
# max hit ratio k: (for multi-class only) Maximum number (top K) of
# predictions to use for hit ratio computation. Use 0 to disable.
# Default is 10.
# r2 stopping: Stop making trees when the R2 metric equals or exceeds
# this value. Default is 0.999999.
# build tree one node: Specify if GBM should be run on one node only; no
# network overhead but fewer CPUs used. Suitable for small datasets.
# Default is False.
# tweedie power: A numeric specifying the power for the tweedie function
# when distribution = "tweedie". Default is 1.5.
# checkpoint: Enter a model key associated with a previously-trained
# model. Use this option to build a new model as a continuation of a
# previously-generated model.
# keep cross validation predictions: Specify whether to keep the
# predictions of the crossvalidation models. Default is False.
# class sampling factors: Desired over/under-sampling ratios per class
# (in lexicographic order). If not specified, sampling factors will be
# automatically computed to obtain class balance during training.
# Requires balance classes.
# max after balance size: Maximum relative size of the training data
# after balancing class counts; can be less than 1.0. The default is 5.
# nbins top level: For numerical columns (real/int), build a histogram
# of (at most) this many bins at the root level, then decrease by factor
# of two per level.
# model id: The unique ID assigned to the generated model. If not
# specified, an ID is generated automatically.""")
# print(txt)
|
Prooffreader/pyprooff
|
pyprooff/h2o.py
|
Python
|
mit
| 13,301
|
[
"Gaussian"
] |
d9b888054efcf13a02b6881f00bec17f5321960d94ef4c287136a9549d41b7aa
|
# bayesiandb.py
# old bailey
#
# naive bayesian learner
# adapted from Segaran, Programming Collective Intelligence, Ch. 6
# persists training info in SQLite DB
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
# fall back to the standard library driver on modern Pythons
import sqlite3 as sqlite
stopwords = ['a', 'about', 'above', 'across', 'after', 'afterwards']
stopwords += ['again', 'against', 'all', 'almost', 'alone', 'along']
stopwords += ['already', 'also', 'although', 'always', 'am', 'among']
stopwords += ['amongst', 'amoungst', 'amount', 'an', 'and', 'another']
stopwords += ['any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere']
stopwords += ['are', 'around', 'as', 'at', 'back', 'be', 'became']
stopwords += ['because', 'become', 'becomes', 'becoming', 'been']
stopwords += ['before', 'beforehand', 'behind', 'being', 'below']
stopwords += ['beside', 'besides', 'between', 'beyond', 'bill', 'both']
stopwords += ['bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant']
stopwords += ['co', 'computer', 'con', 'could', 'couldnt', 'cry', 'de']
stopwords += ['describe', 'detail', 'did', 'do', 'done', 'down', 'due']
stopwords += ['during', 'each', 'eg', 'eight', 'either', 'eleven', 'else']
stopwords += ['elsewhere', 'empty', 'enough', 'etc', 'even', 'ever']
stopwords += ['every', 'everyone', 'everything', 'everywhere', 'except']
stopwords += ['few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first']
stopwords += ['five', 'for', 'former', 'formerly', 'forty', 'found']
stopwords += ['four', 'from', 'front', 'full', 'further', 'get', 'give']
stopwords += ['go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her']
stopwords += ['here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers']
stopwords += ['herself', 'him', 'himself', 'his', 'how', 'however']
stopwords += ['hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed']
stopwords += ['interest', 'into', 'is', 'it', 'its', 'itself', 'keep']
stopwords += ['last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made']
stopwords += ['many', 'may', 'me', 'meanwhile', 'might', 'mill', 'mine']
stopwords += ['more', 'moreover', 'most', 'mostly', 'move', 'much']
stopwords += ['must', 'my', 'myself', 'name', 'namely', 'neither', 'never']
stopwords += ['nevertheless', 'next', 'nine', 'no', 'nobody', 'none']
stopwords += ['noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'of']
stopwords += ['off', 'often', 'on','once', 'one', 'only', 'onto', 'or']
stopwords += ['other', 'others', 'otherwise', 'our', 'ours', 'ourselves']
stopwords += ['out', 'over', 'own', 'part', 'per', 'perhaps', 'please']
stopwords += ['put', 'rather', 're', 's', 'same', 'see', 'seem', 'seemed']
stopwords += ['seeming', 'seems', 'serious', 'several', 'she', 'should']
stopwords += ['show', 'side', 'since', 'sincere', 'six', 'sixty', 'so']
stopwords += ['some', 'somehow', 'someone', 'something', 'sometime']
stopwords += ['sometimes', 'somewhere', 'still', 'such', 'system', 'take']
stopwords += ['ten', 'than', 'that', 'the', 'their', 'them', 'themselves']
stopwords += ['then', 'thence', 'there', 'thereafter', 'thereby']
stopwords += ['therefore', 'therein', 'thereupon', 'these', 'they']
stopwords += ['thick', 'thin', 'third', 'this', 'those', 'though', 'three']
stopwords += ['through', 'throughout', 'thru', 'thus', 'to']
stopwords += ['together', 'too', 'top', 'toward', 'towards', 'twelve']
stopwords += ['twenty', 'two', 'un', 'under', 'until', 'up', 'upon']
stopwords += ['us', 'very', 'via', 'was', 'we', 'well', 'were', 'what']
stopwords += ['whatever', 'when', 'whence', 'whenever', 'where']
stopwords += ['whereafter', 'whereas', 'whereby', 'wherein', 'whereupon']
stopwords += ['wherever', 'whether', 'which', 'while', 'whither', 'who']
stopwords += ['whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with']
stopwords += ['within', 'without', 'would', 'yet', 'you', 'your']
stopwords += ['yours', 'yourself', 'yourselves', '1', '2', '3', '4', '5']
stopwords += ['6', '7', '8', '9', '10']
# given a list of words, remove any that are
# in a list of stop words
def removeStopwords(wordlist, stopwords):
return [w for w in wordlist if w not in stopwords]
# my version of this function removes stop words
# input is a string, output is a dictionary
def getwords(doc):
allwords = doc.split(' ')
wordlist = removeStopwords(allwords, stopwords)
return dict([(w,1) for w in wordlist])
# alternative to getwords returns unique
# ngrams instead
def gettwograms(doc):
allwords = doc.split(' ')
wordlist = removeStopwords(allwords, stopwords)
ngrams = [' '.join(wordlist[i:i+2]) for i in range(len(wordlist)-1)]
return dict([(w,1) for w in ngrams])
def getthreegrams(doc):
allwords = doc.split(' ')
wordlist = removeStopwords(allwords, stopwords)
ngrams = [' '.join(wordlist[i:i+3]) for i in range(len(wordlist)-2)]
return dict([(w,1) for w in ngrams])
def getfourgrams(doc):
allwords = doc.split(' ')
wordlist = removeStopwords(allwords, stopwords)
ngrams = [' '.join(wordlist[i:i+4]) for i in range(len(wordlist)-3)]
return dict([(w,1) for w in ngrams])
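# Illustrative usage sketch (added note, not part of the original module):
# each extractor returns a {feature: 1} dict, e.g.
# getwords('the quick brown fox jumps')
# -> {'quick': 1, 'brown': 1, 'fox': 1, 'jumps': 1} ('the' is a stop word)
# gettwograms('the quick brown fox jumps')
# -> {'quick brown': 1, 'brown fox': 1, 'fox jumps': 1}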
# high tf/idf terms in n-gram context
def gettfidfngrams(doc):
wordlist = doc.split('\n')
return dict([(w,1) for w in wordlist])
# coinflip guessing function
def coinflip():
import random
r = random.randint(0, 1)
if r: return 'y'
else: return 'n'
class classifier:
def __init__(self,getfeatures,filename=None):
# count feature/category combinations
self.fc={}
# count documents in each category
self.cc={}
self.getfeatures=getfeatures
# N.B. this isn't quite what Segaran has in the book
self.thresholds={}
def setthreshold(self,cat,t):
self.thresholds[cat]=t
def getthreshold(self,cat):
if cat not in self.thresholds: return 1.0
return self.thresholds[cat]
def setdb(self,dbfile):
self.con=sqlite.connect(dbfile)
self.con.execute('create table if not exists fc(feature,category,count)')
self.con.execute('create table if not exists cc(category,count)')
# increase count of feature/category pair
def incf(self,f,cat):
count=self.fcount(f,cat)
if count==0:
self.con.execute("insert into fc values ('%s','%s',1)" % (f,cat))
else:
self.con.execute("update fc set count=%d where feature='%s' and category='%s'" % (count+1,f,cat))
# increase the count of a category
def incc(self,cat):
count=self.catcount(cat)
if count==0:
self.con.execute("insert into cc values ('%s',1)" % (cat))
else:
self.con.execute("update cc set count=%d where category='%s'" % (count+1,cat))
# number of times a feature has appeared in a category
def fcount(self,f,cat):
res=self.con.execute('select count from fc where feature=? and category=?', (f,cat)).fetchone()
if res==None: return 0
else: return float(res[0])
# number of items in a category
def catcount(self,cat):
res=self.con.execute('select count from cc where category=?', (cat,)).fetchone()
if res==None: return 0
else: return float(res[0])
# total number of items
def totalcount(self):
res=self.con.execute('select sum(count) from cc').fetchone()
if res==None: return 0
else: return res[0]
# list of all categories
def categories(self):
cur=self.con.execute('select category from cc')
return [d[0] for d in cur]
# take item (i.e., document) and classification
def train(self,item,cat):
features=self.getfeatures(item)
# increment count for every feature with this category
for f in features:
self.incf(f,cat)
# increment count for this category
self.incc(cat)
self.con.commit()
# calculate probabilities
def fprob(self,f,cat):
if self.catcount(cat)==0: return 0
# total number of times this feature appeared in this category
# divided by total number of items in this category
return self.fcount(f,cat)/self.catcount(cat)
# calculate weighted probabilities
def weightedprob(self,f,cat,prf,weight=1.0,ap=0.5):
# current probability
basicprob=prf(f,cat)
# number of times feature appeared in all categories
totals=sum([self.fcount(f,c) for c in self.categories()])
# weighted average
bp=((weight*ap)+(totals*basicprob))/(weight+totals)
return bp
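# Worked example (added for clarity, not in the original): with the
# defaults weight=1.0 and ap=0.5, if a feature appeared 3 times across
# all categories and has basic probability 0.2 in this category, then
# bp = (1.0*0.5 + 3*0.2) / (1.0 + 3) = 1.1/4 = 0.275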
def classify(self,item,default=None):
probs={}
# N.B. I added this
best='n'
# find category with highest probability
maxprob=0.0
for cat in self.categories():
probs[cat]=self.prob(item,cat)
if probs[cat]>maxprob:
maxprob=probs[cat]
best=cat
# make sure probability exceeds threshold times next best
for cat in probs:
if cat==best: continue
# N.B. I modified this next line - complete kluge!
if probs[cat]*self.getthreshold(best)>probs.get(best, 0.0): return default
# if probs[cat]*self.getthreshold(best)>probs[best]: return default
return best
class naivebayes(classifier):
def docprob(self,item,cat):
features=self.getfeatures(item)
# multiply probabilities of all features together
p=1
for f in features:
p*=self.weightedprob(f,cat,self.fprob)
return p
def prob(self,item,cat):
catprob=self.catcount(cat)/self.totalcount()
docprob=self.docprob(item,cat)
return docprob*catprob
########## test scaffolding begins
# def sampletrain(cl):
# cl.train('the quick brown fox jumps','good')
# cl.train('nobody owns the water','good')
# cl.train('buy pharmaceuticals now','bad')
# cl.train('make quick money at the online casino','bad')
# cl.train('the quick rabbit jumps fences','good')
# cl=classifier(pcigetwords)
# sampletrain(cl)
# print "quick good"
# print cl.fcount('quick', 'good')
# print cl.fprob('quick', 'good')
# print "quick bad"
# print cl.fcount('quick', 'bad')
# print cl.fprob('quick', 'bad')
# print "casino good"
# print cl.fcount('casino', 'good')
# print cl.fprob('casino', 'good')
# print "casino bad"
# print cl.fcount('casino', 'bad')
# print cl.fprob('casino', 'bad')
# print "weightedprob first pass money good"
# print cl.weightedprob('money','good',cl.fprob)
# sampletrain(cl)
# print "weightedprob 2nd pass money good"
# print cl.weightedprob('money','good',cl.fprob)
# clnb=naivebayes(pcigetwords)
# sampletrain(clnb)
# print "quick rabbit good"
# print clnb.prob('quick rabbit','good')
# print "quick rabbit bad"
# print clnb.prob('quick rabbit','bad')
# print "quick rabbit classify"
# print clnb.classify('quick rabbit',default='unknown')
# print "quick money classify"
# print clnb.classify('quick money',default='unknown')
########## test scaffolding ends
|
williamjturkel/Digital-History-Hacks--2005-08-
|
bayesiandb.py
|
Python
|
mit
| 11,273
|
[
"CASINO"
] |
866f822cda295f305c544d03da0e9056388d3bdd31fb3517d3807a7d52abe925
|
#! /usr/bin/python
# -*- coding: iso-8859-15 -*-
from openturns import *
from openturns.viewer import ViewImage
triang = Triangular(1.0, 2.0, 4.0)
norm = Normal(-1.0, 1.0)
norm2 = Normal(3.0, 1.0)
aCollection = DistributionCollection(3)
aCollection[0] = Distribution(triang)
aCollection[1] = Distribution(norm)
aCollection[2] = Distribution(norm2)
aCollection[0].setWeight(0.20)
aCollection[1].setWeight(0.50)
aCollection[2].setWeight(0.30)
myDistribution = Mixture(aCollection)
# Mixture
myDistribution_pdf = myDistribution.drawPDF(-4.0,8.0,150)
myDistribution_pdf_draw = myDistribution_pdf.getDrawable(0)
myDistribution_pdf_draw.setColor("black")
myDistribution_pdf_draw.setLegendName("Real distribution")
myDistribution_pdf.setDrawable(myDistribution_pdf_draw,0)
myDistribution_pdf2 = Graph(myDistribution_pdf)
myDistribution_cdf = myDistribution.drawCDF(-4.0,8.0,150)
myDistribution_cdf_draw = myDistribution_cdf.getDrawable(0)
myDistribution_cdf_draw.setColor("black")
myDistribution_cdf_draw.setLegendName("Real distribution")
myDistribution_cdf.setDrawable(myDistribution_cdf_draw,0)
myDistribution_cdf2 = Graph(myDistribution_cdf)
# Sample
sample = myDistribution.getNumericalSample(10000)
#####################################
# Bandwidth selection : automatic method
# n=10^4 so the mixed method is used
#####################################
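# Added reference note (not in the original script): Silverman's rule of
# thumb gives h = 0.9 * min(sigma, IQR/1.34) * n**(-1.0/5); the automatic
# "mixed" method mentioned above is a compromise between the costly
# plug-in bandwidth estimator and this rule-of-thumb scaling, used by
# OpenTURNS for large samples.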
# Normal kernel smoothing
kernel = KernelSmoothing()
gaussianSmoothed = kernel.build(sample)
gaussianSmoothedPDF = gaussianSmoothed.drawPDF(-4,8,251)
gaussianSmoothedPDF_draw = gaussianSmoothedPDF.getDrawables()[0]
gaussianSmoothedPDF_draw.setColor("blue")
gaussianSmoothedPDF_draw.setLegendName("normal kernel")
myDistribution_pdf.add(gaussianSmoothedPDF_draw)
##myDistribution_pdf.setTitle("Gaussian Kernel Smoothing PDF")
##myDistribution_pdf.draw("pdf_gaussKernelSmooth", 640,480)
##ViewImage(myDistribution_pdf.getBitmap())
gaussianSmoothedCDF = gaussianSmoothed.drawCDF(-4,8,251)
gaussianSmoothedCDF_draw = gaussianSmoothedCDF.getDrawables()[0]
gaussianSmoothedCDF_draw.setColor("blue")
gaussianSmoothedCDF_draw.setLegendName("normal kernel")
myDistribution_cdf.add(gaussianSmoothedCDF_draw)
##myDistribution_cdf.setTitle("Gaussian Kernel Smoothing CDF")
##myDistribution_cdf.draw("cdf_gaussKernelSmooth", 640,480)
##ViewImage(myDistribution_cdf.getBitmap())
# Triangular kernel smoothing
kernel = KernelSmoothing(Distribution(Triangular()))
triangularSmoothed = kernel.build(sample)
triangularSmoothedPDF = triangularSmoothed.drawPDF(-4,8,251)
triangularSmoothedPDF_draw = triangularSmoothedPDF.getDrawables()[0]
triangularSmoothedPDF_draw.setColor("green")
triangularSmoothedPDF_draw.setLegendName("triangular kernel")
myDistribution_pdf.add(triangularSmoothedPDF_draw)
triangularSmoothedCDF = triangularSmoothed.drawCDF(-4,8,251)
triangularSmoothedCDF_draw = triangularSmoothedCDF.getDrawables()[0]
triangularSmoothedCDF_draw.setColor("green")
triangularSmoothedCDF_draw.setLegendName("triangular kernel")
myDistribution_cdf.add(triangularSmoothedCDF_draw)
# Epanechnikov kernel smoothing
kernel = KernelSmoothing(Distribution(Epanechnikov()))
epanechnikovSmoothed = kernel.build(sample)
epanechnikovSmoothedPDF = epanechnikovSmoothed.drawPDF(-4,8,251)
epanechnikovSmoothedPDF_draw = epanechnikovSmoothedPDF.getDrawables()[0]
epanechnikovSmoothedPDF_draw.setColor("red")
epanechnikovSmoothedPDF_draw.setLegendName("epanechnikov kernel")
myDistribution_pdf.add(epanechnikovSmoothedPDF_draw)
epanechnikovSmoothedCDF = epanechnikovSmoothed.drawCDF(-4,8,251)
epanechnikovSmoothedCDF_draw = epanechnikovSmoothedCDF.getDrawables()[0]
epanechnikovSmoothedCDF_draw.setColor("red")
epanechnikovSmoothedCDF_draw.setLegendName("epanechnikov kernel")
myDistribution_cdf.add(epanechnikovSmoothedCDF_draw)
# Show the graph
myDistribution_pdf.setTitle("Effect of the kernel selection")
myDistribution_pdf.draw("kernelSmoothing_pdf")
ViewImage(myDistribution_pdf.getBitmap())
myDistribution_cdf.setTitle("Effect of the kernel selection")
myDistribution_cdf.setLegendPosition("bottomright")
myDistribution_cdf.draw("kernelSmoothing_cdf")
ViewImage(myDistribution_cdf.getBitmap())
# Boundary treatment
exp = Exponential(2.0, 0.0)
expPDF = exp.drawPDF()
expPDF_draw = expPDF.getDrawable(0)
expPDF_draw.setColor("black")
expPDF.setDrawable(expPDF_draw,0)
expCDF = exp.drawCDF()
expCDF_draw = expCDF.getDrawable(0)
expCDF_draw.setColor("black")
expCDF.setDrawable(expCDF_draw,0)
sample2 = exp.getNumericalSample(10000)
kernel = KernelSmoothing()
# with boundary treatment
smoothedBoundary = kernel.build(sample2, True)
smoothedBoundaryPDF = smoothedBoundary.drawPDF()
smoothedBoundaryPDF_draw = smoothedBoundaryPDF.getDrawable(0)
smoothedBoundaryPDF_draw.setLegendName("whith boundary treatment")
expPDF.add(smoothedBoundaryPDF_draw)
smoothedBoundaryCDF = smoothedBoundary.drawCDF()
smoothedBoundaryCDF_draw = smoothedBoundaryCDF.getDrawable(0)
smoothedBoundaryCDF_draw.setLegendName("whith boundary treatment")
expCDF.add(smoothedBoundaryCDF_draw)
# without boundary treatment
smoothedNoBoundary = kernel.build(sample2)
smoothedNoBoundaryPDF = smoothedNoBoundary.drawPDF()
smoothedNoBoundaryPDF_draw = smoothedNoBoundaryPDF.getDrawable(0)
smoothedNoBoundaryPDF_draw.setColor("blue")
smoothedNoBoundaryPDF_draw.setLegendName("without boundary treatment")
expPDF.add(smoothedNoBoundaryPDF_draw)
smoothedNoBoundaryCDF = smoothedNoBoundary.drawCDF()
smoothedNoBoundaryCDF_draw = smoothedNoBoundaryCDF.getDrawable(0)
smoothedNoBoundaryCDF_draw.setColor("blue")
smoothedNoBoundaryCDF_draw.setLegendName("without boundary treatment")
expCDF.add(smoothedNoBoundaryCDF_draw)
# Show the graph
expPDF.setTitle("Effect of the boundary treatment")
expPDF.draw("kernelSmoothing_boundary_pdf")
ViewImage(expPDF.getBitmap())
expCDF.setTitle("Effect of the boundary treatment")
expCDF.setLegendPosition("bottomright")
expCDF.draw("kernelSmoothing_boundary_cdf")
ViewImage(expCDF.getBitmap())
|
dbarbier/privot-doc
|
src/UseCasesGuide/script_kernelSmoothing.py
|
Python
|
lgpl-2.1
| 5,963
|
[
"Gaussian"
] |
a970dc3591a93afdb4fd083d12e7494529833ece60708af97945f9b943f3f134
|
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_noop, ugettext_lazy as _
from corehq.apps.accounting.models import SoftwarePlanEdition as Edition, SoftwareProductType as Product, FeatureType
DESC_BY_EDITION = {
Edition.COMMUNITY: {
'name': _("Community"),
'description': _("For projects in a pilot phase with a small group (up to 50) of "
"mobile users that only need very basic CommCare features."),
},
Edition.STANDARD: {
'name': _("Standard"),
'description': _("For projects with a medium set (up to 100) of mobile users that want to "
"build in limited SMS workflows and have increased data security needs."),
},
Edition.PRO: {
'name': _("Pro"),
'description': _("For projects with a large group (up to 500) of mobile users that want to "
"build in comprehensive SMS workflows and have increased reporting needs."),
},
Edition.ADVANCED: {
'name': _("Advanced"),
'description': _("For projects scaling to an even larger group (up to 1,000) of mobile users "
"that want the full CommCare feature set and dedicated support from Dimagi "
"staff.")
},
Edition.ENTERPRISE: {
'name': _("Enterprise"),
'description': _("For projects scaling regionally or country wide (1,001+ people) that require "
"the full CommCare feature set. Your organization will receive discounted "
"pricing and dedicated enterprise-level support from Dimagi.")
}
}
FEATURE_TYPE_TO_NAME = {
FeatureType.SMS: _("SMS Messages"),
FeatureType.USER: _("Mobile Workers"),
}
# This exists here specifically so that text can be translated
def ensure_product(product):
if product not in [s[0] for s in Product.CHOICES]:
raise ValueError("Unsupported Product")
def get_feature_name(feature_type, product):
ensure_product(product)
if feature_type not in [f[0] for f in FeatureType.CHOICES]:
raise ValueError("Unsupported Feature")
return {
FeatureType.USER: {
Product.COMMCARE: _("Mobile Users"),
Product.COMMCONNECT: _("Mobile Users"),
Product.COMMTRACK: _("Facilities"),
}[product],
FeatureType.SMS: _("Monthly SMS"),
}[feature_type]
class PricingTableCategories(object):
CORE = 'core'
MOBILE = 'mobile'
WEB = 'web'
ANALYTICS = 'analytics'
SMS = 'sms'
USER_MANAGEMENT_AND_SECURITY = 'user_management_security'
SUPPORT = 'support'
@classmethod
def get_wiki_url(cls, category):
return {
cls.MOBILE: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-Mobile"),
cls.WEB: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-Web"),
cls.ANALYTICS: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-Analytics"),
cls.SMS: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-SMS(CommConnect)"),
cls.USER_MANAGEMENT_AND_SECURITY: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-UserManagementandSecurity"),
cls.SUPPORT: _("https://wiki.commcarehq.org/display/commcarepublic/CommCare+Plan+Details#CommCarePlanDetails-Support"),
}.get(category)
@classmethod
def get_title(cls, category, product):
ensure_product(product)
return {
cls.MOBILE: _("Mobile"),
cls.WEB: _("Web"),
cls.ANALYTICS: _("Analytics"),
cls.SMS: {
Product.COMMCARE: _("SMS (CommConnect)"),
Product.COMMCONNECT: _("SMS"),
Product.COMMTRACK: _("SMS"),
}[product],
cls.USER_MANAGEMENT_AND_SECURITY: _("User Management and Security"),
cls.SUPPORT: _("Support"),
}.get(category)
@classmethod
def get_features(cls, category):
f = PricingTableFeatures
return {
cls.CORE: (
f.PRICING,
f.MOBILE_LIMIT,
f.ADDITIONAL_MOBILE_USER,
),
cls.MOBILE: (
f.JAVA_AND_ANDROID,
f.MULTIMEDIA_SUPPORT,
),
cls.WEB: (
f.APP_BUILDER,
f.EXCHANGE,
f.API_ACCESS,
f.LOOKUP_TABLES,
f.WEB_APPS,
f.CUSTOM_BRANDING,
),
cls.ANALYTICS: (
f.DATA_EXPORT,
f.STANDARD_REPORTS,
f.CROSS_PROJECT_REPORTS,
f.CUSTOM_REPORTS,
f.ADM,
),
cls.SMS: (
f.OUTBOUND_SMS,
f.RULES_ENGINE,
f.ANDROID_GATEWAY,
f.SMS_DATA_COLLECTION,
f.INBOUND_SMS,
f.INCLUDED_SMS_DIMAGI,
f.INCLUDED_SMS_CUSTOM,
),
cls.USER_MANAGEMENT_AND_SECURITY: (
f.USER_GROUPS,
f.DATA_SECURITY_PRIVACY,
f.ADVANCED_ROLES,
f.BULK_CASE_USER_MANAGEMENT,
f.HIPAA_COMPLIANCE,
f.DE_ID_DATA,
),
cls.SUPPORT: (
f.COMMUNITY_SUPPORT,
f.EMAIL_SUPPORT,
f.PHONE_SUPPORT,
f.APP_TROUBLESHOOTING,
f.DEDICATED_SUPPORT_STAFF,
f.DEDICATED_ACCOUNT_MANAGEMENT,
),
}[category]
class PricingTableFeatures(object):
SOFTWARE_PLANS = 'software_plans'
PRICING = 'pricing'
MOBILE_LIMIT = 'mobile_limit'
ADDITIONAL_MOBILE_USER = 'additional_mobile_user'
JAVA_AND_ANDROID = 'java_and_android'
MULTIMEDIA_SUPPORT = 'multimedia_support'
APP_BUILDER = 'app_builder'
EXCHANGE = 'exchange'
API_ACCESS = 'api_access'
LOOKUP_TABLES = 'lookup_tables'
WEB_APPS = 'web_apps'
CUSTOM_BRANDING = 'custom_branding'
DATA_EXPORT = 'data_export'
STANDARD_REPORTS = 'standard_reports'
CROSS_PROJECT_REPORTS = 'cross_project_reports'
CUSTOM_REPORTS = 'custom_reports'
ADM = 'adm'
OUTBOUND_SMS = 'outbound_sms'
RULES_ENGINE = 'rules_engine'
ANDROID_GATEWAY = 'android_gateway'
SMS_DATA_COLLECTION = 'sms_data_collection'
INBOUND_SMS = 'inbound_sms'
INCLUDED_SMS_DIMAGI = 'included_sms_dimagi'
INCLUDED_SMS_CUSTOM = 'included_sms_custom'
USER_GROUPS = 'user_groups'
DATA_SECURITY_PRIVACY = 'data_security_privacy'
ADVANCED_ROLES = 'advanced_roles'
BULK_CASE_USER_MANAGEMENT = 'bulk_case_user_management'
HIPAA_COMPLIANCE = 'hipaa_compliance'
DE_ID_DATA = 'de_id_data'
COMMUNITY_SUPPORT = 'community_support'
EMAIL_SUPPORT = 'email_support'
PHONE_SUPPORT = 'phone_support'
APP_TROUBLESHOOTING = 'app_troubleshooting'
DEDICATED_SUPPORT_STAFF = 'dedicated_support_staff'
DEDICATED_ACCOUNT_MANAGEMENT = 'dedicated_account_management'
@classmethod
def get_title(cls, feature, product):
ensure_product(product)
return {
cls.SOFTWARE_PLANS: _("Software Plans"),
cls.PRICING: _("Pricing*"),
cls.MOBILE_LIMIT: {
Product.COMMCARE: _("Mobile Users"),
Product.COMMCONNECT: _("Mobile Users"),
Product.COMMTRACK: _("Facilities")
}[product],
cls.ADDITIONAL_MOBILE_USER: {
Product.COMMCARE: _("Price per Additional Mobile User"),
Product.COMMCONNECT: _("Price per Additional Mobile User"),
Product.COMMTRACK: _("Price per Additional Facility")
}[product],
cls.JAVA_AND_ANDROID: _("Java Feature Phones and Android Phones"),
cls.MULTIMEDIA_SUPPORT: _("Multimedia Support"),
cls.APP_BUILDER: {
Product.COMMCARE: _('CommCare Application Builder'),
Product.COMMCONNECT: _('CommCare Application Builder'),
Product.COMMTRACK: _('CommTrack Application Builder'),
}[product],
cls.EXCHANGE: _('CommCare Exchange (<a href="http://www.commcarehq.org/exchange/">visit the exchange</a>)'),
cls.API_ACCESS: _("API Access"),
cls.LOOKUP_TABLES: _("Lookup Tables"),
cls.WEB_APPS: _('Web-based Applications (<a href="https://confluence.dimagi.com/display/commcarepublic/CloudCare+-+Web+Data+Entry">CloudCare</a>)'),
cls.CUSTOM_BRANDING: _("Custom Branding"),
cls.DATA_EXPORT: _("Data Export"),
cls.STANDARD_REPORTS: _("Standard Reports"),
cls.CROSS_PROJECT_REPORTS: _("Cross-Project Reports"),
cls.CUSTOM_REPORTS: _("Custom Reports Access"),
cls.ADM: _('Active Data Management (<a href="http://www.commcarehq.org/tour/adm/">read more</a>)'),
cls.OUTBOUND_SMS: _("Outbound Messaging"),
cls.RULES_ENGINE: _("Rules Engine"),
cls.ANDROID_GATEWAY: _("Android-based SMS Gateway"),
cls.SMS_DATA_COLLECTION: _("SMS Data Collection"),
cls.INBOUND_SMS: _("Inbound SMS (where available)"),
cls.INCLUDED_SMS_DIMAGI: _("Free Messages (Dimagi Gateway)**"),
cls.INCLUDED_SMS_CUSTOM: _("Messages (Your Gateway)"),
cls.USER_GROUPS: _("User Groups"),
cls.DATA_SECURITY_PRIVACY: _("Data Security and Privacy"),
cls.ADVANCED_ROLES: _("Advanced Role-Based Access"),
cls.BULK_CASE_USER_MANAGEMENT: _("Bulk Case and User Management"),
cls.HIPAA_COMPLIANCE: _("HIPAA Compliance Assurance"),
cls.DE_ID_DATA: _("De-identified Data"),
cls.COMMUNITY_SUPPORT: {
Product.COMMCARE: _('Community Support (<a href="https://groups.google.com/forum/?fromgroups#!forum/commcare-users">visit commcare-users</a>)'),
Product.COMMCONNECT: _('Community Support (<a href="https://groups.google.com/forum/?fromgroups#!forum/commcare-users">visit commcare-users</a>)'),
Product.COMMTRACK: _('Community Support (<a href="https://groups.google.com/forum/?fromgroups#!forum/commtrack-users">visit commtrack-users</a>)'),
}[product],
cls.EMAIL_SUPPORT: _("Direct Email Support"),
cls.PHONE_SUPPORT: _("Phone Support"),
cls.APP_TROUBLESHOOTING: _("Application Troubleshooting"),
cls.DEDICATED_SUPPORT_STAFF: _("Dedicated Support Staff"),
cls.DEDICATED_ACCOUNT_MANAGEMENT: _("Dedicated Enterprise Account Management"),
}[feature]
@classmethod
def get_columns(cls, feature):
return {
cls.SOFTWARE_PLANS: (Edition.COMMUNITY, Edition.STANDARD, Edition.PRO, Edition.ADVANCED, Edition.ENTERPRISE),
cls.PRICING: (_("Free"), _("$100 /month"), _("$500 /month"), _("$1,000 /month"), _('(<a href="http://www.dimagi.com/collaborate/contact-us/" target="_blank">Contact Us</a>)')),
cls.MOBILE_LIMIT: (_("50"), _("100"), _("500"), _("1,000"), _("Unlimited / Discounted Pricing")),
cls.ADDITIONAL_MOBILE_USER: (_("1 USD /month"), _("1 USD /month"), _("1 USD /month"), _("1 USD /month"), _("Unlimited / Discounted Pricing")),
cls.JAVA_AND_ANDROID: (True, True, True, True, True),
cls.MULTIMEDIA_SUPPORT: (True, True, True, True, True),
cls.APP_BUILDER: (True, True, True, True, True),
cls.EXCHANGE: (True, True, True, True, True),
cls.API_ACCESS: (False, True, True, True, True),
cls.LOOKUP_TABLES: (False, True, True, True, True),
cls.WEB_APPS: (False, False, True, True, True),
cls.CUSTOM_BRANDING: (False, False, False, True, True),
cls.DATA_EXPORT: (True, True, True, True, True),
cls.STANDARD_REPORTS: (True, True, True, True, True),
cls.CROSS_PROJECT_REPORTS: (False, True, True, True, True),
cls.CUSTOM_REPORTS: (False, False, True, True, True),
cls.ADM: (False, False, False, True, True),
cls.OUTBOUND_SMS: (False, True, True, True, True),
cls.RULES_ENGINE: (False, True, True, True, True),
cls.ANDROID_GATEWAY: (False, True, True, True, True),
cls.SMS_DATA_COLLECTION: (False, False, True, True, True),
cls.INBOUND_SMS: (False, False, True, True, True),
cls.INCLUDED_SMS_DIMAGI: (False, _("100 /month"), _("500 /month"), _("1,000 /month"), _("2,000 /month")),
cls.INCLUDED_SMS_CUSTOM: (False, _("1 cent/SMS"), _("1 cent/SMS"), _("1 cent/SMS"), _("1 cent/SMS")),
cls.USER_GROUPS: (True, True, True, True, True),
cls.DATA_SECURITY_PRIVACY: (True, True, True, True, True),
cls.ADVANCED_ROLES: (False, True, True, True, True),
cls.BULK_CASE_USER_MANAGEMENT: (False, True, True, True, True),
cls.HIPAA_COMPLIANCE: (False, False, True, True, True),
cls.DE_ID_DATA: (False, False, True, True, True),
cls.COMMUNITY_SUPPORT: (True, True, True, True, True),
cls.EMAIL_SUPPORT: (False, True, True, True, True),
cls.PHONE_SUPPORT: (False, False, True, True, True),
cls.APP_TROUBLESHOOTING: (False, False, True, True, True),
cls.DEDICATED_SUPPORT_STAFF: (False, False, False, True, True),
cls.DEDICATED_ACCOUNT_MANAGEMENT: (False, False, False, False, True),
}[feature]
class PricingTable(object):
STRUCTURE_BY_PRODUCT = {
Product.COMMCARE: (
PricingTableCategories.CORE,
PricingTableCategories.MOBILE,
PricingTableCategories.WEB,
PricingTableCategories.ANALYTICS,
PricingTableCategories.SMS,
PricingTableCategories.USER_MANAGEMENT_AND_SECURITY,
PricingTableCategories.SUPPORT,
),
Product.COMMCONNECT: (
PricingTableCategories.CORE,
PricingTableCategories.MOBILE,
PricingTableCategories.WEB,
PricingTableCategories.ANALYTICS,
PricingTableCategories.SMS,
PricingTableCategories.USER_MANAGEMENT_AND_SECURITY,
PricingTableCategories.SUPPORT,
),
Product.COMMTRACK: (
PricingTableCategories.CORE,
PricingTableCategories.MOBILE,
PricingTableCategories.SMS,
PricingTableCategories.WEB,
PricingTableCategories.ANALYTICS,
PricingTableCategories.USER_MANAGEMENT_AND_SECURITY,
PricingTableCategories.SUPPORT,
),
}
VISIT_WIKI_TEXT = ugettext_noop("Visit the help site to learn more.")
@classmethod
def get_footer_by_product(cls, product, domain=None):
ensure_product(product)
from corehq.apps.domain.views import ProBonoStaticView
return (
ugettext_noop(
mark_safe(
_('*Local taxes and other country-specific fees not included. Dimagi provides pro-bono '
'software plans on a needs basis. To learn more about this opportunity or see if your '
'program qualifies, please fill out our <a href="%(url)s">pro-bono form</a>.') % {
'url': (reverse('pro_bono', args=[domain]) if domain is
not None else reverse(ProBonoStaticView.urlname)),
},
)
),
_("**Additional incoming and outgoing messages will be charged on a per-message fee, which "
"depends on the telecommunications provider and country. Please note that this does not apply "
"to the unlimited messages option, which falls under the category below."),
)
@classmethod
def get_table_by_product(cls, product, domain=None):
ensure_product(product)
categories = cls.STRUCTURE_BY_PRODUCT[product]
editions = PricingTableFeatures.get_columns(PricingTableFeatures.SOFTWARE_PLANS)
edition_data = [(edition.lower(), DESC_BY_EDITION[edition]) for edition in editions]
table_sections = []
for category in categories:
features = PricingTableCategories.get_features(category)
feature_rows = []
for feature in features:
feature_rows.append({
'title': PricingTableFeatures.get_title(feature, product),
'columns': [(editions[ind].lower(), col) for ind, col in
enumerate(PricingTableFeatures.get_columns(feature))],
})
table_sections.append({
'title': PricingTableCategories.get_title(category, product),
'url': PricingTableCategories.get_wiki_url(category),
'features': feature_rows,
'category': category,
})
table = {
'editions': edition_data,
'title': PricingTableFeatures.get_title(PricingTableFeatures.SOFTWARE_PLANS, product),
'sections': table_sections,
'visit_wiki_text': cls.VISIT_WIKI_TEXT,
'footer': cls.get_footer_by_product(product, domain=domain),
}
return table
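# Illustrative sketch (added note, not part of the original module) of the
# structure returned by PricingTable.get_table_by_product(), per the code above:
# {
#     'editions': [('community', {'name': ..., 'description': ...}), ...],
#     'title': <title of the SOFTWARE_PLANS row>,
#     'sections': [{'title': ..., 'url': ..., 'features': [...], 'category': ...}, ...],
#     'visit_wiki_text': ...,
#     'footer': (...),
# }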
|
SEL-Columbia/commcare-hq
|
corehq/apps/accounting/user_text.py
|
Python
|
bsd-3-clause
| 17,631
|
[
"VisIt"
] |
42a807575a28d494ef383dab5f25fa7229453ff8602147f7a04a862cff37544f
|
#!/usr/bin/python
# Copyright(C) 2013 David Ream
# Released under Biopython license. http://www.biopython.org/DIST/LICENSE
# Do not remove this comment
import os
import argparse
import time
import sys
from Bio import Entrez
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description='Determine information about the genbank files under study, and report this information for use by other modules.')
parser.add_argument("-i", "--infile", dest="infile", metavar="FILE", default='./viral_info.txt',
help="Folder containing all genbank files for use by the program.")
parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="FOLDER", default='./sequences/',
help="Folder where results will be stored.")
return parser.parse_args()
# A function that will check the command line input for errors. If serious errors exist, it will exit.
def check_options(parsed_args):
if os.path.exists(parsed_args.infile):
infile = parsed_args.infile
else:
print "The file %s does not exist." % parsed_args.infile
sys.exit()
# if the directory that the user specifies does not exist, then the program makes it for them.
if not os.path.isdir(parsed_args.outfolder):
os.makedirs(parsed_args.outfolder)
outfolder = parsed_args.outfolder
return infile, outfolder
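# Illustrative note (added, inferred from main() below): the infile is
# expected to hold one tab-separated "<name>\t<accession>" pair per line,
# for example (tab-separated):
# Enterobacteria_phage_T4    NC_000866
# Enterobacteria_phage_lambda    NC_001416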
def download_genbank_files(inlist, outfolder):
for name, accession in inlist:
net_handle = Entrez.efetch(db="nucleotide", id=accession, rettype="gb")
out_handle = open(outfolder + name + '.gbk', "w")
out_handle.write(net_handle.read())
out_handle.close()
net_handle.close()
def main():
# Timer, used during debug to determine the fastest implementation for a code block
start = time.time()
Entrez.email = 'jamietmorton@gmail.com'
parsed_args = parser_code()
infile, outfolder = check_options(parsed_args)
in_list = []
for item1,item2 in [(i.strip().split('\t')) for i in open(infile).readlines()]:
in_list.append((item1, item2))
download_genbank_files(in_list, outfolder)
print time.time() - start
if __name__ == '__main__':
main()
|
idoerg/BOA
|
tools/download_script.py
|
Python
|
gpl-3.0
| 2,402
|
[
"Biopython"
] |
386e899473e3486c8c9d334bf7b4fd39a727a0936979d636982dc7b06b35ddd1
|
from django.conf.urls import patterns, include, url, static
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'collabCTF.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'collabCTF.views.index', name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^settings$', 'collabCTF.views.user_settings', name='settings'),
url(r'^profile$', 'collabCTF.views.profile', name='profile'),
url(r'^login$', 'collabCTF.views.log_in', name='login'),
url(r'^logout$', 'django.contrib.auth.views.logout', {'next_page': "/login"}, name='logout'),
url(r'^register$', 'collabCTF.views.register', name='register'),
url(r'^reports$', 'collabCTF.views.reports', name='reports'),
url(r'^ctf-tools$', 'tools.views.ctf_tools', name='ctf_tools'),
url(r'^ctf/add$', 'competition.views.add_ctf', name='add_ctf'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/$', 'competition.views.view_ctf', name='view_ctf'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/update$', 'competition.views.update_ctf', name='update_ctf'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/delete$', 'competition.views.delete_ctf', name='delete_ctf'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/add$', 'competition.views.add_challenge', name='add_challenge'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/(?P<chall_slug>[a-z\d_\-]+)/$', 'competition.views.view_challenge',
name='view_challenge'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/(?P<chall_slug>[a-z\d_\-]+)/update$', 'competition.views.update_challenge',
name='update_challenge'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/(?P<chall_slug>[a-z\d_\-]+)/delete$', 'competition.views.delete_challenge',
name='delete_challenge'),
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/(?P<chall_slug>[a-z\d_\-]+)/add$', 'competition.views.add_file',
name='add_file'),
# ajax
url(r'^ctf/(?P<ctf_slug>[a-z\d_\-]+)/.chart$', 'competition.ajax.chart_data', name='ctf_chart'),
url(r'^tools/.hash$', 'tools.ajax.hash_val', name='tools_hash'),
url(r'^tools/.rot$', 'tools.ajax.rot_val', name='tools_rot'),
url(r'^tools/.base_conversions$', 'tools.ajax.base_conversion_val', name='tools_base_conversion'),
url(r'^tools/.xor$', 'tools.ajax.xor_val', name='tools_xor'),
url(r'^tools/.url-quote$', 'tools.ajax.quote_url', name='tools_quote'),
url(r'^tools/.url-unquote$', 'tools.ajax.unquote_url', name='tools_unquote'),
url(r'^.challenge-visit$', 'competition.ajax.track_challenge_visit', name='track_challenge_visit'),
)
if settings.DEBUG:
if 'MEDIA_ROOT' in dir(settings):
media_root = settings.MEDIA_ROOT
else:
media_root = 'files'
urlpatterns += static.static(settings.MEDIA_URL, document_root=media_root)
|
HackUCF/collabCTF
|
collabCTF/urls.py
|
Python
|
mit
| 2,840
|
[
"VisIt"
] |
0a99f3e31e80ddca5fb441d9a1af07fe0019e7ef4f949614c6cc4ac1800cca2e
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
# make sure to have the same regression image on all platforms.
renWin.SetMultiSamples(0)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Force a starting random value
raMath = vtk.vtkMath()
raMath.RandomSeed(6)
# Generate random attributes on a plane
#
ps = vtk.vtkPlaneSource()
ps.SetXResolution(10)
ps.SetYResolution(10)
ag = vtk.vtkRandomAttributeGenerator()
ag.SetInputConnection(ps.GetOutputPort())
ag.GenerateAllDataOn()
ss = vtk.vtkSphereSource()
ss.SetPhiResolution(16)
ss.SetThetaResolution(32)
tg = vtk.vtkTensorGlyph()
tg.SetInputConnection(ag.GetOutputPort())
tg.SetSourceConnection(ss.GetOutputPort())
tg.SetScaleFactor(0.1)
tg.SetMaxScaleFactor(10)
tg.ClampScalingOn()
n = vtk.vtkPolyDataNormals()
n.SetInputConnection(tg.GetOutputPort())
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(n.GetOutputPort())
a = vtk.vtkActor()
a.SetMapper(pdm)
pm = vtk.vtkPolyDataMapper()
pm.SetInputConnection(ps.GetOutputPort())
pa = vtk.vtkActor()
pa.SetMapper(pm)
ren1.AddActor(a)
ren1.AddActor(pa)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.Render()
#iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/General/Testing/Python/TestRandomAttributeGenerator.py
|
Python
|
bsd-3-clause
| 1,472
|
[
"VTK"
] |
4f371cd1977a8c189d761c8329058fe57b12d2e048e52345c9000532aef544c5
|
"""
General helper functions that don't fit neatly under any given category.
They provide some useful string and conversion methods that might
be of use when designing your own game.
"""
from __future__ import division, print_function
from builtins import object, range
from future.utils import viewkeys, raise_
import os
import sys
import imp
import types
import math
import re
import textwrap
import random
from importlib import import_module
from inspect import ismodule, trace, getmembers, getmodule
from collections import defaultdict, OrderedDict
from twisted.internet import threads, defer, reactor
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from evennia.utils import logger
_MULTIMATCH_SEPARATOR = settings.SEARCH_MULTIMATCH_SEPARATOR
try:
import cPickle as pickle
except ImportError:
import pickle
ENCODINGS = settings.ENCODINGS
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
def is_iter(iterable):
"""
Checks if an object behaves iterably.
Args:
iterable (any): Entity to check for iterability.
Returns:
is_iterable (bool): If `iterable` is iterable or not.
Notes:
Strings are *not* accepted as iterable (although they are
actually iterable), since string iterations are usually not
what we want to do with a string.
"""
return hasattr(iterable, '__iter__')
def make_iter(obj):
"""
Makes sure that the object is always iterable.
Args:
obj (any): Object to make iterable.
Returns:
iterable (list or iterable): The same object
passed-through or made iterable.
"""
return obj if hasattr(obj, '__iter__') else [obj]
def wrap(text, width=_DEFAULT_WIDTH, indent=0):
"""
Safely wrap text to a certain number of characters.
Args:
text (str): The text to wrap.
width (int, optional): The number of characters to wrap to.
indent (int): How much to indent new lines (the first line
will not be indented)
Returns:
text (str): Properly wrapped text.
"""
if not text:
return ""
text = to_unicode(text)
indent = " " * indent
return to_str(textwrap.fill(text, width, subsequent_indent=indent))
# alias - fill
fill = wrap
def pad(text, width=_DEFAULT_WIDTH, align="c", fillchar=" "):
"""
Pads to a given width.
Args:
text (str): Text to pad.
width (int, optional): The width to pad to, in characters.
align (str, optional): This is one of 'c', 'l' or 'r' (center,
left or right).
fillchar (str, optional): The character to fill with.
Returns:
text (str): The padded text.
"""
align = align if align in ('c', 'l', 'r') else 'c'
fillchar = fillchar[0] if fillchar else " "
if align == 'l':
return text.ljust(width, fillchar)
elif align == 'r':
return text.rjust(width, fillchar)
else:
return text.center(width, fillchar)
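# Examples (added for illustration):
# pad("Hello", width=11, fillchar="-") -> "---Hello---"
# pad("Hello", width=11, align="l", fillchar="-") -> "Hello------"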
def crop(text, width=_DEFAULT_WIDTH, suffix="[...]"):
"""
Crop text to a certain width, throwing away text from too-long
lines.
Args:
text (str): Text to crop.
width (int, optional): Width of line to crop, in characters.
suffix (str, optional): This is appended to the end of cropped
lines to show that the line actually continues. Cropping
will be done so that the suffix will also fit within the
given width. If width is too small to fit both crop and
suffix, the suffix will be dropped.
Returns:
text (str): The cropped text.
"""
utext = to_unicode(text)
ltext = len(utext)
if ltext <= width:
return text
else:
lsuffix = len(suffix)
utext = utext[:width] if lsuffix >= width else "%s%s" % (utext[:width - lsuffix], suffix)
return to_str(utext)
def dedent(text):
"""
Safely clean all whitespace at the left of a paragraph.
Args:
text (str): The text to dedent.
Returns:
text (str): Dedented string.
Notes:
This is useful for preserving triple-quoted string indentation
while still shifting it all to be next to the left edge of the
display.
"""
if not text:
return ""
return textwrap.dedent(text)
def list_to_string(inlist, endsep="and", addquote=False):
"""
This pretty-formats a list as string output, adding an optional
alternative separator to the second to last entry. If `addquote`
is `True`, the outgoing strings will be surrounded by quotes.
Args:
inlist (list): The list to print.
endsep (str, optional): If set, the last item separator will
be replaced with this value.
addquote (bool, optional): This will surround all outgoing
values with double quotes.
Returns:
liststr (str): The list represented as a string.
Examples:
```python
# no endsep:
[1,2,3] -> '1, 2, 3'
# with endsep=='and':
[1,2,3] -> '1, 2 and 3'
# with addquote and endsep
[1,2,3] -> '"1", "2" and "3"'
```
"""
if not endsep:
endsep = ","
else:
endsep = " " + endsep
if not inlist:
return ""
if addquote:
if len(inlist) == 1:
return "\"%s\"" % inlist[0]
return ", ".join("\"%s\"" % v for v in inlist[:-1]) + "%s %s" % (endsep, "\"%s\"" % inlist[-1])
else:
if len(inlist) == 1:
return str(inlist[0])
return ", ".join(str(v) for v in inlist[:-1]) + "%s %s" % (endsep, inlist[-1])
def wildcard_to_regexp(instring):
"""
Converts a player-supplied string that may have wildcards in it to
regular expressions. This is useful for name matching.
Args:
instring (string): A string that may potentially contain
wildcards (`*` or `?`).
Returns:
regex (str): A string where wildcards were replaced with
regular expressions.
"""
regexp_string = ""
# If the string starts with an asterisk, we can't impose the beginning of
# string (^) limiter.
if instring[0] != "*":
regexp_string += "^"
# Replace any occurrences of * or ? with the appropriate groups.
regexp_string += instring.replace("*", "(.*)").replace("?", "(.{1})")
# If there's an asterisk at the end of the string, we can't impose the
# end of string ($) limiter.
if instring[-1] != "*":
regexp_string += "$"
return regexp_string
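# Example (added for illustration):
# wildcard_to_regexp("J*n?") -> "^J(.*)n(.{1})$"
# i.e. "J" at the start, anything in the middle, and "n" followed by
# exactly one character at the end.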
def time_format(seconds, style=0):
"""
Function to return a 'prettified' version of a value in seconds.
Args:
seconds (int): Number of seconds to format.
style (int): One of the following styles:
0. "1d 08:30"
1. "1d"
2. "1 day, 8 hours, 30 minutes"
3. "1 day, 8 hours, 30 minutes, 10 seconds"
"""
if seconds < 0:
seconds = 0
else:
# We'll just use integer math, no need for decimal precision.
seconds = int(seconds)
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
if style == 0:
"""
Standard colon-style output.
"""
if days > 0:
retval = '%id %02i:%02i' % (days, hours, minutes,)
else:
retval = '%02i:%02i' % (hours, minutes,)
return retval
elif style == 1:
"""
Simple, abbreviated form that only shows the highest time amount.
"""
if days > 0:
return '%id' % (days,)
elif hours > 0:
return '%ih' % (hours,)
elif minutes > 0:
return '%im' % (minutes,)
else:
return '%is' % (seconds,)
elif style == 2:
"""
Full-detailed, long-winded format. We ignore seconds.
"""
days_str = hours_str = ''
minutes_str = '0 minutes'
if days > 0:
if days == 1:
days_str = '%i day, ' % days
else:
days_str = '%i days, ' % days
if days or hours > 0:
if hours == 1:
hours_str = '%i hour, ' % hours
else:
hours_str = '%i hours, ' % hours
if hours or minutes > 0:
if minutes == 1:
minutes_str = '%i minute ' % minutes
else:
minutes_str = '%i minutes ' % minutes
retval = '%s%s%s' % (days_str, hours_str, minutes_str)
elif style == 3:
"""
Full-detailed, long-winded format. Includes seconds.
"""
days_str = hours_str = minutes_str = seconds_str = ''
if days > 0:
if days == 1:
days_str = '%i day, ' % days
else:
days_str = '%i days, ' % days
if days or hours > 0:
if hours == 1:
hours_str = '%i hour, ' % hours
else:
hours_str = '%i hours, ' % hours
if hours or minutes > 0:
if minutes == 1:
minutes_str = '%i minute ' % minutes
else:
minutes_str = '%i minutes ' % minutes
if minutes or seconds > 0:
if seconds == 1:
seconds_str = '%i second ' % seconds
else:
seconds_str = '%i seconds ' % seconds
retval = '%s%s%s%s' % (days_str, hours_str, minutes_str, seconds_str)
return retval.strip()
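# Examples (added for illustration), for seconds=93000 (1 day, 1 h, 50 min):
# style 0 -> '1d 01:50'
# style 1 -> '1d'
# style 2 -> '1 day, 1 hour, 50 minutes'
# style 3 -> '1 day, 1 hour, 50 minutes' (no seconds component left here)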
def datetime_format(dtobj):
"""
Pretty-prints the time since a given time.
Args:
dtobj (datetime): A datetime object, e.g. from Django's
`DateTimeField`.
Returns:
deltatime (str): A string describing how long ago `dtobj`
took place.
"""
year, month, day = dtobj.year, dtobj.month, dtobj.day
hour, minute, second = dtobj.hour, dtobj.minute, dtobj.second
now = timezone.now()
if year < now.year:
# another year
timestring = str(dtobj.date())
elif dtobj.date() < now.date():
# another date, same year
timestring = "%02i-%02i" % (day, month)
elif hour < now.hour - 1:
# same day, more than 1 hour ago
timestring = "%02i:%02i" % (hour, minute)
else:
# same day, less than 1 hour ago
timestring = "%02i:%02i:%02i" % (hour, minute, second)
return timestring
def host_os_is(osname):
"""
Check to see if the host OS matches the query.
Args:
osname (str): Common names are "posix" (linux/unix/mac) and
"nt" (windows).
Returns:
is_os (bool): If the os matches or not.
"""
return os.name == osname
def get_evennia_version():
"""
Helper method for getting the current evennia version.
Returns:
version (str): The version string.
"""
import evennia
return evennia.__version__
def pypath_to_realpath(python_path, file_ending='.py'):
"""
Converts a dotted Python path to an absolute path under the
Evennia library directory or under the current game directory.
Args:
python_path (str): a dot-python path
file_ending (str): a file ending, including the period.
Returns:
abspaths (list of str): The two absolute paths created by prepending
`EVENNIA_DIR` and `GAME_DIR` respectively. These are checked for
existence before being returned, so this may be an empty list.
"""
pathsplit = python_path.strip().split('.')
paths = [os.path.join(settings.EVENNIA_DIR, *pathsplit),
os.path.join(settings.GAME_DIR, *pathsplit)]
if file_ending:
# attach file ending to the paths if not already set (a common mistake)
file_ending = ".%s" % file_ending if not file_ending.startswith(".") else file_ending
paths = ["%s%s" % (p, file_ending) if not p.endswith(file_ending) else p
for p in paths]
# check so the paths actually exists before returning
return [p for p in paths if os.path.isfile(p)]
def dbref(dbref, reqhash=True):
"""
Converts/checks if input is a valid dbref.
Args:
dbref (int or str): A database ref of the form N or #N.
reqhash (bool, optional): Require the #N form to accept
input as a valid dbref.
Returns:
dbref (int or None): The integer part of the dbref or `None`
if input was not a valid dbref.
"""
if reqhash:
num = (int(dbref.lstrip('#')) if (isinstance(dbref, basestring) and
dbref.startswith("#") and
dbref.lstrip('#').isdigit())
else None)
return num if num > 0 else None
elif isinstance(dbref, basestring):
dbref = dbref.lstrip('#')
return int(dbref) if dbref.isdigit() and int(dbref) > 0 else None
else:
return dbref if isinstance(dbref, int) else None
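# Examples (added for illustration):
# dbref("#42") -> 42
# dbref("42") -> None (reqhash=True requires the leading '#')
# dbref("42", reqhash=False) -> 42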
def dbid_to_obj(inp, objclass, raise_errors=True):
"""
Convert a #dbid to a valid object.
Args:
inp (str or int): A valid dbref.
objclass (class): A valid django model to filter against.
raise_errors (bool, optional): Whether to raise errors
or return `None` on errors.
Returns:
obj (Object or None): An entity loaded from the dbref.
Raises:
Exception: If `raise_errors` is `True` and
`objclass.objects.get(id=dbref)` did not return a valid
object.
"""
dbid = dbref(inp)
if not dbid:
# we only convert #dbrefs
return inp
# if we get to this point, dbid is a valid positive integer; get the matching object
try:
return objclass.objects.get(id=dbid)
except Exception:
if raise_errors:
raise
return inp
def to_unicode(obj, encoding='utf-8', force_string=False):
"""
This decodes a suitable object to the unicode format.
Args:
obj (any): Object to decode to unicode.
encoding (str, optional): The encoding type to use for the
decoding.
force_string (bool, optional): Always convert to string, no
matter what type `obj` is initially.
Returns:
result (unicode or any): Will return a unicode object if input
was a string. If input was not a string, the original will be
returned unchanged unless `force_string` is also set.
Notes:
One needs to encode the obj back to utf-8 before writing to disk
or printing. That non-string objects are let through without
conversion is important for e.g. Attributes.
"""
if force_string and not isinstance(obj, basestring):
# some sort of other object. Try to
# convert it to a string representation.
if hasattr(obj, '__str__'):
obj = obj.__str__()
elif hasattr(obj, '__unicode__'):
obj = obj.__unicode__()
else:
# last resort
obj = str(obj)
if isinstance(obj, basestring) and not isinstance(obj, unicode):
try:
obj = unicode(obj, encoding)
return obj
except UnicodeDecodeError:
for alt_encoding in ENCODINGS:
try:
obj = unicode(obj, alt_encoding)
return obj
except UnicodeDecodeError:
pass
raise Exception("Error: '%s' contains invalid character(s) not in %s." % (obj, encoding))
return obj
def to_str(obj, encoding='utf-8', force_string=False):
"""
This encodes a unicode string back to byte-representation,
for printing, writing to disk etc.
Args:
obj (any): Object to encode to bytecode.
encoding (str, optional): The encoding type to use for the
encoding.
force_string (bool, optional): Always convert to string, no
matter what type `obj` is initially.
Notes:
Non-string objects are let through without modification - this
is required e.g. for Attributes. Use `force_string` to force
conversion of objects to strings.
"""
if force_string and not isinstance(obj, basestring):
# some sort of other object. Try to
# convert it to a string representation.
try:
obj = str(obj)
except Exception:
obj = unicode(obj)
if isinstance(obj, unicode):
try:
obj = obj.encode(encoding)
return obj
except UnicodeEncodeError:
for alt_encoding in ENCODINGS:
try:
obj = obj.encode(alt_encoding)
return obj
except UnicodeEncodeError:
pass
raise Exception("Error: Unicode could not encode unicode string '%s'(%s) to a bytestring. " % (obj, encoding))
return obj
def validate_email_address(emailaddress):
"""
Checks if an email address is syntactically correct.
Args:
emailaddress (str): Email address to validate.
Returns:
is_valid (bool): If this is a valid email or not.
Notes.
(This snippet was adapted from
http://commandline.org.uk/python/email-syntax-check.)
"""
emailaddress = r"%s" % emailaddress
domains = ("aero", "asia", "biz", "cat", "com", "coop",
"edu", "gov", "info", "int", "jobs", "mil", "mobi", "museum",
"name", "net", "org", "pro", "tel", "travel")
# Email address must be more than 7 characters in total.
if len(emailaddress) < 7:
return False # Address too short.
# Split up email address into parts.
try:
localpart, domainname = emailaddress.rsplit('@', 1)
host, toplevel = domainname.rsplit('.', 1)
except ValueError:
return False # Address does not have enough parts.
# Check for Country code or Generic Domain.
if len(toplevel) != 2 and toplevel not in domains:
return False # Not a domain name.
for i in '-_.%+':
localpart = localpart.replace(i, "")
for i in '-_.':
host = host.replace(i, "")
if localpart.isalnum() and host.isalnum():
return True # Email address is fine.
else:
return False # Email address has funny characters.
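# Examples (added for illustration):
# validate_email_address("user@example.com") -> True
# validate_email_address("user@@example") -> False (not enough parts)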
def inherits_from(obj, parent):
"""
Takes an object and tries to determine if it inherits at *any*
distance from parent.
Args:
obj (any): Object to analyze. This may be either an instance
or a class.
parent (any): Can be either instance, class or python path to class.
Returns:
inherits_from (bool): If `parent` is a parent to `obj` or not.
Notes:
What differs this function from e.g. `isinstance()` is that `obj`
may be both an instance and a class, and parent may be an
instance, a class, or the python path to a class (counting from
the evennia root directory).
"""
if callable(obj):
# this is a class
obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.mro()]
else:
obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.__class__.mro()]
if isinstance(parent, basestring):
# a given string path, for direct matching
parent_path = parent
elif callable(parent):
# this is a class
parent_path = "%s.%s" % (parent.__module__, parent.__name__)
else:
parent_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__)
return any(1 for obj_path in obj_paths if obj_path == parent_path)
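# Example (added for illustration): both of these hold, because the check
# walks the full MRO and also accepts a dotted path for the parent:
# inherits_from(bool, int) -> True
# inherits_from(True, "__builtin__.int") -> True (Python 2 module path)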
def server_services():
"""
Lists all services active on the Server. Observe that since
services are launched in memory, this function will only return
any results if called from inside the game.
Returns:
services (dict): A dict of available services.
"""
from evennia.server.sessionhandler import SESSIONS
if hasattr(SESSIONS, "server") and hasattr(SESSIONS.server, "services"):
server = SESSIONS.server.services.namedServices
else:
# This function must be called from inside the evennia process.
server = {}
del SESSIONS
return server
def uses_database(name="sqlite3"):
"""
Checks if the game is currently using a given database. This is a
shortcut to having to use the full backend name.
Args:
name (str): One of 'sqlite3', 'mysql', 'postgresql_psycopg2'
or 'oracle'.
Returns:
uses (bool): If the given database is used or not.
"""
try:
engine = settings.DATABASES["default"]["ENGINE"]
except KeyError:
engine = settings.DATABASE_ENGINE
return engine == "django.db.backends.%s" % name
def delay(delay=2, callback=None, retval=None):
"""
Delay the return of a value.
Args:
delay (int or float): The delay in seconds
callback (callable, optional): Will be called without arguments
or with `retval` after delay seconds.
retval (any, optional): This will be returned by this function
after a delay, or as input to callback.
Returns:
deferred (deferred): Will fire with callback after
`delay` seconds. Note that if `delay()` is used in the
commandhandler callback chain, the callback chain can be
defined directly in the command body and don't need to be
specified here.
"""
callb = callback or defer.Deferred().callback
if retval is not None:
return reactor.callLater(delay, callb, retval)
else:
return reactor.callLater(delay, callb)
_TYPECLASSMODELS = None
_OBJECTMODELS = None
def clean_object_caches(obj):
"""
Clean all object caches on the given object.
Args:
obj (Object instance): An object whose caches to clean.
Notes:
This is only the contents cache these days.
"""
global _TYPECLASSMODELS, _OBJECTMODELS
if not _TYPECLASSMODELS:
from evennia.typeclasses import models as _TYPECLASSMODELS
if not obj:
return
# contents cache
try:
_SA(obj, "_contents_cache", None)
except AttributeError:
pass
# on-object property cache
[_DA(obj, cname) for cname in viewkeys(obj.__dict__)
if cname.startswith("_cached_db_")]
try:
hashid = _GA(obj, "hashid")
_TYPECLASSMODELS._ATTRIBUTE_CACHE[hashid] = {}
except AttributeError:
pass
_PPOOL = None
_PCMD = None
_PROC_ERR = "A process has ended with a probable error condition: process ended by signal 9."
def run_async(to_execute, *args, **kwargs):
"""
Runs a function or executes a code snippet asynchronously.
Args:
to_execute (callable): If this is a callable, it will be
executed with *args and non-reserved *kwargs as arguments.
The callable will be executed in a separate thread.
Kwargs:
at_return (callable): Should point to a callable with one
argument. It will be called with the return value from
to_execute.
at_return_kwargs (dict): This dictionary will be used as
keyword arguments to the at_return callback.
at_err (callable): This will be called with a Failure instance
if there is an error in to_execute.
at_err_kwargs (dict): This dictionary will be used as keyword
arguments to the at_err errback.
Notes:
All other `*args` and `**kwargs` will be passed on to
`to_execute`. Run_async will relay executed code to a thread.
Use this function with restraint and only for features/commands
that you know have no influence on the cause-and-effect order of your
game (commands given after the async function might be executed before
it has finished). Accessing the same property from different threads
can lead to unpredictable behaviour if you are not careful (this is called a
"race condition").
Also note that some databases, notably sqlite3, don't support access from
multiple threads simultaneously, so if you do heavy database access from
your `to_execute` under sqlite3 you will probably run very slow or even get
tracebacks.
"""
# handle special reserved input kwargs
callback = kwargs.pop("at_return", None)
errback = kwargs.pop("at_err", None)
callback_kwargs = kwargs.pop("at_return_kwargs", {})
errback_kwargs = kwargs.pop("at_err_kwargs", {})
if callable(to_execute):
# no process pool available, fall back to old deferToThread mechanism.
deferred = threads.deferToThread(to_execute, *args, **kwargs)
else:
# no appropriate input for this server setup
raise RuntimeError("'%s' could not be handled by run_async" % to_execute)
# attach callbacks
if callback:
deferred.addCallback(callback, **callback_kwargs)
if errback:
deferred.addErrback(errback, **errback_kwargs)
def check_evennia_dependencies():
"""
Checks the versions of Evennia's dependencies including making
some checks for runtime libraries.
Returns:
result (bool): `False` if a show-stopping version mismatch is
found.
"""
# check main dependencies
from evennia.server.evennia_launcher import check_main_evennia_dependencies
not_error = check_main_evennia_dependencies()
errstring = ""
# South is no longer used ...
if 'south' in settings.INSTALLED_APPS:
errstring += "\n ERROR: 'south' found in settings.INSTALLED_APPS. " \
"\n South is no longer used. If this was added manually, remove it."
not_error = False
# IRC support
if settings.IRC_ENABLED:
try:
import twisted.words
twisted.words # set to avoid debug info about not-used import
except ImportError:
errstring += "\n ERROR: IRC is enabled, but twisted.words is not installed. Please install it." \
"\n Linux Debian/Ubuntu users should install package 'python-twisted-words', others" \
"\n can get it from http://twistedmatrix.com/trac/wiki/TwistedWords."
not_error = False
errstring = errstring.strip()
if errstring:
mlen = max(len(line) for line in errstring.split("\n"))
logger.log_err("%s\n%s\n%s" % ("-"*mlen, errstring, '-'*mlen))
return not_error
def has_parent(basepath, obj):
"""
Checks if `basepath` is somewhere in `obj`'s parent tree.
Args:
basepath (str): Python dotpath to compare against obj path.
obj (any): Object whose path is to be checked.
Returns:
has_parent (bool): If the check was successful or not.
"""
try:
return any(cls for cls in obj.__class__.mro()
if basepath == "%s.%s" % (cls.__module__, cls.__name__))
except (TypeError, AttributeError):
# this can occur if we tried to store a class object, not an
# instance. Not sure if one should defend against this.
return False
def mod_import(module):
"""
A generic Python module loader.
Args:
module (str, module): This can be either a Python path
(dot-notation like `evennia.objects.models`), an absolute path
(e.g. `/home/eve/evennia/evennia/objects/models.py`) or an
already imported module object (e.g. `models`)
Returns:
module (module or None): An imported module. If the input argument was
already a module, this is returned as-is, otherwise the path is
parsed and imported. Returns `None` and logs error if import failed.
"""
if not module:
return None
if isinstance(module, types.ModuleType):
# if this is already a module, we are done
mod = module
else:
# first try to import as a python path
try:
mod = __import__(module, fromlist=["None"])
except ImportError as ex:
# check just where the ImportError happened (it could have been
# an erroneous import inside the module as well). This is the
# trivial way to do it ...
if str(ex) != "Import by filename is not supported.":
raise
# error in this module. Try absolute path import instead
if not os.path.isabs(module):
module = os.path.abspath(module)
path, filename = module.rsplit(os.path.sep, 1)
modname = re.sub(r"\.py$", "", filename)
try:
result = imp.find_module(modname, [path])
except ImportError:
logger.log_trace("Could not find module '%s' (%s.py) at path '%s'" % (modname, modname, path))
return
try:
mod = imp.load_module(modname, *result)
except ImportError:
logger.log_trace("Could not find or import module %s at path '%s'" % (modname, path))
mod = None
# we have to close the file handle manually
result[0].close()
return mod
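# Illustrative sketch (paths hypothetical): all three call styles should
# return the same module object when the module exists.
#
# mod1 = mod_import("evennia.utils.logger")                        # python path
# mod2 = mod_import("/home/eve/evennia/evennia/utils/logger.py")   # absolute path
# from evennia.utils import logger as logmod
# mod3 = mod_import(logmod)                                        # module object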
def all_from_module(module):
"""
Return all global-level variables defined in a module.
Args:
module (str, module): This can be either a Python path
(dot-notation like `evennia.objects.models`), an absolute path
(e.g. `/home/eve/evennia/evennia/objects/models.py`) or an
already imported module object (e.g. `models`)
Returns:
variables (dict): A dict of {variablename: variable} for all
variables in the given module.
Notes:
Ignores modules and variable names starting with an underscore.
"""
mod = mod_import(module)
if not mod:
return {}
# make sure to only return variables actually defined in this
# module if available (trying to avoid imports)
members = getmembers(mod, predicate=lambda obj: getmodule(obj) in (mod, None))
return dict((key, val) for key, val in members if not key.startswith("_"))
#return dict((key, val) for key, val in mod.__dict__.items()
# if not (key.startswith("_") or ismodule(val)))
def callables_from_module(module):
"""
Return all global-level callables defined in a module.
Args:
module (str, module): A python-path to a module or an actual
module object.
Returns:
callables (dict): A dict of {name: callable, ...} from the module.
Notes:
Will ignore callables whose names start with underscore "_".
"""
mod = mod_import(module)
if not mod:
return {}
# make sure to only return callables actually defined in this module (not imports)
members = getmembers(mod, predicate=lambda obj: callable(obj) and getmodule(obj) == mod)
return dict((key, val) for key, val in members if not key.startswith("_"))
def variable_from_module(module, variable=None, default=None):
"""
Retrieve a variable or list of variables from a module. The
variable(s) must be defined globally in the module. If no variable
is given (or a list entry is `None`), all global variables are
extracted from the module.
Args:
module (string or module): Python path, absolute path or a module.
variable (string or iterable, optional): Single variable name or iterable
of variable names to extract. If not given, all variables in
the module will be returned.
default (string, optional): Default value to use if a variable fails to
be extracted. Ignored if `variable` is not given.
Returns:
variables (value or list): A single value or a list of values
depending on if `variable` is given or not. Errors in lists
are replaced by the `default` argument.
"""
if not module:
return default
mod = mod_import(module)
if variable:
result = []
for var in make_iter(variable):
if var:
# try to pick a named variable
result.append(mod.__dict__.get(var, default))
else:
# get all
result = [val for key, val in mod.__dict__.items()
if not (key.startswith("_") or ismodule(val))]
if len(result) == 1:
return result[0]
return result
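# Hedged example (module and variable names hypothetical): given a module
# `server.conf.connection_screens` defining CONNECTION_SCREEN = "Welcome!",
#
# variable_from_module("server.conf.connection_screens", "CONNECTION_SCREEN")
# # -> "Welcome!"
# variable_from_module("server.conf.connection_screens",
#                      ["CONNECTION_SCREEN", "MISSING"], default="fallback")
# # -> ["Welcome!", "fallback"]  (failed extractions map to `default`)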
def string_from_module(module, variable=None, default=None):
"""
This is a wrapper for `variable_from_module` that requires return
value to be a string to pass. It's primarily used by the login screen.
Args:
module (string or module): Python path, absolute path or a module.
variable (string or iterable, optional): Single variable name or iterable
of variable names to extract. If not given, all variables in
the module will be returned.
default (string, optional): Default value to use if a variable fails to
be extracted. Ignored if `variable` is not given.
Returns:
variables (value or list): A single (string) value or a list of values
depending on if `variable` is given or not. Errors in lists (such
as the value not being a string) are replaced by the `default` argument.
"""
val = variable_from_module(module, variable=variable, default=default)
if val:
if variable:
return val
else:
result = [v for v in make_iter(val) if isinstance(v, basestring)]
return result if result else default
return default
def random_string_from_module(module):
"""
Returns a random global string from a module.
Args:
module (string or module): Python path, absolute path or a module.
Returns:
random (string): A random string variable from `module`.
"""
return random.choice(string_from_module(module))
def fuzzy_import_from_module(path, variable, default=None, defaultpaths=None):
"""
Import a variable based on a fuzzy path. First the literal
`path` will be tried, then all given `defaultpaths` will be
prepended to see if a match is found.
Args:
path (str): Full or partial python path.
variable (str): Name of variable to import from module.
default (string, optional): Default value to use if a variable fails to
be extracted. Ignored if `variable` is not given.
defaultpaths (iterable, optional): Python paths to attempt in order if
importing directly from `path` doesn't work.
Returns:
value (any): The variable imported from the module, or `default`, if
not found.
"""
paths = [path] + make_iter(defaultpaths)
for modpath in paths:
try:
mod = import_module(modpath)
except ImportError as ex:
if not str(ex).startswith("No module named %s" % modpath):
# this means the module was found but it
# triggers an ImportError on import.
raise ex
else:
return getattr(mod, variable, default)
return default
def class_from_module(path, defaultpaths=None):
"""
Return a class from a module, given the module's path. This is
primarily used to convert db_typeclass_path:s to classes.
Args:
path (str): Full Python dot-path to module.
defaultpaths (iterable, optional): If a direct import from `path` fails,
try subsequent imports by prepending those paths to `path`.
Returns:
class (Class): An uninstantiated class recovered from path.
Raises:
ImportError: If all loading failed.
"""
cls = None
if defaultpaths:
paths = [path] + ["%s.%s" % (dpath, path) for dpath in make_iter(defaultpaths)]
else:
paths = [path]
for testpath in paths:
if "." in path:
testpath, clsname = testpath.rsplit(".", 1)
else:
raise ImportError("the path '%s' is not of the form modulepath.Classname." % path)
try:
mod = import_module(testpath, package="evennia")
except ImportError:
if len(trace()) > 2:
# this means the error happened within the called module and
# we must not hide it.
exc = sys.exc_info()
raise_(exc[1], None, exc[2])
else:
# otherwise, try the next suggested path
continue
try:
cls = getattr(mod, clsname)
break
except AttributeError:
if len(trace()) > 2:
# AttributeError within the module, don't hide it
exc = sys.exc_info()
raise_(exc[1], None, exc[2])
if not cls:
err = "Could not load typeclass '%s'" % path
if defaultpaths:
err += "\nPaths searched:\n %s" % "\n ".join(paths)
else:
err += "."
raise ImportError(err)
return cls
# alias
object_from_module = class_from_module
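# Sketch of typical use (paths are illustrative, not requirements): the
# literal path is tried first, then each defaultpath is prepended in turn.
#
# ObjClass = class_from_module("typeclasses.objects.Object",
#                              defaultpaths=["evennia"])
# obj = ObjClass()  # the uninstantiated class is returned; instantiate as usual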
def init_new_player(player):
"""
Deprecated.
"""
from evennia.utils import logger
logger.log_dep("evennia.utils.utils.init_new_player is DEPRECATED and should not be used.")
def string_similarity(string1, string2):
"""
This implements a "cosine-similarity" algorithm as described for example in
*Proceedings of the 22nd International Conference on Computational
Linguistics* (Coling 2008), pages 593-600, Manchester, August 2008.
The measure-vectors used is simply a "bag of words" type histogram
(but for letters).
Args:
string1 (str): String to compare (may contain any number of words).
string2 (str): Second string to compare (any number of words).
Returns:
similarity (float): A value 0...1 rating how similar the two
strings are.
"""
vocabulary = set(list(string1 + string2))
vec1 = [string1.count(v) for v in vocabulary]
vec2 = [string2.count(v) for v in vocabulary]
try:
return float(sum(vec1[i] * vec2[i] for i in range(len(vocabulary)))) / \
(math.sqrt(sum(v1**2 for v1 in vec1)) * math.sqrt(sum(v2**2 for v2 in vec2)))
except ZeroDivisionError:
# can happen if empty-string cmdnames appear for some reason.
# This is a no-match.
return 0
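# Worked example of the letter-histogram cosine measure: identical strings
# score 1.0, disjoint letter sets score 0.0, partial overlaps in between.
#
# string_similarity("sword", "sword")  # -> 1.0
# string_similarity("sword", "board")  # -> 0.6 (shares o, r, d)
# string_similarity("abc", "xyz")      # -> 0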
def string_suggestions(string, vocabulary, cutoff=0.6, maxnum=3):
"""
Given a `string` and a `vocabulary`, return a match or a list of
suggestions based on string similarity.
Args:
string (str): A string to search for.
vocabulary (iterable): A list of available strings.
cutoff (float, 0-1): Limit the similarity matches (the higher
the value, the more exact a match is required).
maxnum (int): Maximum number of suggestions to return.
Returns:
suggestions (list): Suggestions from `vocabulary` with a
similarity rating higher than or equal to `cutoff`.
Could be empty if there are no matches.
"""
return [tup[1] for tup in sorted([(string_similarity(string, sugg), sugg)
for sugg in vocabulary],
key=lambda tup: tup[0], reverse=True)
if tup[0] >= cutoff][:maxnum]
def string_partial_matching(alternatives, inp, ret_index=True):
"""
Partially matches a string based on a list of `alternatives`.
Matching is made from the start of each subword in each
alternative. Case is not important. So e.g. "bi sh sw" or just
"big" or "shiny" or "sw" will match "Big shiny sword". Scoring is
done to allow separating by the most common denominator. You will get
multiple matches returned if appropriate.
Args:
alternatives (list of str): A list of possible strings to
match.
inp (str): Search criterion.
ret_index (bool, optional): Return list of indices (from alternatives
array) instead of strings.
Returns:
matches (list): String-matches or indices if `ret_index` is `True`.
"""
if not alternatives or not inp:
return []
matches = defaultdict(list)
inp_words = inp.lower().split()
for altindex, alt in enumerate(alternatives):
alt_words = alt.lower().split()
last_index = 0
score = 0
for inp_word in inp_words:
# loop over parts, making sure only to visit each part once
# (this will invalidate input in the wrong word order)
submatch = [last_index + alt_num for alt_num, alt_word
in enumerate(alt_words[last_index:])
if alt_word.startswith(inp_word)]
if submatch:
last_index = min(submatch) + 1
score += 1
else:
score = 0
break
if score:
if ret_index:
matches[score].append(altindex)
else:
matches[score].append(alt)
if matches:
return matches[max(matches)]
return []
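# Worked example: every input word must prefix-match a distinct word of the
# alternative, in order; the alternatives with the highest score are returned.
#
# alts = ["Big shiny sword", "small sword", "shield"]
# string_partial_matching(alts, "bi sw")                   # -> [0]
# string_partial_matching(alts, "sword", ret_index=False)  # -> ["Big shiny sword", "small sword"]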
def format_table(table, extra_space=1):
"""
Note: `evennia.utils.evtable` is more powerful than this, but this
function can be useful when the number of columns and rows are
unknown and must be calculated on the fly.
Args:
table (list): A list of lists to represent columns in the
table: `[[val,val,val,...], [val,val,val,...], ...]`, where
each val will be placed on a separate row in the
column. All columns must have the same number of rows (some
positions may be empty though).
extra_space (int, optional): Sets how much *minimum* extra
padding (in characters) should be left between columns.
Returns:
table (list): A list of lists representing the rows to print
out one by one.
Notes:
The function formats the columns to be as wide as the widest member
of each column.
Examples:
```python
string = ""
for ir, row in enumerate(ftable):
if ir == 0:
# make first row white
string += "\n{w" + "".join(row) + "{n"
else:
string += "\n" + "".join(row)
print string
```
"""
if not table:
return [[]]
max_widths = [max([len(str(val)) for val in col]) for col in table]
ftable = []
for irow in range(len(table[0])):
ftable.append([str(col[irow]).ljust(max_widths[icol]) + " " * extra_space
for icol, col in enumerate(table)])
return ftable
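# Tiny demonstration (columns in, rows out; padding to the widest member):
#
# table = [["name", "Anna", "Bo"], ["hp", "100", "7"]]
# for row in format_table(table, extra_space=2):
#     print "".join(row)
# # name  hp
# # Anna  100
# # Bo    7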
def get_evennia_pids():
"""
Get the currently valid PIDs (Process IDs) of the Portal and
Server by trying to access a PID file.
Returns:
server, portal (tuple): The PIDs of the respective processes,
or two `None` values if not found.
Examples:
This can be used to determine if we are in a subprocess by
something like:
```python
self_pid = os.getpid()
server_pid, portal_pid = get_evennia_pids()
is_subprocess = self_pid not in (server_pid, portal_pid)
```
"""
server_pidfile = os.path.join(settings.GAME_DIR, 'server.pid')
portal_pidfile = os.path.join(settings.GAME_DIR, 'portal.pid')
server_pid, portal_pid = None, None
if os.path.exists(server_pidfile):
f = open(server_pidfile, 'r')
server_pid = f.read()
f.close()
if os.path.exists(portal_pidfile):
f = open(portal_pidfile, 'r')
portal_pid = f.read()
f.close()
if server_pid and portal_pid:
return int(server_pid), int(portal_pid)
return None, None
from gc import get_referents
from sys import getsizeof
def deepsize(obj, max_depth=4):
"""
Get not only size of the given object, but also the size of
objects referenced by the object, down to `max_depth` distance
from the object.
Args:
obj (object): the object to be measured.
max_depth (int, optional): maximum referential distance
from `obj` that `deepsize()` should cover for
measuring objects referenced by `obj`.
Returns:
size (int): deepsize of `obj` in Bytes.
Notes:
This measure is necessarily approximate since some
memory is shared between objects. The `max_depth` of 4 is roughly
tested to give reasonable size information about database models
and their handlers.
"""
def _recurse(o, dct, depth):
if max_depth >= 0 and depth > max_depth:
return
for ref in get_referents(o):
idr = id(ref)
if not idr in dct:
dct[idr] = (ref, getsizeof(ref, default=0))
_recurse(ref, dct, depth+1)
sizedict = {}
_recurse(obj, sizedict, 0)
#count = len(sizedict) + 1
size = getsizeof(obj) + sum([p[1] for p in sizedict.values()])
return size
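# Usage sketch (numbers are platform-dependent): deepsize also counts the
# list, string and int objects the dict references, unlike plain getsizeof.
#
# data = {"a": [1, 2, 3], "b": "hello"}
# print getsizeof(data)  # shallow size only
# print deepsize(data)   # shallow size + referenced objects down to depth 4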
# lazy load handler
_missing = object()
class lazy_property(object):
"""
Delays loading of property until first access. Credit goes to the
implementation in the werkzeug suite:
http://werkzeug.pocoo.org/docs/utils/#werkzeug.utils.cached_property
This should be used as a decorator in a class and in Evennia is
mainly used to lazy-load handlers:
```python
@lazy_property
def attributes(self):
return AttributeHandler(self)
```
Once initialized, the `AttributeHandler` will be available as a
property "attributes" on the object.
"""
def __init__(self, func, name=None, doc=None):
"Store all properties for now"
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
"Triggers initialization"
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
_STRIP_ANSI = None
_RE_CONTROL_CHAR = re.compile('[%s]' % re.escape(''.join([unichr(c) for c in range(0,32)])))# + range(127,160)])))
def strip_control_sequences(string):
"""
Remove non-print text sequences.
Args:
string (str): Text to strip.
Returns:
text (str): Stripped text.
"""
global _STRIP_ANSI
if not _STRIP_ANSI:
from evennia.utils.ansi import strip_raw_ansi as _STRIP_ANSI
return _RE_CONTROL_CHAR.sub('', _STRIP_ANSI(string))
def calledby(callerdepth=1):
"""
Only to be used for debug purposes. Insert this debug function in
another function; it will print which function called it.
Args:
callerdepth (int): Must be larger than 0. When > 1, it will
print the caller of the caller etc.
Returns:
calledby (str): A debug string detailing which routine called
us.
"""
import inspect, os
stack = inspect.stack()
# we must step one extra level back in stack since we don't want
# to include the call of this function itself.
callerdepth = min(max(2, callerdepth + 1), len(stack)-1)
frame = inspect.stack()[callerdepth]
path = os.path.sep.join(frame[1].rsplit(os.path.sep, 2)[-2:])
return "[called by '%s': %s:%s %s]" % (frame[3], path, frame[2], frame[4])
def m_len(target):
"""
Provides length checking for strings with MXP patterns, and falls
back to normal len for other objects.
Args:
target (string): A string with potential MXP components
to search.
Returns:
length (int): The length of `target`, ignoring MXP components.
"""
# Would create circular import if in module root.
from evennia.utils.ansi import ANSI_PARSER
if inherits_from(target, basestring):
return len(ANSI_PARSER.strip_mxp(target))
return len(target)
#------------------------------------------------------------------
# Search handler function
#------------------------------------------------------------------
#
# Replace this hook function by changing settings.SEARCH_AT_RESULT.
#
def at_search_result(matches, caller, query="", quiet=False, **kwargs):
"""
This is a generic hook for handling all processing of a search
result, including error reporting.
Args:
matches (list): This is a list of 0, 1 or more typeclass instances,
the matched result of the search. If 0, a nomatch error should
be echoed, and if >1, multimatch errors should be given. Only
if there is a single match will the result pass through.
caller (Object): The object performing the search and/or which should
receive error messages.
query (str, optional): The search query used to produce `matches`.
quiet (bool, optional): If `True`, no messages will be echoed to caller
on errors.
Kwargs:
nofound_string (str): Replacement string to echo on a notfound error.
multimatch_string (str): Replacement string to echo on a multimatch error.
Returns:
processed_result (Object or None): This is always a single result
or `None`. If `None`, any error reporting/handling should
already have happened.
"""
error = ""
if not matches:
# no results.
error = kwargs.get("nofound_string") or _("Could not find '%s'." % query)
matches = None
elif len(matches) > 1:
error = kwargs.get("multimatch_string") or \
_("More than one match for '%s' (please narrow target):" % query)
for num, result in enumerate(matches):
aliases = result.aliases.all()
error += "\n %i%s%s%s%s" % (
num + 1, _MULTIMATCH_SEPARATOR,
result.get_display_name(caller) if hasattr(result, "get_display_name") else query,
" [%s]" % ";".join(aliases) if aliases else "",
result.get_extra_info(caller))
matches = None
else:
# exactly one match
matches = matches[0]
if error and not quiet:
caller.msg(error.strip())
return matches
class LimitedSizeOrderedDict(OrderedDict):
"""
This dictionary subclass is both ordered and limited to a maximum
number of elements. Its main use is to hold a cache that can never
grow out of bounds.
"""
def __init__(self, *args, **kwargs):
"""
Limited-size ordered dict.
Kwargs:
size_limit (int): Use this to limit the number of elements
allowed to be in this dict. By default the overshooting elements
will be removed in FIFO order.
fifo (bool, optional): Defaults to `True`. Remove overshooting elements
in FIFO order. If `False`, remove in FILO order.
"""
super(LimitedSizeOrderedDict, self).__init__()
self.size_limit = kwargs.get("size_limit", None)
self.filo = not kwargs.get("fifo", True) # FIFO inverse of FILO
self._check_size()
def _check_size(self):
filo = self.filo
if self.size_limit is not None:
while self.size_limit < len(self):
self.popitem(last=filo)
def __setitem__(self, key, value):
super(LimitedSizeOrderedDict, self).__setitem__(key, value)
self._check_size()
def update(self, *args, **kwargs):
super(LimitedSizeOrderedDict, self).update(*args, **kwargs)
self._check_size()
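# Behaviour sketch: with size_limit=3 and the default FIFO mode, adding a
# fourth key silently drops the oldest one.
#
# cache = LimitedSizeOrderedDict(size_limit=3)
# for key in ("a", "b", "c", "d"):
#     cache[key] = True
# print list(cache)  # -> ['b', 'c', 'd']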
def get_game_dir_path():
"""
This is called by settings_default in order to determine the path
of the game directory.
Returns:
path (str): Full OS path to the game dir
"""
# current working directory, assumed to be somewhere inside gamedir.
for i in range(10):
gpath = os.getcwd()
if "server" in os.listdir(gpath):
if os.path.isfile(os.path.join("server", "conf", "settings.py")):
return gpath
else:
os.chdir(os.pardir)
raise RuntimeError("server/conf/settings.py not found: Must start from inside game dir.")
|
ergodicbreak/evennia
|
evennia/utils/utils.py
|
Python
|
bsd-3-clause
| 52,956
|
[
"VisIt"
] |
27e9c62a9d1623ee4fe459e9e9a623fbd5c49ab1f50a41eab83f93f2ebac6953
|
from ecell4.reaction_reader.decorator2 import species_attributes, reaction_rules
from ecell4.reaction_reader.network import generate_reactions
k1 = 0.1
k2 = 0.2
k3 = 0.3
k4 = 0.4
@species_attributes
def attributegen():
S(e,y=zero) | 1
kinase(s) | 2
pptase(s) | 3
ATP() | 4
ADP() | 5
@reaction_rules
def rulegen():
# binding rules
S(e) + kinase(s) == S(e^1).kinase(s^1) | (1,2)
S(e) + pptase(s) == S(e^1).pptase(s^1) | (3,4)
# catalysis
S(e^1,y=zero).kinase(s^1) + ATP == S(e^1,y=P).kinase(s^1) + ADP | (5,6)
S(e^1,y=P).pptase(s^1) == S(e^1,y=zero).pptase(s^1) | (7,8)
if __name__ == "__main__":
newseeds = []
for i, (sp, attr) in enumerate(attributegen()):
print i, sp, attr
newseeds.append(sp)
print ''
rules = rulegen()
for i, rr in enumerate(rules):
print i, rr
print ''
generate_reactions(newseeds, rules)
## Catalysis in energy BNG
## justin.s.hogg@gmail.com, 9 Apr 2013
#
## requires BioNetGen version >= 2.2.4
#version("2.2.4")
## Quantities have units in moles, so set this to Avogadro's Number
#setOption("NumberPerQuantityUnit",6.0221e23)
#
#begin model
#begin parameters
# # fundamental constants
# RT 2.577 # kJ/mol
# NA 6.022e23 # /mol
# # simulation volume, L
# volC 1e-12
# # initial concentrations, mol/L
# conc_S_0 1e-6
# conc_kinase_0 10e-9
# conc_pptase_0 10e-9
# conc_ATP_0 1.0e-3
# conc_ADP_0 0.1e-3
# # standard free energy of formation, kJ/mol
# Gf_Sp 51.1
# Gf_S_kinase -41.5
# Gf_S_pptase -41.5
# Gf_ATP 51.1
# # baseline activation energy, kJ/mol
# Ea0_S_kinase -7.7
# Ea0_S_pptase -7.7
# Ea0_cat_kinase -11.9
# Ea0_cat_pptase 11.9
# # rate distribution parameter, no units
# phi 0.5
#end parameters
#begin compartments
# # generic compartment
# C 3 volC
#end compartments
#begin molecule types
# S(e,y~0~P) # substrate with enzyme binding domain and site of phosphorylation
# kinase(s) # kinase enzyme
# pptase(s) # phosphatase enzyme
# ATP()
# ADP()
#end molecule types
#begin species
# S(e,y~0)@C conc_S_0*NA*volC
# kinase(s)@C conc_kinase_0*NA*volC
# pptase(s)@C conc_pptase_0*NA*volC
# $ATP()@C conc_ATP_0*NA*volC # ATP concentration held constant
# $ADP()@C conc_ADP_0*NA*volC # ADP concentration held constant
#end species
#begin reaction rules
# # binding rules
# S(e) + kinase(s) <-> S(e!1).kinase(s!1) Arrhenius(phi,Ea0_S_kinase)
# S(e) + pptase(s) <-> S(e!1).pptase(s!1) Arrhenius(phi,Ea0_S_pptase)
# # catalysis
# S(e!1,y~0).kinase(s!1) + ATP <-> S(e!1,y~P).kinase(s!1) + ADP Arrhenius(phi,Ea0_cat_kinase)
# S(e!1,y~P).pptase(s!1) <-> S(e!1,y~0).pptase(s!1) Arrhenius(phi,Ea0_cat_pptase)
#end reaction rules
#begin energy patterns
# S(y~P) Gf_Sp/RT # phosphorylated substrate
# S(e!0).kinase(s!0) Gf_S_kinase/RT # substrate-kinase binding
# S(e!0).pptase(s!0) Gf_S_pptase/RT # substrate-pptase binding
# ATP() Gf_ATP/RT # ATP energy (relative to ADP)
#end energy patterns
#begin observables
# Molecules Sp S(y~P)
# Molecules S_kinase S(e!1).kinase(s!1)
# Molecules S_pptase S(e!1).pptase(s!1)
# Molecules Stot S()
# Molecules kinaseTot kinase()
# Molecules pptaseTot pptase()
#end observables
#end model
#
## generate reaction network..
#generate_network({overwrite=>1})
#
## simulate ODE system to steady state..
#simulate({method=>"ode",t_start=>0,t_end=>3600,n_steps=>120,atol=>1e-3,rtol=>1e-7})
#
|
navoj/ecell4
|
python/samples/reaction_reader/catalysis/catalysis.py
|
Python
|
gpl-2.0
| 3,761
|
[
"Avogadro"
] |
dbb09a22f4b9ee0e72b29df542125aab9ed716c124e142805c712d3156512b1f
|
#!/usr/bin/env python2.7
#coding=utf-8
#author@alingse
#2014.11.08
from __future__ import print_function
import argparse
#import rapidjson as json
import json
import sys
log = print
def get_id_from_id_line(id_line):
return id_line.strip()
#return id_line.strip().decode('utf-8')
#return id_line.strip()[-32:]
#return id_line.split('\t',1)[0]
#return id_line.split('\t',1)[1]
#return id_line.strip().split('\t', 1)[1]
#return id_line.split(' ',1)[0]
#return json.loads(id_line).get('item_id')
#return json.loads(id_line).get('unique')
#return json.loads(id_line).get('name').decode('utf-8')
#return json.loads(id_line).get('weibo')
#result = json.loads(id_line)
#return result['feed_id']+'ID'+result['datenow']
#return json.loads(id_line).get('shop_id')
#import re
#weibofinder=re.compile('"weibo": "([^"]+)"').findall
def get_id_from_data_line(data_line):
"""
try:
data=json.loads(data_line)
return data.get("data").get("itemInfoModel").get("itemId")
except:
return None
"""
#result = json.loads(data_line)
#return result['feed_id']+'ID'+result['datenow']
#return result['start']+result['end']+result['key']
#return data_line.strip()
#return data_line.strip()[-32:]
return data_line.split('\t',1)[0]
#return data_line.split('\t',2)[1]
#return data_line.split('\t')[1]
#return data_line.split('\t')[0]
#return data_line.strip().split('\t')[2]
#return data_line.strip().split('\t')[1]
#return data_line.split(' ')[1]
#return data_line.split(' ',1)[0]
#return data_line.split(',',1)[0]
#return data_line.split('\t')[1].strip()
#return weibofinder(data_line[:300])[0]
#return str(json.loads(data_line).get('uin'))
#return str(json.loads(data_line).get('cat_id'))
#return json.loads(data_line).get('shop_id')
#return json.loads(data_line).get('id')[-32:]
#return json.loads(data_line).get('unique')
#return json.loads(data_line).get('company_id')[-32:]
#return json.loads(data_line).get('KeyNo')
#return json.loads(data_line).get('category')
#return json.loads(data_line).get('name').decode('utf-8')
#return json.loads(data_line).get('ent_name').decode('utf-8')
#return json.loads(data_line).get('id')
#return json.loads(data_line).get('wb')
#return json.loads(data_line).get('weibo')
#return str(json.loads(data_line).get('item_id'))
#return json.loads(data_line).get('seller_id')
#return json.loads(data_line).get('item_id')
#return json.loads(data_line).get('nid')
#return json.loads(data_line).get('feed_id')
#return json.loads(data_line).get('item_info',{}).get('item_id')
#return json.loads(data_line).get('item_info',{}).get('category_id')
#return json.loads(data_line).get('guid')
#return json.loads(data_line).get('uid')
#return str(json.loads(data_line).get('brandId'))
def load_id_dict(idfd):
id_dict = {}
for id_line in idfd:
if id_line == "\n":
continue
_id = get_id_from_id_line(id_line)
if _id is not None:
id_dict[_id] = 0
return id_dict
def filter_data(id_dict, id_ct, datafd, outfd, multi=False):
hit = 0
ct = 0
for data_line in datafd:
ct += 1
if data_line == '\n':
continue
_id = get_id_from_data_line(data_line)
if _id is None:
continue
if _id not in id_dict:
continue
#multi data share one id
if not multi:
if id_dict[_id] == 1:
continue
if id_dict[_id] == 0:
id_dict[_id] = 1
hit += 1
outfd.write(data_line)
if hit == id_ct:
break
#for log
if ct % 1000 == 0:
log('id:{},visit data:{} hit:{}'.format(id_ct, ct, hit))
log('id:{} hit:{} notin:{}'.format(id_ct, hit, (id_ct - hit)))
return hit
def dump_notin(idfd, id_dict, notinfd):
for id_line in idfd:
if id_line == "\n":
continue
_id = get_id_from_id_line(id_line)
if _id is not None:
if id_dict[_id] == 0:
notinfd.write(id_line)
def main(idf, dataf, outf, notinf, multi=False):
idfd = open(idf, 'r')
id_dict = load_id_dict(idfd)
idfd.close()
id_ct = len(id_dict)
log("got id count: {}".format(id_ct))
datafd = open(dataf, 'r')
outfd = open(outf, 'w')
hit = filter_data(id_dict, id_ct, datafd, outfd, multi=multi)
datafd.close()
outfd.close()
if hit == id_ct:
return True
if notinf is None:
return None
log('dump the notin id file')
idfd = open(idf, 'r')
notinfd = open(notinf, 'w')
dump_notin(idfd, id_dict, notinfd)
idfd.close()
notinfd.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('idf', help='id file')
parser.add_argument('dataf', help='data file')
parser.add_argument('outf', help='filter data file')
parser.add_argument('notinf', nargs='?', help='notin id file')
parser.add_argument('-m',
'--multi',
action='store_true',
help='multi data share one id')
args = parser.parse_args()
log(args)
idf = args.idf
dataf = args.dataf
outf = args.outf
notinf = args.notinf
multi = args.multi
main(idf, dataf, outf, notinf, multi=multi)
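# Example invocation (file names hypothetical):
#
#   python filter_data_by_id.py ids.txt data.txt hits.txt missing.txt
#
# keeps every line of data.txt whose first tab-separated field appears in
# ids.txt, writes the matches to hits.txt and the unmatched ids to
# missing.txt; pass -m/--multi when several data lines may share one id.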
|
alingse/crawler-common
|
dodata/filter_data_by_id.py
|
Python
|
apache-2.0
| 5,515
|
[
"VisIt"
] |
d2d6e41b319068a9d882e6d453d561eb467fb81ce88f59c7d21e70737ca31fe8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes the ternary conditional operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import templates
class IfExp(converter.Base):
"""Canonicalizes all IfExp nodes into plain conditionals."""
def visit_IfExp(self, node):
template = """
ag__.utils.run_cond(test, lambda: (body,), lambda: (orelse,))
"""
desugared_ifexp = templates.replace_as_expression(
template, test=node.test, body=node.body, orelse=node.orelse)
return desugared_ifexp
def transform(node, ctx):
"""Desugar IfExp nodes into plain conditionals.
Args:
node: ast.AST, the node to transform
ctx: converter.EntityContext
Returns:
new_node: an AST with no IfExp nodes, only conditionals.
"""
node = IfExp(ctx).visit(node)
return node
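# Rough before/after sketch of the desugaring (names arbitrary):
#
#   x = a if condition else b
#
# becomes, once this converter has run:
#
#   x = ag__.utils.run_cond(condition, lambda: (a,), lambda: (b,))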
|
lukeiwanski/tensorflow
|
tensorflow/contrib/autograph/converters/ifexp.py
|
Python
|
apache-2.0
| 1,631
|
[
"VisIt"
] |
38358565595485dbb146ca00c736e4d58162220c7be52dc1905706eb1382ada1
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
import numpy as np
from MDAnalysisTests.datafiles import waterPSF, waterDCD
from MDAnalysis.analysis.lineardensity import LinearDensity
from numpy.testing import TestCase, assert_allclose
class TestLinearDensity(TestCase):
def setUp(self):
self.universe = mda.Universe(waterPSF, waterDCD)
self.sel_string = 'all'
self.selection = self.universe.select_atoms(self.sel_string)
self.xpos = np.array([0., 0., 0., 0.0072334, 0.00473299, 0.,
0., 0., 0., 0.])
def test_serial(self):
ld = LinearDensity(self.selection, binsize=5).run()
assert_allclose(self.xpos, ld.results['x']['pos'], rtol=1e-6, atol=0)
# def test_parallel(self):
# ld = LinearDensity(self.universe, self.selection, binsize=5)
# ld.run(parallel=True)
# assert_equal(self.xpos, ld.results['x']['pos'])
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_lineardensity.py
|
Python
|
gpl-2.0
| 1,939
|
[
"MDAnalysis"
] |
2a9c3b76216343c188d2f4327baf774d6323d7cf403ca63f79aca3c1f116c589
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
import json
import warnings
from io import open
from enum import Enum
from pymatgen.core.units import Mass, Length, unitized, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
"""
Module contains classes representing Element and Specie (Element + oxidation
state) and PeriodicTable.
"""
__author__ = "Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Loads element data from json file
with open(os.path.join(os.path.dirname(__file__),
"periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
data_add = {"Og": {"Electrical resistivity": "no data",
"Electronic structure": "[Rn].5f14.6d10.7s2.7p6",
"Oxidation states": [0],
"Reflectivity": "no data",
"Molar volume": "no data",
"Van der waals radius": "no data",
"Melting point": "no data",
"Bulk modulus": "no data",
"Mineral hardness": "no data",
"Mendeleev no": 294,
"Atomic no": 118,
"Boiling point": "no data",
"Common oxidation states": [0],
"Brinell hardness": "no data",
"Density of solid": "no data",
"Thermal conductivity": "no data",
"Critical temperature": "no data K",
"Name": "Oganesson",
"Atomic mass": '294',
"Coefficient of linear thermal expansion": "6.4 x10<sup>-6</sup>K<sup>-1</sup>",
"Rigidity modulus": "173 GPa",
"Youngs modulus": "447 GPa",
"Velocity of sound": "5970 m s<sup>-1</sup>",
"X": 2.2,
"Liquid range": "1816 K",
"Atomic radius calculated": 1.78,
"Vickers hardness": "no data MN m<sup>-2</sup>",
"Superconduction temperature": "0.49 K",
"Refractive index": "no data",
"Ionic radii": {"8": 0.5, "3": 0.82, "4": 0.76, "5": 0.705, "7": 0.52},
"Atomic radius": 1.3}}
_pt_data.update(data_add)
class Element(Enum):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
attributes, missing data (i.e., data which is not available) is
represented by a None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
Pauling electronegativity. Elements without an electronegativity
number are assigned a value of zero by default.
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
True if element is an actinoid.
.. attribute:: name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
Calculated atomic radius for the element. This is the empirical value.
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
Refractive index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: atomic_orbitals
Atomic Orbitals. Energy of the atomic orbitals as a dict.
E.g., The orbitals energies in eV are represented as
{'1s': -1.0, '2s': -0.1}
Data is obtained from
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
The LDA values for neutral atoms are used
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
Vicker's hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
# This name = value convention is redundant and dumb, but unfortunately is
# necessary to preserve backwards compatibility with a time when Element is
# a regular object that is constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
Og = "Og"
def __init__(self, symbol):
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self.atomic_radius = None
else:
self.atomic_radius = Length(at_r, "ang")
self.atomic_mass = Mass(d["Atomic mass"], "amu")
self._data = d
@property
def X(self):
if "X" in self._data:
return self._data["X"]
else:
warnings.warn("No electronegativity for %s. Setting to infinity. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("inf")
def __getattr__(self, item):
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius", "atomic_orbitals",
"coefficient_of_linear_thermal_expansion"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
elif type(val) == dict:
pass
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif (
item ==
"coefficient_of_linear_thermal_expansion"
):
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError as ex:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
@unitized("ang")
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
return sum(radii.values()) / len(radii)
else:
return 0
@property
@unitized("ang")
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): v for k, v in self._data["Ionic radii"].items()}
else:
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
def common_oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
if self.X != other.X:
return self.X < other.X
else:
# There are cases where the electronegativities are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
@staticmethod
def from_Z(z):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row, group):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
try:
Element(symbol)
return True
except:
return False
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
elif 89 <= z <= 103:
return 9
for i in range(len(_pt_row_sizes)):
total += _pt_row_sizes[i]
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
elif (z - 2) % 8 <= 2:
return (z - 2) % 8
else:
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
else:
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
elif (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
else:
return (z - 54) % 32
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
block = ""
if (self.is_actinoid or self.is_lanthanoid) and \
self.Z not in [71, 103]:
block = "f"
elif self.is_actinoid or self.is_lanthanoid:
block = "d"
elif self.group in [1, 2]:
block = "s"
elif self.group in range(13, 19):
block = "p"
elif self.group in range(3, 13):
block = "d"
else:
raise ValueError("unable to determine block")
return block
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_rare_earth_metal(self):
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metalloid(self):
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self):
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self):
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
True if element is an actinoid.
"""
return 88 < self.Z < 104
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
cache = {}
def __new__(cls, *args, **kwargs):
key = (cls,) + args + tuple(kwargs.items())
try:
inst = Specie.cache.get(key, None)
except TypeError:
# Can't cache this set of arguments
inst = key = None
if inst is None:
inst = object.__new__(cls)
if key is not None:
Specie.cache[key] = inst
return inst
supported_properties = ("spin",)
def __init__(self, symbol, oxidation_state=None, properties=None):
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
except:
raise AttributeError(a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
should hash equally. Unequal Specie will have different str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state, followed by spin.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativities are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
elif self.oxi_state:
other_oxi = 0 if (isinstance(other, Element)
or other.oxi_state is None) else other.oxi_state
return self.oxi_state < other_oxi
elif getattr(self, "spin", False):
other_spin = getattr(other, "spin", 0)
return self.spin < other.spin
else:
return False
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9\.]*)([\+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",","").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
else:
raise ValueError("Invalid Species String")
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_crystal_field_spin(self, coordination="oct", spin_config="high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or \
spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(
self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}"
.format(self.oxi_state, self.symbol))
if spin_config == "high":
return nelectrons if nelectrons <= 5 else 10 - nelectrons
elif spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
elif nelectrons <= 6:
return 6 - nelectrons
elif nelectrons <= 8:
return nelectrons - 6
else:
return 10 - nelectrons
elif coordination == "tet":
if nelectrons <= 2:
return nelectrons
elif nelectrons <= 4:
return 4 - nelectrons
elif nelectrons <= 7:
return nelectrons - 4
else:
return 10 - nelectrons
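    # A hedged worked example (comments only, using the API defined above):
    # Fe3+ has five 3d electrons. In octahedral coordination the high-spin
    # configuration leaves all five unpaired, while low spin pairs them
    # into t2g:
    #
    #   Specie("Fe", 3).get_crystal_field_spin("oct", "high")  # -> 5
    #   Specie("Fe", 3).get_crystal_field_spin("oct", "low")   # -> 1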
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
            rules are applied to the choice of the symbol: no one- or
            two-letter prefix of the dummy symbol may itself be a valid
            Element symbol. Otherwise, a composition may be parsed
            wrongly. E.g., "X" is fine, but "Vac" is not because "Vac"
            starts with "V", a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
.. attribute:: symbol
Symbol for the DummySpecie.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
        DummySpecie is assigned an atomic number equal to the hash of its symbol.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self, symbol="X", oxidation_state=0, properties=None):
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
        # overriding __getattr__ doesn't play nice with pickle, so we
        # can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
        except Exception:
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
        DummySpecie is equal to other only if the symbol, oxidation state
        and properties are exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
# There are cases where the electronegativity are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
else:
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self):
"""
        DummySpecie is always assigned an atomic number equal to the hash
        of its symbol; no one should rely on the atomic number of a
        DummySpecie being physically meaningful.
"""
return self.symbol.__hash__()
@property
def oxi_state(self):
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self):
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0
@property
def symbol(self):
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
            ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0
else:
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
@classmethod
def safe_from_composition(cls, comp, oxidation_state=0):
"""
Returns a DummySpecie object that can be safely used
with (i.e. not present in) a given composition
"""
# We don't want to add a DummySpecie with the same
# symbol as anything in the composition, even if the
# oxidation state is different
els = comp.element_composition.elements
for c in 'abcdfghijklmnopqrstuvwxyz':
if DummySpecie('X' + c) not in els:
return DummySpecie('X' + c, oxidation_state)
raise ValueError("All attempted DummySpecies already "
"present in {}".format(comp))
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
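# A hedged sketch of DummySpecie symbol validation (comments only):
# symbols are rejected when a one- or two-letter prefix is a real element.
#
#   DummySpecie("X", 2)             # ok; str() gives "X2+"
#   DummySpecie.from_string("X2+")  # round-trips the representation
#   DummySpecie("Vac")              # ValueError: "Vac" contains "V"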
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
    DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
            except (ValueError, KeyError):
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
|
setten/pymatgen
|
pymatgen/core/periodic_table.py
|
Python
|
mit
| 40,961
|
[
"CRYSTAL",
"pymatgen"
] |
15e17d4a11cc28762aea2e9a9976867a889b76917d12cfbac11ce3a6d40e0d7b
|
########################################################################
# $HeadURL $
# File: File.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/08/03 15:02:53
########################################################################
"""
:mod: File
.. module: File
:synopsis: RMS operation file
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
operation file
"""
# for properties
# pylint: disable=E0211,W0612,W0142,E1101,E0102,C0103
__RCSID__ = "$Id $"
# #
# @file File.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/08/03 15:03:03
# @brief Definition of File class.
# # imports
import os
# import urlparse
# # from DIRAC
from DIRAC import S_OK
from DIRAC.RequestManagementSystem.private.Record import Record
from DIRAC.Core.Utilities.File import checkGuid
import datetime
########################################################################
class File( Record ):
"""
.. class:: File
A bag object holding Operation file attributes.
:param Operation _parent: reference to parent Operation
:param dict __data__: attrs dict
"""
def __init__( self, fromDict = None ):
"""c'tor
:param self: self reference
:param dict fromDict: property dict
"""
Record.__init__( self )
self._parent = None
self.__data__["FileID"] = 0
self.__data__["OperationID"] = 0
self.__data__["Status"] = "Waiting"
self.__data__["LFN"] = ''
self.__data__["PFN"] = ''
self.__data__["ChecksumType"] = ''
self.__data__["Checksum"] = ''
self.__data__["GUID"] = ''
self.__data__["Attempt"] = 0
self.__data__["Size"] = 0
self.__data__["Error"] = ''
self._duration = 0
fromDict = fromDict if fromDict else {}
for attrName, attrValue in fromDict.items():
if attrName not in self.__data__:
raise AttributeError( "unknown File attribute %s" % str( attrName ) )
if attrValue:
setattr( self, attrName, attrValue )
@staticmethod
def tableDesc():
""" get table desc """
return { "Fields" :
{ "FileID" : "INTEGER NOT NULL AUTO_INCREMENT",
"OperationID" : "INTEGER NOT NULL",
"Status" : "ENUM('Waiting', 'Done', 'Failed', 'Scheduled') DEFAULT 'Waiting'",
"LFN" : "VARCHAR(255)",
"PFN" : "VARCHAR(255)",
"ChecksumType" : "ENUM('ADLER32', 'MD5', 'SHA1', '') DEFAULT ''",
"Checksum" : "VARCHAR(255)",
"GUID" : "VARCHAR(36)",
"Size" : "BIGINT",
"Attempt": "INTEGER",
"Error" : "VARCHAR(255)" },
"PrimaryKey" : "FileID",
'ForeignKeys': {'OperationID': 'Operation.OperationID' },
"Indexes" : { "LFN" : [ "LFN" ] } }
# # properties
@property
def FileID( self ):
""" FileID getter """
return self.__data__["FileID"]
@FileID.setter
def FileID( self, value ):
""" FileID setter """
self.__data__["FileID"] = int( value ) if value else 0
@property
def OperationID( self ):
""" operation ID (RO) """
self.__data__["OperationID"] = self._parent.OperationID if self._parent else 0
return self.__data__["OperationID"]
@OperationID.setter
def OperationID( self, value ):
""" operation ID (RO) """
self.__data__["OperationID"] = self._parent.OperationID if self._parent else 0
@property
def Attempt( self ):
""" attempt getter """
return self.__data__["Attempt"]
@Attempt.setter
def Attempt( self, value ):
""" attempt setter """
self.__data__["Attempt"] = int( value ) if value else 0
@property
def Size( self ):
""" file size getter """
return self.__data__["Size"]
@Size.setter
def Size( self, value ):
""" file size setter """
self.__data__["Size"] = long( value ) if value else 0
@property
def LFN( self ):
""" LFN prop """
return self.__data__["LFN"]
@LFN.setter
def LFN( self, value ):
""" lfn setter """
if type( value ) != str:
raise TypeError( "LFN has to be a string!" )
if not os.path.isabs( value ):
raise ValueError( "LFN should be an absolute path!" )
self.__data__["LFN"] = value
@property
def PFN( self ):
""" PFN prop """
return self.__data__["PFN"]
@PFN.setter
def PFN( self, value ):
""" PFN setter """
if type( value ) != str:
raise TypeError( "PFN has to be a string!" )
# isURL = urlparse.urlparse( value ).scheme
# isABS = os.path.isabs( value )
# if not isURL and not isABS:
# raise ValueError( "Wrongly formatted PFN!" )
self.__data__["PFN"] = value
@property
def GUID( self ):
""" GUID prop """
return self.__data__["GUID"]
@GUID.setter
def GUID( self, value ):
""" GUID setter """
if value:
if type( value ) not in ( str, unicode ):
raise TypeError( "GUID should be a string!" )
if not checkGuid( value ):
raise ValueError( "'%s' is not a valid GUID!" % str( value ) )
self.__data__["GUID"] = value
@property
def ChecksumType( self ):
""" checksum type prop """
return self.__data__["ChecksumType"]
@ChecksumType.setter
def ChecksumType( self, value ):
""" checksum type setter """
if not value:
self.__data__["ChecksumType"] = ""
elif value and str( value ).strip().upper() not in ( "ADLER32", "MD5", "SHA1" ):
if str( value ).strip().upper() == 'AD':
self.__data__["ChecksumType"] = 'ADLER32'
else:
raise ValueError( "unknown checksum type: %s" % value )
else:
self.__data__["ChecksumType"] = str( value ).strip().upper()
@property
def Checksum( self ):
""" checksum prop """
return self.__data__["Checksum"]
@Checksum.setter
def Checksum( self, value ):
""" checksum setter """
self.__data__["Checksum"] = str( value ) if value else ""
@property
def Error( self ):
""" error prop """
return self.__data__["Error"]
@Error.setter
def Error( self, value ):
""" error setter """
if type( value ) != str:
raise TypeError( "Error has to be a string!" )
self.__data__["Error"] = self._escapeStr( value , 255 )
@property
def Status( self ):
""" status prop """
if not self.__data__["Status"]:
self.__data__["Status"] = "Waiting"
return self.__data__["Status"]
@Status.setter
def Status( self, value ):
""" status setter """
if value not in ( "Waiting", "Failed", "Done", "Scheduled" ):
raise ValueError( "Unknown Status: %s!" % str( value ) )
if value == 'Done':
self.__data__['Error'] = ''
updateTime = ( self.__data__["Status"] != value )
if updateTime and self._parent:
self._parent.LastUpdate = datetime.datetime.utcnow().replace( microsecond = 0 )
self.__data__["Status"] = value
if self._parent:
self._parent._notify()
def __str__( self ):
""" str operator """
return str( self.toJSON()["Value"] )
def toSQL( self ):
""" get SQL INSERT or UPDATE statement """
if not self._parent:
raise AttributeError( "File does not belong to any Operation" )
colVals = [ ( "`%s`" % column, "'%s'" % getattr( self, column )
if type( getattr( self, column ) ) == str
                 else str( getattr( self, column ) ) if getattr( self, column ) is not None else "NULL" )
for column in self.__data__
if ( column == 'Error' or getattr( self, column ) ) and column != "FileID" ]
query = []
if self.FileID:
query.append( "UPDATE `File` SET " )
query.append( ", ".join( [ "%s=%s" % item for item in colVals ] ) )
query.append( " WHERE `FileID`=%d;\n" % self.FileID )
else:
query.append( "INSERT INTO `File` " )
columns = "(%s)" % ",".join( [ column for column, value in colVals ] )
values = "(%s)" % ",".join( [ value for column, value in colVals ] )
query.append( columns )
query.append( " VALUES %s;\n" % values )
return S_OK( "".join( query ) )
def toJSON( self ):
""" get json """
digest = dict( [( key, str( getattr( self, key ) ) if getattr( self, key ) else '' ) for key in self.__data__] )
return S_OK( digest )
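# Hedged usage sketch (comments only; assumes a DIRAC environment in which
# this File class and a parent Operation are available):
#
#   f = File( { "LFN": "/lhcb/user/c/ciba/testFile", "Size": 1024,
#               "ChecksumType": "AD", "Checksum": "123456" } )
#   f.ChecksumType        # -> "ADLER32" (the "AD" alias is normalised)
#   f.Status              # -> "Waiting" (the default)
#   f.toJSON()["Value"]   # -> dict of stringified attributes
#   f.toSQL()             # raises AttributeError until f belongs to an Operation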
|
calancha/DIRAC
|
RequestManagementSystem/Client/File.py
|
Python
|
gpl-3.0
| 8,188
|
[
"DIRAC"
] |
febe71573f9dfdd6210a2eacfb692feab27554f81bc2c226e173f3862e1294eb
|
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, LetNode=object,
LetRefNode=object, TreeFragment=object, EncodedString=object,
error=object, warning=object, copy=object)
import PyrexTypes
import Naming
import ExprNodes
import Nodes
import Options
import Builtin
from Cython.Compiler.Visitor import VisitorTransform, TreeVisitor
from Cython.Compiler.Visitor import CythonTransform, EnvTransform, ScopeTrackingTransform
from Cython.Compiler.UtilNodes import LetNode, LetRefNode, ResultRefNode
from Cython.Compiler.TreeFragment import TreeFragment
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.Errors import error, warning, CompileError, InternalError
from Cython.Compiler.Code import UtilityCode
import copy
class NameNodeCollector(TreeVisitor):
"""Collect all NameNodes of a (sub-)tree in the ``name_nodes``
attribute.
"""
def __init__(self):
super(NameNodeCollector, self).__init__()
self.name_nodes = []
def visit_NameNode(self, node):
self.name_nodes.append(node)
def visit_Node(self, node):
self._visitchildren(node, None)
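# A hedged standalone analogue of NameNodeCollector built on the stdlib
# ``ast`` module instead of Cython's tree (illustrative only):
#
#   import ast
#
#   class PyNameCollector(ast.NodeVisitor):
#       def __init__(self):
#           self.names = []
#       def visit_Name(self, node):
#           self.names.append(node.id)
#
#   c = PyNameCollector()
#   c.visit(ast.parse("a = b + c"))
#   c.names  # -> ['a', 'b', 'c']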
class SkipDeclarations(object):
"""
Variable and function declarations can often have a deep tree structure,
and yet most transformations don't need to descend to this depth.
Declaration nodes are removed after AnalyseDeclarationsTransform, so there
is no need to use this for transformations after that point.
"""
def visit_CTypeDefNode(self, node):
return node
def visit_CVarDefNode(self, node):
return node
def visit_CDeclaratorNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return node
def visit_CEnumDefNode(self, node):
return node
def visit_CStructOrUnionDefNode(self, node):
return node
class NormalizeTree(CythonTransform):
"""
This transform fixes up a few things after parsing
in order to make the parse tree more suitable for
transforms.
a) After parsing, blocks with only one statement will
be represented by that statement, not by a StatListNode.
When doing transforms this is annoying and inconsistent,
as one cannot in general remove a statement in a consistent
way and so on. This transform wraps any single statements
in a StatListNode containing a single statement.
b) The PassStatNode is a noop and serves no purpose beyond
plugging such one-statement blocks; i.e., once parsed a
` "pass" can just as well be represented using an empty
StatListNode. This means less special cases to worry about
in subsequent transforms (one always checks to see if a
StatListNode has no children to see if the block is empty).
"""
def __init__(self, context):
super(NormalizeTree, self).__init__(context)
self.is_in_statlist = False
self.is_in_expr = False
def visit_ExprNode(self, node):
stacktmp = self.is_in_expr
self.is_in_expr = True
self.visitchildren(node)
self.is_in_expr = stacktmp
return node
def visit_StatNode(self, node, is_listcontainer=False):
stacktmp = self.is_in_statlist
self.is_in_statlist = is_listcontainer
self.visitchildren(node)
self.is_in_statlist = stacktmp
if not self.is_in_statlist and not self.is_in_expr:
return Nodes.StatListNode(pos=node.pos, stats=[node])
else:
return node
def visit_StatListNode(self, node):
self.is_in_statlist = True
self.visitchildren(node)
self.is_in_statlist = False
return node
def visit_ParallelAssignmentNode(self, node):
return self.visit_StatNode(node, True)
def visit_CEnumDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_CStructOrUnionDefNode(self, node):
return self.visit_StatNode(node, True)
def visit_PassStatNode(self, node):
"""Eliminate PassStatNode"""
if not self.is_in_statlist:
return Nodes.StatListNode(pos=node.pos, stats=[])
else:
return []
def visit_ExprStatNode(self, node):
"""Eliminate useless string literals"""
if node.expr.is_string_literal:
return self.visit_PassStatNode(node)
else:
return self.visit_StatNode(node)
def visit_CDeclaratorNode(self, node):
return node
class PostParseError(CompileError): pass
# error strings checked by unit tests, so define them
ERR_CDEF_INCLASS = 'Cannot assign default value to fields in cdef classes, structs or unions'
ERR_BUF_DEFAULTS = 'Invalid buffer defaults specification (see docs)'
ERR_INVALID_SPECIALATTR_TYPE = 'Special attributes must not have a type declared'
class PostParse(ScopeTrackingTransform):
"""
Basic interpretation of the parse tree, as well as validity
checking that can be done on a very basic level on the parse
tree (while still not being a problem with the basic syntax,
as such).
Specifically:
- Default values to cdef assignments are turned into single
assignments following the declaration (everywhere but in class
bodies, where they raise a compile error)
- Interpret some node structures into Python runtime values.
Some nodes take compile-time arguments (currently:
TemplatedTypeNode[args] and __cythonbufferdefaults__ = {args}),
which should be interpreted. This happens in a general way
and other steps should be taken to ensure validity.
Type arguments cannot be interpreted in this way.
- For __cythonbufferdefaults__ the arguments are checked for
validity.
TemplatedTypeNode has its directives interpreted:
Any first positional argument goes into the "dtype" attribute,
any "ndim" keyword argument goes into the "ndim" attribute and
so on. Also it is checked that the directive combination is valid.
- __cythonbufferdefaults__ attributes are parsed and put into the
type information.
Note: Currently Parsing.py does a lot of interpretation and
reorganization that can be refactored into this transform
if a more pure Abstract Syntax Tree is wanted.
"""
def __init__(self, context):
super(PostParse, self).__init__(context)
self.specialattribute_handlers = {
'__cythonbufferdefaults__' : self.handle_bufferdefaults
}
def visit_ModuleNode(self, node):
self.lambda_counter = 1
self.genexpr_counter = 1
return super(PostParse, self).visit_ModuleNode(node)
def visit_LambdaNode(self, node):
# unpack a lambda expression into the corresponding DefNode
lambda_id = self.lambda_counter
self.lambda_counter += 1
node.lambda_name = EncodedString(u'lambda%d' % lambda_id)
collector = YieldNodeCollector()
collector.visitchildren(node.result_expr)
if collector.yields or isinstance(node.result_expr, ExprNodes.YieldExprNode):
body = Nodes.ExprStatNode(
node.result_expr.pos, expr=node.result_expr)
else:
body = Nodes.ReturnStatNode(
node.result_expr.pos, value=node.result_expr)
node.def_node = Nodes.DefNode(
node.pos, name=node.name, lambda_name=node.lambda_name,
args=node.args, star_arg=node.star_arg,
starstar_arg=node.starstar_arg,
body=body, doc=None)
self.visitchildren(node)
return node
def visit_GeneratorExpressionNode(self, node):
# unpack a generator expression into the corresponding DefNode
genexpr_id = self.genexpr_counter
self.genexpr_counter += 1
node.genexpr_name = EncodedString(u'genexpr%d' % genexpr_id)
node.def_node = Nodes.DefNode(node.pos, name=node.name,
doc=None,
args=[], star_arg=None,
starstar_arg=None,
body=node.loop)
self.visitchildren(node)
return node
# cdef variables
def handle_bufferdefaults(self, decl):
if not isinstance(decl.default, ExprNodes.DictNode):
raise PostParseError(decl.pos, ERR_BUF_DEFAULTS)
self.scope_node.buffer_defaults_node = decl.default
self.scope_node.buffer_defaults_pos = decl.pos
def visit_CVarDefNode(self, node):
# This assumes only plain names and pointers are assignable on
# declaration. Also, it makes use of the fact that a cdef decl
# must appear before the first use, so we don't have to deal with
# "i = 3; cdef int i = i" and can simply move the nodes around.
try:
self.visitchildren(node)
stats = [node]
newdecls = []
for decl in node.declarators:
declbase = decl
while isinstance(declbase, Nodes.CPtrDeclaratorNode):
declbase = declbase.base
if isinstance(declbase, Nodes.CNameDeclaratorNode):
if declbase.default is not None:
if self.scope_type in ('cclass', 'pyclass', 'struct'):
if isinstance(self.scope_node, Nodes.CClassDefNode):
handler = self.specialattribute_handlers.get(decl.name)
if handler:
if decl is not declbase:
raise PostParseError(decl.pos, ERR_INVALID_SPECIALATTR_TYPE)
handler(decl)
continue # Remove declaration
raise PostParseError(decl.pos, ERR_CDEF_INCLASS)
first_assignment = self.scope_type != 'module'
stats.append(Nodes.SingleAssignmentNode(node.pos,
lhs=ExprNodes.NameNode(node.pos, name=declbase.name),
rhs=declbase.default, first=first_assignment))
declbase.default = None
newdecls.append(decl)
node.declarators = newdecls
return stats
except PostParseError, e:
# An error in a cdef clause is ok, simply remove the declaration
# and try to move on to report more errors
self.context.nonfatal_error(e)
return None
# Split parallel assignments (a,b = b,a) into separate partial
# assignments that are executed rhs-first using temps. This
# restructuring must be applied before type analysis so that known
# types on rhs and lhs can be matched directly. It is required in
# the case that the types cannot be coerced to a Python type in
# order to assign from a tuple.
def visit_SingleAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, [node.lhs, node.rhs])
def visit_CascadedAssignmentNode(self, node):
self.visitchildren(node)
return self._visit_assignment_node(node, node.lhs_list + [node.rhs])
def _visit_assignment_node(self, node, expr_list):
"""Flatten parallel assignments into separate single
assignments or cascaded assignments.
"""
if sum([ 1 for expr in expr_list
if expr.is_sequence_constructor or expr.is_string_literal ]) < 2:
# no parallel assignments => nothing to do
return node
expr_list_list = []
flatten_parallel_assignments(expr_list, expr_list_list)
temp_refs = []
eliminate_rhs_duplicates(expr_list_list, temp_refs)
nodes = []
for expr_list in expr_list_list:
lhs_list = expr_list[:-1]
rhs = expr_list[-1]
if len(lhs_list) == 1:
node = Nodes.SingleAssignmentNode(rhs.pos,
lhs = lhs_list[0], rhs = rhs)
else:
node = Nodes.CascadedAssignmentNode(rhs.pos,
lhs_list = lhs_list, rhs = rhs)
nodes.append(node)
if len(nodes) == 1:
assign_node = nodes[0]
else:
assign_node = Nodes.ParallelAssignmentNode(nodes[0].pos, stats = nodes)
if temp_refs:
duplicates_and_temps = [ (temp.expression, temp)
for temp in temp_refs ]
sort_common_subsequences(duplicates_and_temps)
for _, temp_ref in duplicates_and_temps[::-1]:
assign_node = LetNode(temp_ref, assign_node)
return assign_node
def _flatten_sequence(self, seq, result):
for arg in seq.args:
if arg.is_sequence_constructor:
self._flatten_sequence(arg, result)
else:
result.append(arg)
return result
def visit_DelStatNode(self, node):
self.visitchildren(node)
node.args = self._flatten_sequence(node, [])
return node
def visit_ExceptClauseNode(self, node):
if node.is_except_as:
# except-as must delete NameNode target at the end
del_target = Nodes.DelStatNode(
node.pos,
args=[ExprNodes.NameNode(
node.target.pos, name=node.target.name)],
ignore_nonexisting=True)
node.body = Nodes.StatListNode(
node.pos,
stats=[Nodes.TryFinallyStatNode(
node.pos,
body=node.body,
finally_clause=Nodes.StatListNode(
node.pos,
stats=[del_target]))])
self.visitchildren(node)
return node
def eliminate_rhs_duplicates(expr_list_list, ref_node_sequence):
"""Replace rhs items by LetRefNodes if they appear more than once.
Creates a sequence of LetRefNodes that set up the required temps
and appends them to ref_node_sequence. The input list is modified
in-place.
"""
seen_nodes = set()
ref_nodes = {}
def find_duplicates(node):
if node.is_literal or node.is_name:
# no need to replace those; can't include attributes here
# as their access is not necessarily side-effect free
return
if node in seen_nodes:
if node not in ref_nodes:
ref_node = LetRefNode(node)
ref_nodes[node] = ref_node
ref_node_sequence.append(ref_node)
else:
seen_nodes.add(node)
if node.is_sequence_constructor:
for item in node.args:
find_duplicates(item)
for expr_list in expr_list_list:
rhs = expr_list[-1]
find_duplicates(rhs)
if not ref_nodes:
return
def substitute_nodes(node):
if node in ref_nodes:
return ref_nodes[node]
elif node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
return node
# replace nodes inside of the common subexpressions
for node in ref_nodes:
if node.is_sequence_constructor:
node.args = list(map(substitute_nodes, node.args))
# replace common subexpressions on all rhs items
for expr_list in expr_list_list:
expr_list[-1] = substitute_nodes(expr_list[-1])
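# A hedged standalone analogue of the duplicate detection above, for plain
# hashable rhs values rather than expression nodes (illustrative only):
#
#   def find_rhs_duplicates(expr_lists):
#       seen, temps = set(), {}
#       for exprs in expr_lists:
#           rhs = exprs[-1]
#           if rhs in seen and rhs not in temps:
#               temps[rhs] = "temp%d" % len(temps)
#           seen.add(rhs)
#       return temps
#
#   find_rhs_duplicates([["a", "x"], ["b", "x"], ["c", "y"]])  # -> {'x': 'temp0'}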
def sort_common_subsequences(items):
"""Sort items/subsequences so that all items and subsequences that
an item contains appear before the item itself. This is needed
because each rhs item must only be evaluated once, so its value
must be evaluated first and then reused when packing sequences
that contain it.
This implies a partial order, and the sort must be stable to
preserve the original order as much as possible, so we use a
simple insertion sort (which is very fast for short sequences, the
normal case in practice).
"""
def contains(seq, x):
for item in seq:
if item is x:
return True
elif item.is_sequence_constructor and contains(item.args, x):
return True
return False
def lower_than(a,b):
return b.is_sequence_constructor and contains(b.args, a)
for pos, item in enumerate(items):
key = item[1] # the ResultRefNode which has already been injected into the sequences
new_pos = pos
for i in xrange(pos-1, -1, -1):
if lower_than(key, items[i][0]):
new_pos = i
if new_pos != pos:
for i in xrange(pos, new_pos, -1):
items[i] = items[i-1]
items[new_pos] = item
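# A hedged standalone analogue of the insertion sort above, using substring
# containment as the partial order (illustrative only):
#
#   def partial_sort(items, lower_than):
#       for pos, item in enumerate(items):
#           new_pos = pos
#           for i in range(pos - 1, -1, -1):
#               if lower_than(item, items[i]):
#                   new_pos = i
#           if new_pos != pos:
#               items[new_pos:pos + 1] = [item] + items[new_pos:pos]
#       return items
#
#   partial_sort(["abc", "b", "a"], lambda a, b: a in b and a != b)
#   # -> ['b', 'a', 'abc']  (contained items end up before their containers)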
def unpack_string_to_character_literals(literal):
chars = []
pos = literal.pos
stype = literal.__class__
sval = literal.value
sval_type = sval.__class__
for char in sval:
cval = sval_type(char)
chars.append(stype(pos, value=cval, constant_result=cval))
return chars
def flatten_parallel_assignments(input, output):
# The input is a list of expression nodes, representing the LHSs
# and RHS of one (possibly cascaded) assignment statement. For
# sequence constructors, rearranges the matching parts of both
# sides into a list of equivalent assignments between the
# individual elements. This transformation is applied
# recursively, so that nested structures get matched as well.
rhs = input[-1]
if (not (rhs.is_sequence_constructor or isinstance(rhs, ExprNodes.UnicodeNode))
or not sum([lhs.is_sequence_constructor for lhs in input[:-1]])):
output.append(input)
return
complete_assignments = []
if rhs.is_sequence_constructor:
rhs_args = rhs.args
elif rhs.is_string_literal:
rhs_args = unpack_string_to_character_literals(rhs)
rhs_size = len(rhs_args)
lhs_targets = [ [] for _ in xrange(rhs_size) ]
starred_assignments = []
for lhs in input[:-1]:
if not lhs.is_sequence_constructor:
if lhs.is_starred:
error(lhs.pos, "starred assignment target must be in a list or tuple")
complete_assignments.append(lhs)
continue
lhs_size = len(lhs.args)
starred_targets = sum([1 for expr in lhs.args if expr.is_starred])
if starred_targets > 1:
error(lhs.pos, "more than 1 starred expression in assignment")
output.append([lhs,rhs])
continue
elif lhs_size - starred_targets > rhs_size:
error(lhs.pos, "need more than %d value%s to unpack"
% (rhs_size, (rhs_size != 1) and 's' or ''))
output.append([lhs,rhs])
continue
elif starred_targets:
map_starred_assignment(lhs_targets, starred_assignments,
lhs.args, rhs_args)
elif lhs_size < rhs_size:
error(lhs.pos, "too many values to unpack (expected %d, got %d)"
% (lhs_size, rhs_size))
output.append([lhs,rhs])
continue
else:
for targets, expr in zip(lhs_targets, lhs.args):
targets.append(expr)
if complete_assignments:
complete_assignments.append(rhs)
output.append(complete_assignments)
# recursively flatten partial assignments
for cascade, rhs in zip(lhs_targets, rhs_args):
if cascade:
cascade.append(rhs)
flatten_parallel_assignments(cascade, output)
# recursively flatten starred assignments
for cascade in starred_assignments:
if cascade[0].is_sequence_constructor:
flatten_parallel_assignments(cascade, output)
else:
output.append(cascade)
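# A hedged standalone analogue of the flattening above for plain nested
# tuples (illustrative; the real code matches expression nodes and handles
# starred targets):
#
#   def flatten(lhs, rhs, out):
#       if isinstance(lhs, tuple) and isinstance(rhs, tuple) and len(lhs) == len(rhs):
#           for l, r in zip(lhs, rhs):
#               flatten(l, r, out)
#       else:
#           out.append((lhs, rhs))
#
#   pairs = []
#   flatten(("a", ("b", "c")), (1, (2, 3)), pairs)
#   pairs  # -> [('a', 1), ('b', 2), ('c', 3)]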
def map_starred_assignment(lhs_targets, starred_assignments, lhs_args, rhs_args):
# Appends the fixed-position LHS targets to the target list that
# appear left and right of the starred argument.
#
# The starred_assignments list receives a new tuple
# (lhs_target, rhs_values_list) that maps the remaining arguments
# (those that match the starred target) to a list.
# left side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets, lhs_args)):
if expr.is_starred:
starred = i
lhs_remaining = len(lhs_args) - i - 1
break
targets.append(expr)
else:
raise InternalError("no starred arg found when splitting starred assignment")
# right side of the starred target
for i, (targets, expr) in enumerate(zip(lhs_targets[-lhs_remaining:],
lhs_args[starred + 1:])):
targets.append(expr)
# the starred target itself, must be assigned a (potentially empty) list
target = lhs_args[starred].target # unpack starred node
starred_rhs = rhs_args[starred:]
if lhs_remaining:
starred_rhs = starred_rhs[:-lhs_remaining]
if starred_rhs:
pos = starred_rhs[0].pos
else:
pos = target.pos
starred_assignments.append([
target, ExprNodes.ListNode(pos=pos, args=starred_rhs)])
class PxdPostParse(CythonTransform, SkipDeclarations):
"""
Basic interpretation/validity checking that should only be
done on pxd trees.
A lot of this checking currently happens in the parser; but
what is listed below happens here.
- "def" functions are let through only if they fill the
getbuffer/releasebuffer slots
- cdef functions are let through only if they are on the
top level and are declared "inline"
"""
ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'"
ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'"
def __call__(self, node):
self.scope_type = 'pxd'
return super(PxdPostParse, self).__call__(node)
def visit_CClassDefNode(self, node):
old = self.scope_type
self.scope_type = 'cclass'
self.visitchildren(node)
self.scope_type = old
return node
def visit_FuncDefNode(self, node):
        # FuncDefNodes always come with an implementation (without
        # an implementation they are CVarDefNodes...)
err = self.ERR_INLINE_ONLY
if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass'
and node.name in ('__getbuffer__', '__releasebuffer__')):
err = None # allow these slots
if isinstance(node, Nodes.CFuncDefNode):
if (u'inline' in node.modifiers and
self.scope_type in ('pxd', 'cclass')):
node.inline_in_pxd = True
if node.visibility != 'private':
err = self.ERR_NOGO_WITH_INLINE % node.visibility
elif node.api:
err = self.ERR_NOGO_WITH_INLINE % 'api'
else:
err = None # allow inline function
else:
err = self.ERR_INLINE_ONLY
if err:
self.context.nonfatal_error(PostParseError(node.pos, err))
return None
else:
return node
class InterpretCompilerDirectives(CythonTransform, SkipDeclarations):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
- Command-line arguments overriding these
- @cython.directivename decorators
- with cython.directivename: statements
This transform is responsible for interpreting these various sources
and store the directive in two ways:
- Set the directives attribute of the ModuleNode for global directives.
- Use a CompilerDirectivesNode to override directives for a subtree.
    (The first one is primarily to avoid having to modify the tree
    structure, so that ModuleNode stays on top.)
The directives are stored in dictionaries from name to value in effect.
Each such dictionary is always filled in for all possible directives,
using default values where no value is given by the user.
The available directives are controlled in Options.py.
Note that we have to run this prior to analysis, and so some minor
duplication of functionality has to occur: We manually track cimports
and which names the "cython" module may have been imported to.
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
        # For backwards compatibility.
'address': ExprNodes.AmpersandNode,
}
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
special_methods = set(['declare', 'union', 'struct', 'typedef',
'sizeof', 'cast', 'pointer', 'compiled',
'NULL', 'fused_type', 'parallel'])
special_methods.update(unop_method_nodes.keys())
valid_parallel_directives = set([
"parallel",
"prange",
"threadid",
# "threadsavailable",
])
def __init__(self, context, compilation_directive_defaults):
super(InterpretCompilerDirectives, self).__init__(context)
self.compilation_directive_defaults = {}
for key, value in compilation_directive_defaults.items():
self.compilation_directive_defaults[unicode(key)] = copy.deepcopy(value)
self.cython_module_names = set()
self.directive_names = {}
self.parallel_directives = {}
def check_directive_scope(self, pos, directive, scope):
legal_scopes = Options.directive_scopes.get(directive, None)
if legal_scopes and scope not in legal_scopes:
self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
'is not allowed in %s scope' % (directive, scope)))
return False
else:
if (directive not in Options.directive_defaults
and directive not in Options.directive_types):
error(pos, "Invalid directive: '%s'." % (directive,))
return True
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key, value in node.directive_comments.items():
if not self.check_directive_scope(node.pos, key, 'module'):
self.wrong_scope_error(node.pos, key, 'module')
del node.directive_comments[key]
self.module_scope = node.scope
directives = copy.deepcopy(Options.directive_defaults)
directives.update(copy.deepcopy(self.compilation_directive_defaults))
directives.update(node.directive_comments)
self.directives = directives
node.directives = directives
node.parallel_directives = self.parallel_directives
self.visitchildren(node)
node.cython_module_names = self.cython_module_names
return node
# The following four functions track imports and cimports that
# begin with "cython"
def is_cython_directive(self, name):
return (name in Options.directive_types or
name in self.special_methods or
PyrexTypes.parse_basic_type(name))
def is_parallel_directive(self, full_name, pos):
"""
Checks to see if fullname (e.g. cython.parallel.prange) is a valid
parallel directive. If it is a star import it also updates the
parallel_directives.
"""
result = (full_name + ".").startswith("cython.parallel.")
if result:
directive = full_name.split('.')
if full_name == u"cython.parallel":
self.parallel_directives[u"parallel"] = u"cython.parallel"
elif full_name == u"cython.parallel.*":
for name in self.valid_parallel_directives:
self.parallel_directives[name] = u"cython.parallel.%s" % name
elif (len(directive) != 3 or
directive[-1] not in self.valid_parallel_directives):
error(pos, "No such directive: %s" % full_name)
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
return result
def visit_CImportStatNode(self, node):
if node.module_name == u"cython":
self.cython_module_names.add(node.as_name or u"cython")
elif node.module_name.startswith(u"cython."):
if node.module_name.startswith(u"cython.parallel."):
error(node.pos, node.module_name + " is not a module")
if node.module_name == u"cython.parallel":
if node.as_name and node.as_name != u"cython":
self.parallel_directives[node.as_name] = node.module_name
else:
self.cython_module_names.add(u"cython")
self.parallel_directives[
u"cython.parallel"] = node.module_name
self.module_scope.use_utility_code(
UtilityCode.load_cached("InitThreads", "ModuleSetupCode.c"))
elif node.as_name:
self.directive_names[node.as_name] = node.module_name[7:]
else:
self.cython_module_names.add(u"cython")
# if this cimport was a compiler directive, we don't
# want to leave the cimport node sitting in the tree
return None
return node
def visit_FromCImportStatNode(self, node):
if (node.module_name == u"cython") or \
node.module_name.startswith(u"cython."):
submodule = (node.module_name + u".")[7:]
newimp = []
for pos, name, as_name, kind in node.imported_names:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
# from cython cimport parallel, or
# from cython.parallel cimport parallel, prange, ...
self.parallel_directives[as_name or name] = qualified_name
elif self.is_cython_directive(full_name):
if as_name is None:
as_name = full_name
self.directive_names[as_name] = full_name
if kind is not None:
self.context.nonfatal_error(PostParseError(pos,
"Compiler directive imports must be plain imports"))
else:
newimp.append((pos, name, as_name, kind))
if not newimp:
return None
node.imported_names = newimp
return node
def visit_FromImportStatNode(self, node):
if (node.module.module_name.value == u"cython") or \
node.module.module_name.value.startswith(u"cython."):
submodule = (node.module.module_name.value + u".")[7:]
newimp = []
for name, name_node in node.items:
full_name = submodule + name
qualified_name = u"cython." + full_name
if self.is_parallel_directive(qualified_name, node.pos):
self.parallel_directives[name_node.name] = qualified_name
elif self.is_cython_directive(full_name):
self.directive_names[name_node.name] = full_name
else:
newimp.append((name, name_node))
if not newimp:
return None
node.items = newimp
return node
def visit_SingleAssignmentNode(self, node):
if isinstance(node.rhs, ExprNodes.ImportNode):
module_name = node.rhs.module_name.value
is_parallel = (module_name + u".").startswith(u"cython.parallel.")
if module_name != u"cython" and not is_parallel:
return node
module_name = node.rhs.module_name.value
as_name = node.lhs.name
node = Nodes.CImportStatNode(node.pos,
module_name = module_name,
as_name = as_name)
node = self.visit_CImportStatNode(node)
else:
self.visitchildren(node)
return node
def visit_NameNode(self, node):
if node.name in self.cython_module_names:
node.is_cython_module = True
else:
node.cython_attribute = self.directive_names.get(node.name)
return node
def try_to_parse_directives(self, node):
# If node is the contents of an directive (in a with statement or
# decorator), returns a list of (directivename, value) pairs.
# Otherwise, returns None
if isinstance(node, ExprNodes.CallNode):
self.visit(node.function)
optname = node.function.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype:
args, kwds = node.explicit_args_kwds()
directives = []
key_value_pairs = []
if kwds is not None and directivetype is not dict:
for keyvalue in kwds.key_value_pairs:
key, value = keyvalue
sub_optname = "%s.%s" % (optname, key.value)
if Options.directive_types.get(sub_optname):
directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
else:
key_value_pairs.append(keyvalue)
if not key_value_pairs:
kwds = None
else:
kwds.key_value_pairs = key_value_pairs
if directives and not kwds and not args:
return directives
directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
return directives
elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
self.visit(node)
optname = node.as_cython_attribute()
if optname:
directivetype = Options.directive_types.get(optname)
if directivetype is bool:
return [(optname, True)]
elif directivetype is None:
return [(optname, None)]
else:
raise PostParseError(
node.pos, "The '%s' directive should be used as a function call." % optname)
return None
def try_to_parse_directive(self, optname, args, kwds, pos):
directivetype = Options.directive_types.get(optname)
if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
return optname, Options.directive_defaults[optname]
elif directivetype is bool:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
raise PostParseError(pos,
'The %s directive takes one compile-time boolean argument' % optname)
return (optname, args[0].value)
elif directivetype is int:
if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
raise PostParseError(pos,
'The %s directive takes one compile-time integer argument' % optname)
return (optname, int(args[0].value))
elif directivetype is str:
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, str(args[0].value))
elif directivetype is type:
if kwds is not None or len(args) != 1:
raise PostParseError(pos,
'The %s directive takes one type argument' % optname)
return (optname, args[0])
elif directivetype is dict:
if len(args) != 0:
raise PostParseError(pos,
                    'The %s directive takes no positional arguments' % optname)
return optname, dict([(key.value, value) for key, value in kwds.key_value_pairs])
elif directivetype is list:
if kwds and len(kwds) != 0:
raise PostParseError(pos,
'The %s directive takes no keyword arguments' % optname)
return optname, [ str(arg.value) for arg in args ]
elif callable(directivetype):
if kwds is not None or len(args) != 1 or not isinstance(
args[0], (ExprNodes.StringNode, ExprNodes.UnicodeNode)):
raise PostParseError(pos,
'The %s directive takes one compile-time string argument' % optname)
return (optname, directivetype(optname, str(args[0].value)))
else:
assert False
def visit_with_directives(self, body, directives):
olddirectives = self.directives
newdirectives = copy.copy(olddirectives)
newdirectives.update(directives)
self.directives = newdirectives
assert isinstance(body, Nodes.StatListNode), body
retbody = self.visit_Node(body)
directive = Nodes.CompilerDirectivesNode(pos=retbody.pos, body=retbody,
directives=newdirectives)
self.directives = olddirectives
return directive
# Handle decorators
def visit_FuncDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CVarDefNode(self, node):
directives = self._extract_directives(node, 'function')
if not directives:
return node
for name, value in directives.iteritems():
if name == 'locals':
node.directive_locals = value
elif name != 'final':
self.context.nonfatal_error(PostParseError(
node.pos,
"Cdef functions can only take cython.locals() "
"or final decorators, got %s." % name))
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_CClassDefNode(self, node):
directives = self._extract_directives(node, 'cclass')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def visit_PyClassDefNode(self, node):
directives = self._extract_directives(node, 'class')
if not directives:
return self.visit_Node(node)
body = Nodes.StatListNode(node.pos, stats=[node])
return self.visit_with_directives(body, directives)
def _extract_directives(self, node, scope_name):
if not node.decorators:
return {}
# Split the decorators into two lists -- real decorators and directives
directives = []
realdecs = []
for dec in node.decorators:
new_directives = self.try_to_parse_directives(dec.decorator)
if new_directives is not None:
for directive in new_directives:
if self.check_directive_scope(node.pos, directive[0], scope_name):
directives.append(directive)
else:
realdecs.append(dec)
if realdecs and isinstance(node, (Nodes.CFuncDefNode, Nodes.CClassDefNode, Nodes.CVarDefNode)):
raise PostParseError(realdecs[0].pos, "Cdef functions/classes cannot take arbitrary decorators.")
else:
node.decorators = realdecs
# merge or override repeated directives
optdict = {}
directives.reverse() # Decorators coming first take precedence
for directive in directives:
name, value = directive
if name in optdict:
old_value = optdict[name]
# keywords and arg lists can be merged, everything
# else overrides completely
if isinstance(old_value, dict):
old_value.update(value)
elif isinstance(old_value, list):
old_value.extend(value)
else:
optdict[name] = value
else:
optdict[name] = value
return optdict
# Handle with statements
def visit_WithStatNode(self, node):
directive_dict = {}
for directive in self.try_to_parse_directives(node.manager) or []:
if directive is not None:
if node.target is not None:
self.context.nonfatal_error(
PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
else:
name, value = directive
if name in ('nogil', 'gil'):
# special case: in pure mode, "with nogil" spells "with cython.nogil"
node = Nodes.GILStatNode(node.pos, state = name, body = node.body)
return self.visit_Node(node)
if self.check_directive_scope(node.pos, name, 'with statement'):
directive_dict[name] = value
if directive_dict:
return self.visit_with_directives(node.body, directive_dict)
return self.visit_Node(node)
class ParallelRangeTransform(CythonTransform, SkipDeclarations):
"""
Transform cython.parallel stuff. The parallel_directives come from the
module node, set there by InterpretCompilerDirectives.
    x = cython.parallel.threadsavailable() -> ParallelThreadsAvailableNode
with nogil, cython.parallel.parallel(): -> ParallelWithBlockNode
print cython.parallel.threadid() -> ParallelThreadIdNode
for i in cython.parallel.prange(...): -> ParallelRangeNode
...
"""
# a list of names, maps 'cython.parallel.prange' in the code to
# ['cython', 'parallel', 'prange']
parallel_directive = None
# Indicates whether a namenode in an expression is the cython module
namenode_is_cython_module = False
# Keep track of whether we are the context manager of a 'with' statement
in_context_manager_section = False
# One of 'prange' or 'with parallel'. This is used to disallow closely
# nested 'with parallel:' blocks
state = None
directive_to_node = {
u"cython.parallel.parallel": Nodes.ParallelWithBlockNode,
# u"cython.parallel.threadsavailable": ExprNodes.ParallelThreadsAvailableNode,
u"cython.parallel.threadid": ExprNodes.ParallelThreadIdNode,
u"cython.parallel.prange": Nodes.ParallelRangeNode,
}
def node_is_parallel_directive(self, node):
return node.name in self.parallel_directives or node.is_cython_module
def get_directive_class_node(self, node):
"""
Figure out which parallel directive was used and return the associated
Node class.
E.g. for a cython.parallel.prange() call we return ParallelRangeNode
"""
if self.namenode_is_cython_module:
directive = '.'.join(self.parallel_directive)
else:
directive = self.parallel_directives[self.parallel_directive[0]]
directive = '%s.%s' % (directive,
'.'.join(self.parallel_directive[1:]))
directive = directive.rstrip('.')
cls = self.directive_to_node.get(directive)
if cls is None and not (self.namenode_is_cython_module and
self.parallel_directive[0] != 'parallel'):
error(node.pos, "Invalid directive: %s" % directive)
self.namenode_is_cython_module = False
self.parallel_directive = None
return cls
def visit_ModuleNode(self, node):
"""
If any parallel directives were imported, copy them over and visit
the AST
"""
if node.parallel_directives:
self.parallel_directives = node.parallel_directives
return self.visit_Node(node)
# No parallel directives were imported, so they can't be used :)
return node
def visit_NameNode(self, node):
if self.node_is_parallel_directive(node):
self.parallel_directive = [node.name]
self.namenode_is_cython_module = node.is_cython_module
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
if self.parallel_directive:
self.parallel_directive.append(node.attribute)
return node
def visit_CallNode(self, node):
self.visit(node.function)
if not self.parallel_directive:
return node
# We are a parallel directive, replace this node with the
# corresponding ParallelSomethingSomething node
if isinstance(node, ExprNodes.GeneralCallNode):
args = node.positional_args.args
kwargs = node.keyword_args
else:
args = node.args
kwargs = {}
parallel_directive_class = self.get_directive_class_node(node)
if parallel_directive_class:
# Note: in case of a parallel() the body is set by
# visit_WithStatNode
node = parallel_directive_class(node.pos, args=args, kwargs=kwargs)
return node
def visit_WithStatNode(self, node):
"Rewrite with cython.parallel.parallel() blocks"
newnode = self.visit(node.manager)
if isinstance(newnode, Nodes.ParallelWithBlockNode):
if self.state == 'parallel with':
error(node.manager.pos,
"Nested parallel with blocks are disallowed")
self.state = 'parallel with'
body = self.visit(node.body)
self.state = None
newnode.body = body
return newnode
elif self.parallel_directive:
parallel_directive_class = self.get_directive_class_node(node)
if not parallel_directive_class:
# There was an error, stop here and now
return None
if parallel_directive_class is Nodes.ParallelWithBlockNode:
error(node.pos, "The parallel directive must be called")
return None
node.body = self.visit(node.body)
return node
def visit_ForInStatNode(self, node):
"Rewrite 'for i in cython.parallel.prange(...):'"
self.visit(node.iterator)
self.visit(node.target)
in_prange = isinstance(node.iterator.sequence,
Nodes.ParallelRangeNode)
previous_state = self.state
if in_prange:
# This will replace the entire ForInStatNode, so copy the
# attributes
parallel_range_node = node.iterator.sequence
parallel_range_node.target = node.target
parallel_range_node.body = node.body
parallel_range_node.else_clause = node.else_clause
node = parallel_range_node
if not isinstance(node.target, ExprNodes.NameNode):
error(node.target.pos,
"Can only iterate over an iteration variable")
self.state = 'prange'
self.visit(node.body)
self.state = previous_state
self.visit(node.else_clause)
return node
def visit(self, node):
"Visit a node that may be None"
if node is not None:
return super(ParallelRangeTransform, self).visit(node)
class WithTransform(CythonTransform, SkipDeclarations):
def visit_WithStatNode(self, node):
self.visitchildren(node, 'body')
pos = node.pos
body, target, manager = node.body, node.target, node.manager
node.enter_call = ExprNodes.SimpleCallNode(
pos, function=ExprNodes.AttributeNode(
pos, obj=ExprNodes.CloneNode(manager),
attribute=EncodedString('__enter__'),
is_special_lookup=True),
args=[],
is_temp=True)
if target is not None:
body = Nodes.StatListNode(
pos, stats = [
Nodes.WithTargetAssignmentStatNode(
pos, lhs = target,
rhs = ResultRefNode(node.enter_call),
orig_rhs = node.enter_call),
body])
excinfo_target = ExprNodes.TupleNode(pos, slow=True, args=[
ExprNodes.ExcValueNode(pos) for _ in range(3)])
except_clause = Nodes.ExceptClauseNode(
pos, body=Nodes.IfStatNode(
pos, if_clauses=[
Nodes.IfClauseNode(
pos, condition=ExprNodes.NotNode(
pos, operand=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=False,
args=excinfo_target)),
body=Nodes.ReraiseStatNode(pos),
),
],
else_clause=None),
pattern=None,
target=None,
excinfo_target=excinfo_target,
)
node.body = Nodes.TryFinallyStatNode(
pos, body=Nodes.TryExceptStatNode(
pos, body=body,
except_clauses=[except_clause],
else_clause=None,
),
finally_clause=Nodes.ExprStatNode(
pos, expr=ExprNodes.WithExitCallNode(
pos, with_stat=node,
test_if_run=True,
args=ExprNodes.TupleNode(
pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]
))),
handle_error_case=False,
)
return node
def visit_ExprNode(self, node):
# With statements are never inside expressions.
return node
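# Hedged sketch of the lowering performed by WithTransform, written as the
# equivalent Python per PEP 343 (the transform builds this same shape from
# TryFinallyStatNode / TryExceptStatNode and WithExitCallNode):
#
#   mgr = EXPR
#   value = mgr.__enter__()
#   exc = True
#   try:
#       try:
#           TARGET = value          # only when an "as TARGET" clause exists
#           BODY
#       except:
#           exc = False
#           if not mgr.__exit__(*sys.exc_info()):
#               raise
#   finally:
#       if exc:
#           mgr.__exit__(None, None, None)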
class DecoratorTransform(ScopeTrackingTransform, SkipDeclarations):
"""Originally, this was the only place where decorators were
transformed into the corresponding calling code. Now, this is
done directly in DefNode and PyClassDefNode to avoid reassignments
to the function/class name - except for cdef class methods. For
those, the reassignment is required as methods are originally
defined in the PyMethodDef struct.
    The IndirectionNode allows DefNode to override the decorator.
"""
def visit_DefNode(self, func_node):
scope_type = self.scope_type
func_node = self.visit_FuncDefNode(func_node)
if scope_type != 'cclass' or not func_node.decorators:
return func_node
return self.handle_decorators(func_node, func_node.decorators,
func_node.name)
def handle_decorators(self, node, decorators, name):
decorator_result = ExprNodes.NameNode(node.pos, name = name)
for decorator in decorators[::-1]:
decorator_result = ExprNodes.SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [decorator_result])
name_node = ExprNodes.NameNode(node.pos, name = name)
reassignment = Nodes.SingleAssignmentNode(
node.pos,
lhs = name_node,
rhs = decorator_result)
reassignment = Nodes.IndirectionNode([reassignment])
node.decorator_indirection = reassignment
return [node, reassignment]
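    # Hedged illustration (comments only): for a cdef class method
    #
    #   @deco1
    #   @deco2
    #   def meth(self): ...
    #
    # the loop above builds deco1(deco2(meth)) by walking decorators[::-1]
    # and then reassigns the result to the name "meth", since cdef class
    # methods originate in the PyMethodDef struct and must be rebound
    # explicitly.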
class CnameDirectivesTransform(CythonTransform, SkipDeclarations):
"""
Only part of the CythonUtilityCode pipeline. Must be run before
DecoratorTransform in case this is a decorator for a cdef class.
It filters out @cname('my_cname') decorators and rewrites them to
CnameDecoratorNodes.
"""
def handle_function(self, node):
if not getattr(node, 'decorators', None):
return self.visit_Node(node)
for i, decorator in enumerate(node.decorators):
decorator = decorator.decorator
if (isinstance(decorator, ExprNodes.CallNode) and
decorator.function.is_name and
decorator.function.name == 'cname'):
args, kwargs = decorator.explicit_args_kwds()
if kwargs:
raise AssertionError(
"cname decorator does not take keyword arguments")
if len(args) != 1:
raise AssertionError(
"cname decorator takes exactly one argument")
if not (args[0].is_literal and
args[0].type == Builtin.str_type):
raise AssertionError(
"argument to cname decorator must be a string literal")
cname = args[0].compile_time_value(None).decode('UTF-8')
del node.decorators[i]
node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
cname=cname)
break
return self.visit_Node(node)
visit_FuncDefNode = handle_function
visit_CClassDefNode = handle_function
visit_CEnumDefNode = handle_function
visit_CStructOrUnionDefNode = handle_function
class ForwardDeclareTypes(CythonTransform):
def visit_CompilerDirectivesNode(self, node):
env = self.module_scope
old = env.directives
env.directives = node.directives
self.visitchildren(node)
env.directives = old
return node
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.module_scope.directives = node.directives
self.visitchildren(node)
return node
def visit_CDefExternNode(self, node):
old_cinclude_flag = self.module_scope.in_cinclude
self.module_scope.in_cinclude = 1
self.visitchildren(node)
self.module_scope.in_cinclude = old_cinclude_flag
return node
def visit_CEnumDefNode(self, node):
node.declare(self.module_scope)
return node
def visit_CStructOrUnionDefNode(self, node):
if node.name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
def visit_CClassDefNode(self, node):
if node.class_name not in self.module_scope.entries:
node.declare(self.module_scope)
return node
class AnalyseDeclarationsTransform(EnvTransform):
basic_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
""", level='c_class')
basic_pyobject_property = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
def __set__(self, value):
ATTR = value
def __del__(self):
ATTR = None
""", level='c_class')
basic_property_ro = TreeFragment(u"""
property NAME:
def __get__(self):
return ATTR
""", level='c_class')
struct_or_union_wrapper = TreeFragment(u"""
cdef class NAME:
cdef TYPE value
def __init__(self, MEMBER=None):
cdef int count
count = 0
INIT_ASSIGNMENTS
if IS_UNION and count > 1:
raise ValueError, "At most one union member should be specified."
def __str__(self):
return STR_FORMAT % MEMBER_TUPLE
def __repr__(self):
return REPR_FORMAT % MEMBER_TUPLE
""")
init_assignment = TreeFragment(u"""
if VALUE is not None:
ATTR = VALUE
count += 1
""")
fused_function = None
in_lambda = 0
def __call__(self, root):
# needed to determine if a cdef var is declared after it's used.
self.seen_vars_stack = []
self.fused_error_funcs = set()
super_class = super(AnalyseDeclarationsTransform, self)
self._super_visit_FuncDefNode = super_class.visit_FuncDefNode
return super_class.__call__(root)
def visit_NameNode(self, node):
self.seen_vars_stack[-1].add(node.name)
return node
def visit_ModuleNode(self, node):
self.seen_vars_stack.append(set())
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.seen_vars_stack.pop()
return node
def visit_LambdaNode(self, node):
self.in_lambda += 1
node.analyse_declarations(self.current_env())
self.visitchildren(node)
self.in_lambda -= 1
return node
def visit_CClassDefNode(self, node):
node = self.visit_ClassDefNode(node)
if node.scope and node.scope.implemented:
stats = []
for entry in node.scope.var_entries:
if entry.needs_property:
property = self.create_Property(entry)
property.analyse_declarations(node.scope)
self.visit(property)
stats.append(property)
if stats:
node.body.stats += stats
return node
def _handle_fused_def_decorators(self, old_decorators, env, node):
"""
Create function calls to the decorators and reassignments to
the function.
"""
# Delete staticmethod and classmethod decorators, this is
# handled directly by the fused function object.
decorators = []
for decorator in old_decorators:
func = decorator.decorator
if (not func.is_name or
func.name not in ('staticmethod', 'classmethod') or
env.lookup_here(func.name)):
# not a static or classmethod
decorators.append(decorator)
if decorators:
transform = DecoratorTransform(self.context)
def_node = node.node
_, reassignments = transform.handle_decorators(
def_node, decorators, def_node.name)
reassignments.analyse_declarations(env)
node = [node, reassignments]
return node
def _handle_def(self, decorators, env, node):
"Handle def or cpdef fused functions"
# Create PyCFunction nodes for each specialization
node.stats.insert(0, node.py_func)
node.py_func = self.visit(node.py_func)
node.update_fused_defnode_entry(env)
pycfunc = ExprNodes.PyCFunctionNode.from_defnode(node.py_func,
True)
pycfunc = ExprNodes.ProxyNode(pycfunc.coerce_to_temp(env))
node.resulting_fused_function = pycfunc
# Create assignment node for our def function
node.fused_func_assignment = self._create_assignment(
node.py_func, ExprNodes.CloneNode(pycfunc), env)
if decorators:
node = self._handle_fused_def_decorators(decorators, env, node)
return node
def _create_fused_function(self, env, node):
"Create a fused function for a DefNode with fused arguments"
from Cython.Compiler import FusedNode
if self.fused_function or self.in_lambda:
if self.fused_function not in self.fused_error_funcs:
if self.in_lambda:
error(node.pos, "Fused lambdas not allowed")
else:
error(node.pos, "Cannot nest fused functions")
self.fused_error_funcs.add(self.fused_function)
node.body = Nodes.PassStatNode(node.pos)
for arg in node.args:
if arg.type.is_fused:
arg.type = arg.type.get_fused_types()[0]
return node
decorators = getattr(node, 'decorators', None)
node = FusedNode.FusedCFuncDefNode(node, env)
self.fused_function = node
self.visitchildren(node)
self.fused_function = None
if node.py_func:
node = self._handle_def(decorators, env, node)
return node
def _handle_nogil_cleanup(self, lenv, node):
"Handle cleanup for 'with gil' blocks in nogil functions."
if lenv.nogil and lenv.has_with_gil_block:
# Acquire the GIL for cleanup in 'nogil' functions, by wrapping
# the entire function body in try/finally.
# The corresponding release will be taken care of by
# Nodes.FuncDefNode.generate_function_definitions()
node.body = Nodes.NogilTryFinallyStatNode(
node.body.pos,
body=node.body,
finally_clause=Nodes.EnsureGILNode(node.body.pos))
def _handle_fused(self, node):
if node.is_generator and node.has_fused_arguments:
node.has_fused_arguments = False
error(node.pos, "Fused generators not supported")
node.gbody = Nodes.StatListNode(node.pos,
stats=[],
body=Nodes.PassStatNode(node.pos))
return node.has_fused_arguments
def visit_FuncDefNode(self, node):
"""
Analyse a function and its body, as that hasn't happened yet. Also
analyse the directive_locals set by @cython.locals(). Then, if we are
a function with fused arguments, replace the function (after it has
declared itself in the symbol table!) with a FusedCFuncDefNode, and
analyse its children (which are in turn normal functions). If we're a
normal function, just analyse the body of the function.
"""
env = self.current_env()
self.seen_vars_stack.append(set())
lenv = node.local_scope
node.declare_arguments(lenv)
for var, type_node in node.directive_locals.items():
if not lenv.lookup_here(var): # don't redeclare args
type = type_node.analyse_as_type(lenv)
if type:
lenv.declare_var(var, type, type_node.pos)
else:
error(type_node.pos, "Not a type")
if self._handle_fused(node):
node = self._create_fused_function(env, node)
else:
node.body.analyse_declarations(lenv)
self._handle_nogil_cleanup(lenv, node)
self._super_visit_FuncDefNode(node)
self.seen_vars_stack.pop()
return node
def visit_DefNode(self, node):
node = self.visit_FuncDefNode(node)
env = self.current_env()
if (not isinstance(node, Nodes.DefNode) or
node.fused_py_func or node.is_generator_body or
not node.needs_assignment_synthesis(env)):
return node
return [node, self._synthesize_assignment(node, env)]
def visit_GeneratorBodyDefNode(self, node):
return self.visit_FuncDefNode(node)
def _synthesize_assignment(self, node, env):
# Synthesize assignment node and put it right after defnode
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if genv.is_closure_scope:
rhs = node.py_cfunc_node = ExprNodes.InnerFunctionNode(
node.pos, def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
code_object=ExprNodes.CodeObjectNode(node))
else:
binding = self.current_directives.get('binding')
rhs = ExprNodes.PyCFunctionNode.from_defnode(node, binding)
if env.is_py_class_scope:
rhs.binding = True
node.is_cyfunction = rhs.binding
return self._create_assignment(node, rhs, env)
def _create_assignment(self, def_node, rhs, env):
if def_node.decorators:
for decorator in def_node.decorators[::-1]:
rhs = ExprNodes.SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [rhs])
def_node.decorators = None
assmt = Nodes.SingleAssignmentNode(
def_node.pos,
lhs=ExprNodes.NameNode(def_node.pos, name=def_node.name),
rhs=rhs)
assmt.analyse_declarations(env)
return assmt
def visit_ScopedExprNode(self, node):
env = self.current_env()
node.analyse_declarations(env)
# the node may or may not have a local scope
if node.has_local_scope:
self.seen_vars_stack.append(set(self.seen_vars_stack[-1]))
self.enter_scope(node, node.expr_scope)
node.analyse_scoped_declarations(node.expr_scope)
self.visitchildren(node)
self.exit_scope()
self.seen_vars_stack.pop()
else:
node.analyse_scoped_declarations(env)
self.visitchildren(node)
return node
def visit_TempResultFromStatNode(self, node):
self.visitchildren(node)
node.analyse_declarations(self.current_env())
return node
def visit_CppClassNode(self, node):
if node.visibility == 'extern':
return None
else:
return self.visit_ClassDefNode(node)
def visit_CStructOrUnionDefNode(self, node):
# Create a wrapper node if needed.
# We want to use the struct type information (so it can't happen
# before this phase) but also create new objects to be declared
# (so it can't happen later).
# Note that we don't return the original node, as it is
# never used after this phase.
if True: # private (default)
return None
self_value = ExprNodes.AttributeNode(
pos = node.pos,
obj = ExprNodes.NameNode(pos=node.pos, name=u"self"),
attribute = EncodedString(u"value"))
var_entries = node.entry.type.scope.var_entries
attributes = []
for entry in var_entries:
attributes.append(ExprNodes.AttributeNode(pos = entry.pos,
obj = self_value,
attribute = entry.name))
# __init__ assignments
init_assignments = []
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
init_assignments.append(self.init_assignment.substitute({
u"VALUE": ExprNodes.NameNode(entry.pos, name = entry.name),
u"ATTR": attr,
}, pos = entry.pos))
# create the class
str_format = u"%s(%s)" % (node.entry.type.name, ("%s, " * len(attributes))[:-2])
wrapper_class = self.struct_or_union_wrapper.substitute({
u"INIT_ASSIGNMENTS": Nodes.StatListNode(node.pos, stats = init_assignments),
u"IS_UNION": ExprNodes.BoolNode(node.pos, value = not node.entry.type.is_struct),
u"MEMBER_TUPLE": ExprNodes.TupleNode(node.pos, args=attributes),
u"STR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format)),
u"REPR_FORMAT": ExprNodes.StringNode(node.pos, value = EncodedString(str_format.replace("%s", "%r"))),
}, pos = node.pos).stats[0]
wrapper_class.class_name = node.name
wrapper_class.shadow = True
class_body = wrapper_class.body.stats
# fix value type
assert isinstance(class_body[0].base_type, Nodes.CSimpleBaseTypeNode)
class_body[0].base_type.name = node.name
# fix __init__ arguments
init_method = class_body[1]
assert isinstance(init_method, Nodes.DefNode) and init_method.name == '__init__'
arg_template = init_method.args[1]
if not node.entry.type.is_struct:
arg_template.kw_only = True
del init_method.args[1]
for entry, attr in zip(var_entries, attributes):
arg = copy.deepcopy(arg_template)
arg.declarator.name = entry.name
init_method.args.append(arg)
# setters/getters
for entry, attr in zip(var_entries, attributes):
# TODO: branch on visibility
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
property = template.substitute({
u"ATTR": attr,
}, pos = entry.pos).stats[0]
property.name = entry.name
wrapper_class.body.stats.append(property)
wrapper_class.analyse_declarations(self.current_env())
return self.visit_CClassDefNode(wrapper_class)
# Some nodes are no longer needed after declaration
# analysis and can be dropped. The analysis was performed
# on these nodes in a separate recursive process from the
# enclosing function or module, so we can simply drop them.
def visit_CDeclaratorNode(self, node):
# necessary to ensure that all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return node
def visit_CTypeDefNode(self, node):
return node
def visit_CBaseTypeNode(self, node):
return None
def visit_CEnumDefNode(self, node):
if node.visibility == 'public':
return node
else:
return None
def visit_CNameDeclaratorNode(self, node):
if node.name in self.seen_vars_stack[-1]:
entry = self.current_env().lookup(node.name)
if (entry is None or entry.visibility != 'extern'
and not entry.scope.is_c_class_scope):
warning(node.pos, "cdef variable '%s' declared after it is used" % node.name, 2)
self.visitchildren(node)
return node
def visit_CVarDefNode(self, node):
# to ensure all CNameDeclaratorNodes are visited.
self.visitchildren(node)
return None
def visit_CnameDecoratorNode(self, node):
child_node = self.visit(node.node)
if not child_node:
return None
if type(child_node) is list: # Assignment synthesized
node.child_node = child_node[0]
return [node] + child_node[1:]
node.node = child_node
return node
def create_Property(self, entry):
if entry.visibility == 'public':
if entry.type.is_pyobject:
template = self.basic_pyobject_property
else:
template = self.basic_property
elif entry.visibility == 'readonly':
template = self.basic_property_ro
property = template.substitute({
u"ATTR": ExprNodes.AttributeNode(pos=entry.pos,
obj=ExprNodes.NameNode(pos=entry.pos, name="self"),
attribute=entry.name),
}, pos=entry.pos).stats[0]
property.name = entry.name
property.doc = entry.doc
return property
class CalculateQualifiedNamesTransform(EnvTransform):
"""
Calculate and store the '__qualname__' and the global
module name on some nodes.
"""
def visit_ModuleNode(self, node):
self.module_name = self.global_scope().qualified_name
self.qualified_name = []
_super = super(CalculateQualifiedNamesTransform, self)
self._super_visit_FuncDefNode = _super.visit_FuncDefNode
self._super_visit_ClassDefNode = _super.visit_ClassDefNode
self.visitchildren(node)
return node
def _set_qualname(self, node, name=None):
if name:
qualname = self.qualified_name[:]
qualname.append(name)
else:
qualname = self.qualified_name
node.qualname = EncodedString('.'.join(qualname))
node.module_name = self.module_name
self.visitchildren(node)
return node
def _append_entry(self, entry):
if entry.is_pyglobal and not entry.is_pyclass_attr:
self.qualified_name = [entry.name]
else:
self.qualified_name.append(entry.name)
def visit_ClassNode(self, node):
return self._set_qualname(node, node.name)
def visit_PyClassNamespaceNode(self, node):
# class name was already added by parent node
return self._set_qualname(node)
def visit_PyCFunctionNode(self, node):
return self._set_qualname(node, node.def_node.name)
def visit_FuncDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
if getattr(node, 'name', None) == '<lambda>':
self.qualified_name.append('<lambda>')
else:
self._append_entry(node.entry)
self.qualified_name.append('<locals>')
self._super_visit_FuncDefNode(node)
self.qualified_name = orig_qualified_name
return node
def visit_ClassDefNode(self, node):
orig_qualified_name = self.qualified_name[:]
entry = (getattr(node, 'entry', None) or # PyClass
self.current_env().lookup_here(node.name)) # CClass
self._append_entry(entry)
self._super_visit_ClassDefNode(node)
self.qualified_name = orig_qualified_name
return node
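# The names computed here follow CPython's __qualname__ convention:
# enclosing classes contribute their name and enclosing functions
# contribute ``name.<locals>``. A runnable illustration (the
# ``__qualname__`` attribute itself only exists on Python 3):
import sys

class _QualnameDemo(object):
    def method(self):
        def inner():
            pass
        return inner

if sys.version_info[0] >= 3:
    assert _QualnameDemo.method.__qualname__ == '_QualnameDemo.method'
    assert _QualnameDemo().method().__qualname__ == \
        '_QualnameDemo.method.<locals>.inner'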
class AnalyseExpressionsTransform(CythonTransform):
def visit_ModuleNode(self, node):
node.scope.infer_types()
node.body = node.body.analyse_expressions(node.scope)
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
node.local_scope.infer_types()
node.body = node.body.analyse_expressions(node.local_scope)
self.visitchildren(node)
return node
def visit_ScopedExprNode(self, node):
if node.has_local_scope:
node.expr_scope.infer_types()
node = node.analyse_scoped_expressions(node.expr_scope)
self.visitchildren(node)
return node
def visit_IndexNode(self, node):
"""
Replace index nodes used to specialize cdef functions with fused
argument types with the Attribute- or NameNode referring to the
function. We then need to copy over the specialization properties to
the attribute or name node.
Because the indexing might be a Python indexing operation on a fused
function, or (usually) a Cython indexing operation, we need to
re-analyse the types.
"""
self.visit_Node(node)
if node.is_fused_index and not node.type.is_error:
node = node.base
elif node.memslice_ellipsis_noop:
# memoryviewslice[...] expression, drop the IndexNode
node = node.base
return node
class FindInvalidUseOfFusedTypes(CythonTransform):
def visit_FuncDefNode(self, node):
# Errors related to use in functions with fused args will already
# have been detected
if not node.has_fused_arguments:
if not node.is_generator_body and node.return_type.is_fused:
error(node.pos, "Return type is not specified as argument type")
else:
self.visitchildren(node)
return node
def visit_ExprNode(self, node):
if node.type and node.type.is_fused:
error(node.pos, "Invalid use of fused types, type cannot be specialized")
else:
self.visitchildren(node)
return node
class ExpandInplaceOperators(EnvTransform):
def visit_InPlaceAssignmentNode(self, node):
lhs = node.lhs
rhs = node.rhs
if lhs.type.is_cpp_class:
# No getting around this exact operator here.
return node
if isinstance(lhs, ExprNodes.IndexNode) and lhs.is_buffer_access:
# There is code to handle this case.
return node
env = self.current_env()
def side_effect_free_reference(node, setting=False):
if isinstance(node, ExprNodes.NameNode):
return node, []
elif node.type.is_pyobject and not setting:
node = LetRefNode(node)
return node, [node]
elif isinstance(node, ExprNodes.IndexNode):
if node.is_buffer_access:
raise ValueError("Buffer access")
base, temps = side_effect_free_reference(node.base)
index = LetRefNode(node.index)
return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index]
elif isinstance(node, ExprNodes.AttributeNode):
obj, temps = side_effect_free_reference(node.obj)
return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps
else:
node = LetRefNode(node)
return node, [node]
try:
lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True)
except ValueError:
return node
dup = lhs.__class__(**lhs.__dict__)
binop = ExprNodes.binop_node(node.pos,
operator = node.operator,
operand1 = dup,
operand2 = rhs,
inplace=True)
# Manually analyse types for new node.
lhs.analyse_target_types(env)
dup.analyse_types(env)
binop.analyse_operation(env)
node = Nodes.SingleAssignmentNode(
node.pos,
lhs = lhs,
rhs=binop.coerce_to(lhs.type, env))
# Use LetRefNode to avoid side effects.
let_ref_nodes.reverse()
for t in let_ref_nodes:
node = LetNode(t, node)
return node
def visit_ExprNode(self, node):
# In-place assignments can't happen within an expression.
return node
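# The LetRefNode temporaries above exist so that subexpressions of the
# left-hand side are evaluated exactly once. Naively rewriting
# ``d[key()] += 1`` as ``d[key()] = d[key()] + 1`` would call key()
# twice; binding the index first avoids that. Runnable sketch:
_calls = []

def _key():
    _calls.append(1)
    return 'k'

_d = {'k': 0}
_tmp = _key()              # LetRefNode role: evaluate the index once
_d[_tmp] = _d[_tmp] + 1    # the synthesized plain assignment
assert _d['k'] == 1 and len(_calls) == 1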
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
"""
Adjust function and class definitions by the decorator directives:
@cython.cfunc
@cython.cclass
@cython.ccall
"""
def visit_ModuleNode(self, node):
self.directives = node.directives
self.in_py_class = False
self.visitchildren(node)
return node
def visit_CompilerDirectivesNode(self, node):
old_directives = self.directives
self.directives = node.directives
self.visitchildren(node)
self.directives = old_directives
return node
def visit_DefNode(self, node):
if 'ccall' in self.directives:
node = node.as_cfunction(overridable=True, returns=self.directives.get('returns'))
return self.visit(node)
if 'cfunc' in self.directives:
if self.in_py_class:
error(node.pos, "cfunc directive is not allowed here")
else:
node = node.as_cfunction(overridable=False, returns=self.directives.get('returns'))
return self.visit(node)
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
if 'cclass' in self.directives:
node = node.as_cclass()
return self.visit(node)
else:
old_in_pyclass = self.in_py_class
self.in_py_class = True
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
def visit_CClassDefNode(self, node):
old_in_pyclass = self.in_py_class
self.in_py_class = False
self.visitchildren(node)
self.in_py_class = old_in_pyclass
return node
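# The directives handled above come from Cython's "pure Python" mode.
# Typical usage, shown as a commented sketch so this module stays
# importable without the ``cython`` package:
#
#     import cython
#
#     @cython.cfunc              # compiled as a cdef function
#     def _helper(x):
#         return x * 2
#
#     @cython.ccall              # compiled as a cpdef function
#     def api(x):
#         return _helper(x) + 1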
class AlignFunctionDefinitions(CythonTransform):
"""
This class takes the signatures from a .pxd file and applies them to
the def methods in a .py file.
"""
def visit_ModuleNode(self, node):
self.scope = node.scope
self.directives = node.directives
self.imported_names = set() # hack, see visit_FromImportStatNode()
self.visitchildren(node)
return node
def visit_PyClassDefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def:
if pxd_def.is_cclass:
return self.visit_CClassDefNode(node.as_cclass(), pxd_def)
elif not pxd_def.scope or not pxd_def.scope.is_builtin_scope:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
return node
def visit_CClassDefNode(self, node, pxd_def=None):
if pxd_def is None:
pxd_def = self.scope.lookup(node.class_name)
if pxd_def:
outer_scope = self.scope
self.scope = pxd_def.type.scope
self.visitchildren(node)
if pxd_def:
self.scope = outer_scope
return node
def visit_DefNode(self, node):
pxd_def = self.scope.lookup(node.name)
if pxd_def and (not pxd_def.scope or not pxd_def.scope.is_builtin_scope):
if not pxd_def.is_cfunction:
error(node.pos, "'%s' redeclared" % node.name)
if pxd_def.pos:
error(pxd_def.pos, "previous declaration here")
return None
node = node.as_cfunction(pxd_def)
elif (self.scope.is_module_scope and self.directives['auto_cpdef']
and not node.name in self.imported_names
and node.is_cdef_func_compatible()):
# FIXME: cpdef-ing should be done in analyse_declarations()
node = node.as_cfunction(scope=self.scope)
# Enable this when nested cdef functions are allowed.
# self.visitchildren(node)
return node
def visit_FromImportStatNode(self, node):
# hack to prevent conditional import fallback functions from
# being cpdef-ed (global Python variables currently conflict
# with imports)
if self.scope.is_module_scope:
for name, _ in node.items:
self.imported_names.add(name)
return node
def visit_ExprNode(self, node):
# ignore lambdas and everything else that appears in expressions
return node
class RemoveUnreachableCode(CythonTransform):
def visit_StatListNode(self, node):
if not self.current_directives['remove_unreachable']:
return node
self.visitchildren(node)
for idx, stat in enumerate(node.stats):
idx += 1
if stat.is_terminator:
if idx < len(node.stats):
if self.current_directives['warn.unreachable']:
warning(node.stats[idx].pos, "Unreachable code", 2)
node.stats = node.stats[:idx]
node.is_terminator = True
break
return node
def visit_IfClauseNode(self, node):
self.visitchildren(node)
if node.body.is_terminator:
node.is_terminator = True
return node
def visit_IfStatNode(self, node):
self.visitchildren(node)
if node.else_clause and node.else_clause.is_terminator:
for clause in node.if_clauses:
if not clause.is_terminator:
break
else:
node.is_terminator = True
return node
def visit_TryExceptStatNode(self, node):
self.visitchildren(node)
if node.body.is_terminator and node.else_clause:
if self.current_directives['warn.unreachable']:
warning(node.else_clause.pos, "Unreachable code", 2)
node.else_clause = None
return node
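# A concrete picture of what this transform trims: any statement after
# a terminator (return/raise/break/continue) in the same statement
# list is unreachable and is dropped (with a warning when enabled).
def _after_return():
    return 1
    _dead = 2   # unreachable: removed by RemoveUnreachableCode

assert _after_return() == 1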
class YieldNodeCollector(TreeVisitor):
def __init__(self):
super(YieldNodeCollector, self).__init__()
self.yields = []
self.returns = []
self.has_return_value = False
def visit_Node(self, node):
self.visitchildren(node)
def visit_YieldExprNode(self, node):
self.yields.append(node)
self.visitchildren(node)
def visit_ReturnStatNode(self, node):
self.visitchildren(node)
if node.value:
self.has_return_value = True
self.returns.append(node)
def visit_ClassDefNode(self, node):
pass
def visit_FuncDefNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def visit_GeneratorExpressionNode(self, node):
pass
class MarkClosureVisitor(CythonTransform):
def visit_ModuleNode(self, node):
self.needs_closure = False
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
collector = YieldNodeCollector()
collector.visitchildren(node)
if collector.yields:
if isinstance(node, Nodes.CFuncDefNode):
# Will report error later
return node
for i, yield_expr in enumerate(collector.yields):
yield_expr.label_num = i + 1 # no enumerate start arg in Py2.4
for retnode in collector.returns:
retnode.in_generator = True
gbody = Nodes.GeneratorBodyDefNode(
pos=node.pos, name=node.name, body=node.body)
generator = Nodes.GeneratorDefNode(
pos=node.pos, name=node.name, args=node.args,
star_arg=node.star_arg, starstar_arg=node.starstar_arg,
doc=node.doc, decorators=node.decorators,
gbody=gbody, lambda_name=node.lambda_name)
return generator
return node
def visit_CFuncDefNode(self, node):
self.visit_FuncDefNode(node)
if node.needs_closure:
error(node.pos, "closures inside cdef functions not yet supported")
return node
def visit_LambdaNode(self, node):
self.needs_closure = False
self.visitchildren(node)
node.needs_closure = self.needs_closure
self.needs_closure = True
return node
def visit_ClassDefNode(self, node):
self.visitchildren(node)
self.needs_closure = True
return node
class CreateClosureClasses(CythonTransform):
# Output closure classes in module scope for all functions
# that really need it.
def __init__(self, context):
super(CreateClosureClasses, self).__init__(context)
self.path = []
self.in_lambda = False
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.visitchildren(node)
return node
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
for name, entry in node.local_scope.entries.items():
if entry.from_closure:
from_closure.append((name, entry))
elif entry.in_closure:
in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
for entry in node.local_scope.entries.values():
if not entry.from_closure:
entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
# Now from the beginning
node.needs_closure = False
node.needs_outer_scope = False
func_scope = node.local_scope
cscope = node.entry.scope
while cscope.is_py_class_scope or cscope.is_c_class_scope:
cscope = cscope.outer_scope
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_self_code = False
node.needs_outer_scope = False
if node.is_generator:
pass
elif not in_closure and not from_closure:
return
elif not in_closure:
func_scope.is_passthrough = True
func_scope.scope_class = cscope.scope_class
node.needs_outer_scope = True
return
as_name = '%s_%s' % (
target_module_scope.next_id(Naming.closure_class_prefix),
node.entry.cname)
entry = target_module_scope.declare_c_class(
name=as_name, pos=node.pos, defining=True,
implementing=True)
entry.type.is_final_type = True
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
if from_closure:
assert cscope.is_closure_scope
class_scope.declare_var(pos=node.pos,
name=Naming.outer_scope_cname,
cname=Naming.outer_scope_cname,
type=cscope.scope_class.type,
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
closure_entry = class_scope.declare_var(pos=entry.pos,
name=entry.name,
cname=entry.cname,
type=entry.type,
is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
# Do it here because other classes are already checked
target_module_scope.check_c_class(func_scope.scope_class)
def visit_LambdaNode(self, node):
if not isinstance(node.def_node, Nodes.DefNode):
# fused function, an error has been previously issued
return node
was_in_lambda = self.in_lambda
self.in_lambda = True
self.create_class_from_scope(node.def_node, self.module_scope, node)
self.visitchildren(node)
self.in_lambda = was_in_lambda
return node
def visit_FuncDefNode(self, node):
if self.in_lambda:
self.visitchildren(node)
return node
if node.needs_closure or self.path:
self.create_class_from_scope(node, self.module_scope)
self.path.append(node)
self.visitchildren(node)
self.path.pop()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_CFuncDefNode(self, node):
self.visitchildren(node)
return node
class GilCheck(VisitorTransform):
"""
Call `node.gil_check(env)` on each node to make sure we hold the
GIL when we need it. Raise an error on Python operations
inside a `nogil` environment.
Additionally, raise errors for redundantly nested `with gil` or
`with nogil` statements, since releasing an already-released GIL
would abort Python.
"""
def __call__(self, root):
self.env_stack = [root.scope]
self.nogil = False
# True for 'cdef func() nogil:' functions, as the GIL may be held while
# calling this function (thus contained 'nogil' blocks may be valid).
self.nogil_declarator_only = False
return super(GilCheck, self).__call__(root)
def visit_FuncDefNode(self, node):
self.env_stack.append(node.local_scope)
was_nogil = self.nogil
self.nogil = node.local_scope.nogil
if self.nogil:
self.nogil_declarator_only = True
if self.nogil and node.nogil_check:
node.nogil_check(node.local_scope)
self.visitchildren(node)
# This cannot be nested, so it doesn't need backup/restore
self.nogil_declarator_only = False
self.env_stack.pop()
self.nogil = was_nogil
return node
def visit_GILStatNode(self, node):
if self.nogil and node.nogil_check:
node.nogil_check()
was_nogil = self.nogil
self.nogil = (node.state == 'nogil')
if was_nogil == self.nogil and not self.nogil_declarator_only:
if not was_nogil:
error(node.pos, "Trying to acquire the GIL while it is "
"already held.")
else:
error(node.pos, "Trying to release the GIL while it was "
"previously released.")
if isinstance(node.finally_clause, Nodes.StatListNode):
# The finally clause of the GILStatNode is a GILExitNode,
# which is wrapped in a StatListNode. Just unpack that.
node.finally_clause, = node.finally_clause.stats
self.visitchildren(node)
self.nogil = was_nogil
return node
def visit_ParallelRangeNode(self, node):
if node.nogil:
node.nogil = False
node = Nodes.GILStatNode(node.pos, state='nogil', body=node)
return self.visit_GILStatNode(node)
if not self.nogil:
error(node.pos, "prange() can only be used without the GIL")
# Forget about any GIL-related errors that may occur in the body
return None
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_ParallelWithBlockNode(self, node):
if not self.nogil:
error(node.pos, "The parallel section may only be used without "
"the GIL")
return None
if node.nogil_check:
# It does not currently implement this, but test for it anyway to
# avoid potential future surprises
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
return node
def visit_TryFinallyStatNode(self, node):
"""
Take care of try/finally statements in nogil code sections.
"""
if not self.nogil or isinstance(node, Nodes.GILStatNode):
return self.visit_Node(node)
node.nogil_check = None
node.is_try_finally_in_nogil = True
self.visitchildren(node)
return node
def visit_Node(self, node):
if self.env_stack and self.nogil and node.nogil_check:
node.nogil_check(self.env_stack[-1])
self.visitchildren(node)
node.in_nogil_context = self.nogil
return node
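# Commented Cython sketch of the two nesting errors raised above
# (Cython syntax, not runnable as plain Python):
#
#     def f():            # ordinary def: the GIL is already held
#         with gil:       # error: "Trying to acquire the GIL while it
#             pass        #         is already held."
#
#     cdef void g() nogil:
#         with nogil:     # allowed only because g() is *declared*
#             pass        # nogil (nogil_declarator_only above); inside
#                         # a ``with nogil`` block it would be an error.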
class TransformBuiltinMethods(EnvTransform):
def visit_SingleAssignmentNode(self, node):
if node.declaration_only:
return None
else:
self.visitchildren(node)
return node
def visit_AttributeNode(self, node):
self.visitchildren(node)
return self.visit_cython_attribute(node)
def visit_NameNode(self, node):
return self.visit_cython_attribute(node)
def visit_cython_attribute(self, node):
attribute = node.as_cython_attribute()
if attribute:
if attribute == u'compiled':
node = ExprNodes.BoolNode(node.pos, value=True)
elif attribute == u'__version__':
import Cython
node = ExprNodes.StringNode(node.pos, value=EncodedString(Cython.__version__))
elif attribute == u'NULL':
node = ExprNodes.NullNode(node.pos)
elif attribute in (u'set', u'frozenset'):
node = ExprNodes.NameNode(node.pos, name=EncodedString(attribute),
entry=self.current_env().builtin_scope().lookup_here(attribute))
elif PyrexTypes.parse_basic_type(attribute):
pass
elif self.context.cython_scope.lookup_qualified_name(attribute):
pass
else:
error(node.pos, u"'%s' not a valid cython attribute or is being used incorrectly" % attribute)
return node
def visit_ExecStatNode(self, node):
lenv = self.current_env()
self.visitchildren(node)
if len(node.args) == 1:
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_locals(self, node, func_name):
# locals()/dir()/vars() builtins
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry:
# not the builtin
return node
pos = node.pos
if func_name in ('locals', 'vars'):
if func_name == 'locals' and len(node.args) > 0:
error(self.pos, "Builtin 'locals()' called with wrong number of args, expected 0, got %d"
% len(node.args))
return node
elif func_name == 'vars':
if len(node.args) > 1:
error(self.pos, "Builtin 'vars()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
return node # nothing to do
return ExprNodes.LocalsExprNode(pos, self.current_scope_node(), lenv)
else: # dir()
if len(node.args) > 1:
error(self.pos, "Builtin 'dir()' called with wrong number of args, expected 0-1, got %d"
% len(node.args))
if len(node.args) > 0:
# optimised in Builtin.py
return node
if lenv.is_py_class_scope or lenv.is_module_scope:
if lenv.is_py_class_scope:
pyclass = self.current_scope_node()
locals_dict = ExprNodes.CloneNode(pyclass.dict)
else:
locals_dict = ExprNodes.GlobalsExprNode(pos)
return ExprNodes.SortedDictKeysNode(locals_dict)
local_names = [ var.name for var in lenv.entries.values() if var.name ]
items = [ ExprNodes.IdentifierStringNode(pos, value=var)
for var in local_names ]
return ExprNodes.ListNode(pos, args=items)
def visit_PrimaryCmpNode(self, node):
# special case: for in/not-in test, we do not need to sort locals()
self.visitchildren(node)
if node.operator in 'not_in': # in/not_in
if isinstance(node.operand2, ExprNodes.SortedDictKeysNode):
arg = node.operand2.arg
if isinstance(arg, ExprNodes.NoneCheckNode):
arg = arg.arg
node.operand2 = arg
return node
def visit_CascadedCmpNode(self, node):
return self.visit_PrimaryCmpNode(node)
def _inject_eval(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or len(node.args) != 1:
return node
# Inject globals and locals
node.args.append(ExprNodes.GlobalsExprNode(node.pos))
if not lenv.is_module_scope:
node.args.append(
ExprNodes.LocalsExprNode(
node.pos, self.current_scope_node(), lenv))
return node
def _inject_super(self, node, func_name):
lenv = self.current_env()
entry = lenv.lookup_here(func_name)
if entry or node.args:
return node
# Inject no-args super
def_node = self.current_scope_node()
if (not isinstance(def_node, Nodes.DefNode) or not def_node.args or
len(self.env_stack) < 2):
return node
class_node, class_scope = self.env_stack[-2]
if class_scope.is_py_class_scope:
def_node.requires_classobj = True
class_node.class_cell.is_active = True
node.args = [
ExprNodes.ClassCellNode(
node.pos, is_generator=def_node.is_generator),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
elif class_scope.is_c_class_scope:
node.args = [
ExprNodes.NameNode(
node.pos, name=class_node.scope.name,
entry=class_node.entry),
ExprNodes.NameNode(node.pos, name=def_node.args[0].name)
]
return node
def visit_SimpleCallNode(self, node):
# cython.foo
function = node.function.as_cython_attribute()
if function:
if function in InterpretCompilerDirectives.unop_method_nodes:
if len(node.args) != 1:
error(node.function.pos, u"%s() takes exactly one argument" % function)
else:
node = InterpretCompilerDirectives.unop_method_nodes[function](node.function.pos, operand=node.args[0])
elif function in InterpretCompilerDirectives.binop_method_nodes:
if len(node.args) != 2:
error(node.function.pos, u"%s() takes exactly two arguments" % function)
else:
node = InterpretCompilerDirectives.binop_method_nodes[function](node.function.pos, operand1=node.args[0], operand2=node.args[1])
elif function == u'cast':
if len(node.args) != 2:
error(node.function.pos, u"cast() takes exactly two arguments")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.TypecastNode(node.function.pos, type=type, operand=node.args[1])
else:
error(node.args[0].pos, "Not a type")
elif function == u'sizeof':
if len(node.args) != 1:
error(node.function.pos, u"sizeof() takes exactly one argument")
else:
type = node.args[0].analyse_as_type(self.current_env())
if type:
node = ExprNodes.SizeofTypeNode(node.function.pos, arg_type=type)
else:
node = ExprNodes.SizeofVarNode(node.function.pos, operand=node.args[0])
elif function == 'cmod':
if len(node.args) != 2:
error(node.function.pos, u"cmod() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '%', node.args[0], node.args[1])
node.cdivision = True
elif function == 'cdiv':
if len(node.args) != 2:
error(node.function.pos, u"cdiv() takes exactly two arguments")
else:
node = ExprNodes.binop_node(node.function.pos, '/', node.args[0], node.args[1])
node.cdivision = True
elif function == u'set':
node.function = ExprNodes.NameNode(node.pos, name=EncodedString('set'))
elif self.context.cython_scope.lookup_qualified_name(function):
pass
else:
error(node.function.pos,
u"'%s' not a valid cython language construct" % function)
self.visitchildren(node)
if isinstance(node, ExprNodes.SimpleCallNode) and node.function.is_name:
func_name = node.function.name
if func_name in ('dir', 'locals', 'vars'):
return self._inject_locals(node, func_name)
if func_name == 'eval':
return self._inject_eval(node, func_name)
if func_name == 'super':
return self._inject_super(node, func_name)
return node
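# The no-argument super() injection above reproduces Python 3
# semantics: inside a method, ``super()`` behaves like
# ``super(__class__, self)``. Runnable illustration of the explicit
# form it expands to:
class _SuperBase(object):
    def who(self):
        return 'base'

class _SuperChild(_SuperBase):
    def who(self):
        return super(_SuperChild, self).who() + '+child'

assert _SuperChild().who() == 'base+child'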
class ReplaceFusedTypeChecks(VisitorTransform):
"""
This is not a transform in the pipeline. It is invoked on the specific
versions of a cdef function with fused argument types. It filters out any
type branches that don't match. e.g.
if fused_t is mytype:
...
elif fused_t in other_fused_type:
...
"""
def __init__(self, local_scope):
super(ReplaceFusedTypeChecks, self).__init__()
self.local_scope = local_scope
# defer the import until now to avoid circular import time dependencies
from Cython.Compiler import Optimize
self.transform = Optimize.ConstantFolding(reevaluate=True)
def visit_IfStatNode(self, node):
"""
Filters out any if clauses with false compile time type check
expression.
"""
self.visitchildren(node)
return self.transform(node)
def visit_PrimaryCmpNode(self, node):
type1 = node.operand1.analyse_as_type(self.local_scope)
type2 = node.operand2.analyse_as_type(self.local_scope)
if type1 and type2:
false_node = ExprNodes.BoolNode(node.pos, value=False)
true_node = ExprNodes.BoolNode(node.pos, value=True)
type1 = self.specialize_type(type1, node.operand1.pos)
op = node.operator
if op in ('is', 'is_not', '==', '!='):
type2 = self.specialize_type(type2, node.operand2.pos)
is_same = type1.same_as(type2)
eq = op in ('is', '==')
if (is_same and eq) or (not is_same and not eq):
return true_node
elif op in ('in', 'not_in'):
# We have to do an instance check directly, as operand2
# needs to be a fused type and not a type with a subtype
# that is fused. First unpack the typedef
if isinstance(type2, PyrexTypes.CTypedefType):
type2 = type2.typedef_base_type
if type1.is_fused:
error(node.operand1.pos, "Type is fused")
elif not type2.is_fused:
error(node.operand2.pos,
"Can only use 'in' or 'not in' on a fused type")
else:
types = PyrexTypes.get_specialized_types(type2)
for specialized_type in types:
if type1.same_as(specialized_type):
if op == 'in':
return true_node
else:
return false_node
if op == 'not_in':
return true_node
return false_node
return node
def specialize_type(self, type, pos):
try:
return type.specialize(self.local_scope.fused_to_specific)
except KeyError:
error(pos, "Type is not specific")
return type
def visit_Node(self, node):
self.visitchildren(node)
return node
class DebugTransform(CythonTransform):
"""
Write debug information for this Cython module.
"""
def __init__(self, context, options, result):
super(DebugTransform, self).__init__(context)
self.visited = set()
# our treebuilder and debug output writer
# (see Cython.Debugger.debug_output.CythonDebugWriter)
self.tb = self.context.gdb_debug_outputwriter
#self.c_output_file = options.output_file
self.c_output_file = result.c_file
# Closure support, basically treat nested functions as if the AST were
# never nested
self.nested_funcdefs = []
# tells visit_NameNode whether it should register step-into functions
self.register_stepinto = False
def visit_ModuleNode(self, node):
self.tb.module_name = node.full_module_name
attrs = dict(
module_name=node.full_module_name,
filename=node.pos[0].filename,
c_filename=self.c_output_file)
self.tb.start('Module', attrs)
# serialize functions
self.tb.start('Functions')
# First, serialize functions normally...
self.visitchildren(node)
# ... then, serialize nested functions
for nested_funcdef in self.nested_funcdefs:
self.visit_FuncDefNode(nested_funcdef)
self.register_stepinto = True
self.serialize_modulenode_as_function(node)
self.register_stepinto = False
self.tb.end('Functions')
# 2.3 compatibility. Serialize global variables
self.tb.start('Globals')
entries = {}
for k, v in node.scope.entries.iteritems():
if (v.qualified_name not in self.visited and not
v.name.startswith('__pyx_') and not
v.type.is_cfunction and not
v.type.is_extension_type):
entries[k]= v
self.serialize_local_variables(entries)
self.tb.end('Globals')
# self.tb.end('Module') # end Module after the line number mapping in
# Cython.Compiler.ModuleNode.ModuleNode._serialize_lineno_map
return node
def visit_FuncDefNode(self, node):
self.visited.add(node.local_scope.qualified_name)
if getattr(node, 'is_wrapper', False):
return node
if self.register_stepinto:
self.nested_funcdefs.append(node)
return node
# node.entry.visibility = 'extern'
if node.py_func is None:
pf_cname = ''
else:
pf_cname = node.py_func.entry.func_cname
attrs = dict(
name=node.entry.name or getattr(node, 'name', '<unknown>'),
cname=node.entry.func_cname,
pf_cname=pf_cname,
qualified_name=node.local_scope.qualified_name,
lineno=str(node.pos[1]))
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.local_scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
for arg in node.local_scope.arg_entries:
self.tb.start(arg.name)
self.tb.end(arg.name)
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
return node
def visit_NameNode(self, node):
if (self.register_stepinto and
node.type.is_cfunction and
getattr(node, 'is_called', False) and
node.entry.func_cname is not None):
# don't check node.entry.in_cinclude, as 'cdef extern: ...'
# declared functions are not 'in_cinclude'.
# This means we will list called 'cdef' functions as
# "step into functions", but this is not an issue as they will be
# recognized as Cython functions anyway.
attrs = dict(name=node.entry.func_cname)
self.tb.start('StepIntoFunction', attrs=attrs)
self.tb.end('StepIntoFunction')
self.visitchildren(node)
return node
def serialize_modulenode_as_function(self, node):
"""
Serialize the module-level code as a function so the debugger will know
it's a "relevant frame" and it will know where to set the breakpoint
for 'break modulename'.
"""
name = node.full_module_name.rpartition('.')[-1]
cname_py2 = 'init' + name
cname_py3 = 'PyInit_' + name
py2_attrs = dict(
name=name,
cname=cname_py2,
pf_cname='',
# Ignore the qualified_name, breakpoints should be set using
# `cy break modulename:lineno` for module-level breakpoints.
qualified_name='',
lineno='1',
is_initmodule_function="True",
)
py3_attrs = dict(py2_attrs, cname=cname_py3)
self._serialize_modulenode_as_function(node, py2_attrs)
self._serialize_modulenode_as_function(node, py3_attrs)
def _serialize_modulenode_as_function(self, node, attrs):
self.tb.start('Function', attrs=attrs)
self.tb.start('Locals')
self.serialize_local_variables(node.scope.entries)
self.tb.end('Locals')
self.tb.start('Arguments')
self.tb.end('Arguments')
self.tb.start('StepIntoFunctions')
self.register_stepinto = True
self.visitchildren(node)
self.register_stepinto = False
self.tb.end('StepIntoFunctions')
self.tb.end('Function')
def serialize_local_variables(self, entries):
for entry in entries.values():
if not entry.cname:
# not a local variable
continue
if entry.type.is_pyobject:
vartype = 'PythonObject'
else:
vartype = 'CObject'
if entry.from_closure:
# We're dealing with a closure where a variable from an outer
# scope is accessed, get it from the scope object.
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.outer_entry.cname)
qname = '%s.%s.%s' % (entry.scope.outer_scope.qualified_name,
entry.scope.name,
entry.name)
elif entry.in_closure:
cname = '%s->%s' % (Naming.cur_scope_cname,
entry.cname)
qname = entry.qualified_name
else:
cname = entry.cname
qname = entry.qualified_name
if not entry.pos:
# this happens for variables that are not in the user's code,
# e.g. for the global __builtins__, __doc__, etc. We can just
# set the lineno to 0 for those.
lineno = '0'
else:
lineno = str(entry.pos[1])
attrs = dict(
name=entry.name,
cname=cname,
qualified_name=qname,
type=vartype,
lineno=lineno)
self.tb.start('LocalVar', attrs)
self.tb.end('LocalVar')
| openilabs/falconlab | env/lib/python2.7/site-packages/Cython/Compiler/ParseTreeTransforms.py | Python | mit | 115,717 | ["VisIt"] | ddb2ccc2bb78d6d564b0c11fe411dffda8438a5f8e8a98c16c2294454a5c89b9 |
#
# GMSK modulation and demodulation.
#
#
# Copyright 2005-2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
from gnuradio import gr
from gnuradio import blocks
from gnuradio import analog
import modulation_utils
import digital_swig as digital
from math import pi
import numpy
from pprint import pprint
import inspect
try:
from gnuradio import filter
except ImportError:
import filter_swig as filter
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_bt = 0.35
_def_verbose = False
_def_log = False
_def_gain_mu = None
_def_mu = 0.5
_def_freq_error = 0.0
_def_omega_relative_limit = 0.005
# FIXME: Figure out how to make GMSK work with pfb_arb_resampler_fff for both
# transmit and receive so we don't require integer samples per symbol.
# /////////////////////////////////////////////////////////////////////////////
# GMSK modulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_mod(gr.hier_block2):
"""
Hierarchical block for Gaussian Minimum Shift Key (GMSK)
modulation.
The input is a byte stream (unsigned char) and the
output is the complex modulated signal at baseband.
Args:
samples_per_symbol: samples per baud >= 2 (integer)
bt: Gaussian filter bandwidth * symbol time (float)
verbose: Print information about modulator? (boolean)
log: Print modulation data to files? (boolean)
"""
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
bt=_def_bt,
verbose=_def_verbose,
log=_def_log):
gr.hier_block2.__init__(self, "gmsk_mod",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
samples_per_symbol = int(samples_per_symbol)
self._samples_per_symbol = samples_per_symbol
self._bt = bt
self._differential = False
if not isinstance(samples_per_symbol, int) or samples_per_symbol < 2:
raise TypeError, ("samples_per_symbol must be an integer >= 2, is %r" % (samples_per_symbol,))
ntaps = 4 * samples_per_symbol # up to 3 bits in filter at once
sensitivity = (pi / 2) / samples_per_symbol # phase change per bit = pi / 2
# Turn it into NRZ data.
#self.nrz = digital.bytes_to_syms()
self.unpack = blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST)
self.nrz = digital.chunks_to_symbols_bf([-1, 1], 1)
# Form Gaussian filter
# Generate Gaussian response (Needs to be convolved with window below).
self.gaussian_taps = filter.firdes.gaussian(
1, # gain
samples_per_symbol, # symbol_rate
bt, # bandwidth * symbol time
ntaps # number of taps
)
self.sqwave = (1,) * samples_per_symbol # rectangular window
self.taps = numpy.convolve(numpy.array(self.gaussian_taps),numpy.array(self.sqwave))
self.gaussian_filter = filter.interp_fir_filter_fff(samples_per_symbol, self.taps)
# FM modulation
self.fmmod = analog.frequency_modulator_fc(sensitivity)
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.unpack, self.nrz, self.gaussian_filter, self.fmmod, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method.
def _print_verbage(self):
print "bits per symbol = %d" % self.bits_per_symbol()
print "Gaussian filter bt = %.2f" % self._bt
def _setup_logging(self):
print "Modulation logging turned on."
self.connect(self.nrz,
blocks.file_sink(gr.sizeof_float, "nrz.dat"))
self.connect(self.gaussian_filter,
blocks.file_sink(gr.sizeof_float, "gaussian_filter.dat"))
self.connect(self.fmmod,
blocks.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))
def add_options(parser):
"""
Adds GMSK modulation-specific options to the standard parser
"""
parser.add_option("", "--bt", type="float", default=_def_bt,
help="set bandwidth-time product [default=%default] (GMSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gmsk_mod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
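# Sanity sketch for the sensitivity chosen above: an NRZ symbol of +1
# held for samples_per_symbol samples through the FM modulator
# accumulates samples_per_symbol * sensitivity = pi/2 of phase, the
# quarter-turn-per-bit condition that makes this minimum-shift keying.
_sps_check = 2
_sensitivity_check = (pi / 2) / _sps_check
assert abs(_sps_check * _sensitivity_check - pi / 2) < 1e-12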
# /////////////////////////////////////////////////////////////////////////////
# GMSK demodulator
# /////////////////////////////////////////////////////////////////////////////
class gmsk_demod(gr.hier_block2):
"""
Hierarchical block for Gaussian Minimum Shift Key (GMSK)
demodulation.
The input is the complex modulated signal at baseband.
The output is a stream of bits packed 1 bit per byte (the LSB)
Args:
samples_per_symbol: samples per baud (integer)
verbose: Print information about modulator? (boolean)
log: Print modulation data to files? (boolean)
gain_mu: controls rate of mu adjustment (float)
mu: fractional delay [0.0, 1.0] (float)
omega_relative_limit: sets max variation in omega (float)
freq_error: bit rate error as a fraction (float)
"""
def __init__(self,
samples_per_symbol=_def_samples_per_symbol,
gain_mu=_def_gain_mu,
mu=_def_mu,
omega_relative_limit=_def_omega_relative_limit,
freq_error=_def_freq_error,
verbose=_def_verbose,
log=_def_log):
gr.hier_block2.__init__(self, "gmsk_demod",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
self._samples_per_symbol = samples_per_symbol
self._gain_mu = gain_mu
self._mu = mu
self._omega_relative_limit = omega_relative_limit
self._freq_error = freq_error
self._differential = False
if samples_per_symbol < 2:
raise TypeError, "samples_per_symbol must be >= 2, is %f" % samples_per_symbol
self._omega = samples_per_symbol*(1+self._freq_error)
if not self._gain_mu:
self._gain_mu = 0.175
self._gain_omega = .25 * self._gain_mu * self._gain_mu # critically damped
# Demodulate FM
sensitivity = (pi / 2) / samples_per_symbol
self.fmdemod = analog.quadrature_demod_cf(1.0 / sensitivity)
# the clock recovery block tracks the symbol clock and resamples as needed.
# the output of the block is a stream of soft symbols (float)
self.clock_recovery = digital.clock_recovery_mm_ff(self._omega, self._gain_omega,
self._mu, self._gain_mu,
self._omega_relative_limit)
# slice the floats at 0, outputting 1 bit (the LSB of the output byte) per sample
self.slicer = digital.binary_slicer_fb()
if verbose:
self._print_verbage()
if log:
self._setup_logging()
# Connect & Initialize base class
self.connect(self, self.fmdemod, self.clock_recovery, self.slicer, self)
def samples_per_symbol(self):
return self._samples_per_symbol
def bits_per_symbol(self=None): # staticmethod that's also callable on an instance
return 1
bits_per_symbol = staticmethod(bits_per_symbol) # make it a static method.
def _print_verbage(self):
print "bits per symbol = %d" % self.bits_per_symbol()
print "M&M clock recovery omega = %f" % self._omega
print "M&M clock recovery gain mu = %f" % self._gain_mu
print "M&M clock recovery mu = %f" % self._mu
print "M&M clock recovery omega rel. limit = %f" % self._omega_relative_limit
print "frequency error = %f" % self._freq_error
def _setup_logging(self):
print "Demodulation logging turned on."
self.connect(self.fmdemod,
blocks.file_sink(gr.sizeof_float, "fmdemod.dat"))
self.connect(self.clock_recovery,
blocks.file_sink(gr.sizeof_float, "clock_recovery.dat"))
self.connect(self.slicer,
blocks.file_sink(gr.sizeof_char, "slicer.dat"))
def add_options(parser):
"""
Adds GMSK demodulation-specific options to the standard parser
"""
parser.add_option("", "--gain-mu", type="float", default=_def_gain_mu,
help="M&M clock recovery gain mu [default=%default] (GMSK/PSK)")
parser.add_option("", "--mu", type="float", default=_def_mu,
help="M&M clock recovery mu [default=%default] (GMSK/PSK)")
parser.add_option("", "--omega-relative-limit", type="float", default=_def_omega_relative_limit,
help="M&M clock recovery omega relative limit [default=%default] (GMSK/PSK)")
parser.add_option("", "--freq-error", type="float", default=_def_freq_error,
help="M&M clock recovery frequency error [default=%default] (GMSK)")
add_options=staticmethod(add_options)
def extract_kwargs_from_options(options):
"""
Given command line options, create dictionary suitable for passing to __init__
"""
return modulation_utils.extract_kwargs_from_options(gmsk_demod.__init__,
('self',), options)
extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
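# The default loop constants above are the critically damped choice
# for the M&M second-order tracking loop: gain_omega = 0.25 * gain_mu**2.
_gain_mu_check = 0.175
assert abs(.25 * _gain_mu_check * _gain_mu_check - 0.00765625) < 1e-12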
#
# Add these to the mod/demod registry
#
modulation_utils.add_type_1_mod('gmsk', gmsk_mod)
modulation_utils.add_type_1_demod('gmsk', gmsk_demod)
| Gabotero/GNURadioNext | gr-digital/python/gmsk.py | Python | gpl-3.0 | 11,094 | ["Gaussian"] | 36c8baa2eee86c00b78af9c8ea933beff0823d908aa68111d75cbef3235e0bb5 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database of Hydrogen transfer reactions.
| Geometries from Bozkaya and Sherrill.
| Reference energies from Bozkaya and Sherrill.
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< HTR40 Database Module >>>
dbse = 'HTR40'
isOS = 'True'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
HRXN_SM = []
HRXN_LG = []
HTR15 = [1, 2, 3, 4, 11, 12, 13, 20, 21, 34, 35, 36, 37, 38, 39]
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV['%s-%s' % (dbse, 1 )] = ['%s-%s-reagent' % (dbse, 'ch3'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 1 )] = dict(zip(ACTV['%s-%s' % (dbse, 1)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 2 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 2 )] = dict(zip(ACTV['%s-%s' % (dbse, 2)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 3 )] = ['%s-%s-reagent' % (dbse, 'c2h3'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 3 )] = dict(zip(ACTV['%s-%s' % (dbse, 3)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 4 )] = ['%s-%s-reagent' % (dbse, 't-butyl'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'isobutane'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 4 )] = dict(zip(ACTV['%s-%s' % (dbse, 4)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 5 )] = ['%s-%s-reagent' % (dbse, 'cfch2'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'chfch2'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 5 )] = dict(zip(ACTV['%s-%s' % (dbse, 5)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 6 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch3cho'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 6 )] = dict(zip(ACTV['%s-%s' % (dbse, 6)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 7 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 7 )] = dict(zip(ACTV['%s-%s' % (dbse, 7)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 8 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch3cch'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 8 )] = dict(zip(ACTV['%s-%s' % (dbse, 8)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 9 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch2chcn'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 9 )] = dict(zip(ACTV['%s-%s' % (dbse, 9)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 10 )] = ['%s-%s-reagent' % (dbse, 'allyl'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 10 )] = dict(zip(ACTV['%s-%s' % (dbse, 10)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 11 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 11 )] = dict(zip(ACTV['%s-%s' % (dbse, 11)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 12 )] = ['%s-%s-reagent' % (dbse, 'c2h3'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 12 )] = dict(zip(ACTV['%s-%s' % (dbse, 12)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 13 )] = ['%s-%s-reagent' % (dbse, 't-butyl'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'isobutane'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 13 )] = dict(zip(ACTV['%s-%s' % (dbse, 13)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 14 )] = ['%s-%s-reagent' % (dbse, 'cfch2'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'chfch2'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 14 )] = dict(zip(ACTV['%s-%s' % (dbse, 14)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 15 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'ch3cho'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 15 )] = dict(zip(ACTV['%s-%s' % (dbse, 15)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 16 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 16 )] = dict(zip(ACTV['%s-%s' % (dbse, 16)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 17 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'ch3cch'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 17 )] = dict(zip(ACTV['%s-%s' % (dbse, 17)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 18 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'ch2chcn'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 18 )] = dict(zip(ACTV['%s-%s' % (dbse, 18)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 19 )] = ['%s-%s-reagent' % (dbse, 'allyl'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 19 )] = dict(zip(ACTV['%s-%s' % (dbse, 19)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 20 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 20 )] = dict(zip(ACTV['%s-%s' % (dbse, 20)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 21 )] = ['%s-%s-reagent' % (dbse, 't-butyl'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'isobutane'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 21 )] = dict(zip(ACTV['%s-%s' % (dbse, 21)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 22 )] = ['%s-%s-reagent' % (dbse, 'cfch2'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'chfch2'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 22 )] = dict(zip(ACTV['%s-%s' % (dbse, 22)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 23 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3cho'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 23 )] = dict(zip(ACTV['%s-%s' % (dbse, 23)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 24 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 24 )] = dict(zip(ACTV['%s-%s' % (dbse, 24)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 25 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3cch'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 25 )] = dict(zip(ACTV['%s-%s' % (dbse, 25)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 26 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch2chcn'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 26 )] = dict(zip(ACTV['%s-%s' % (dbse, 26)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 27 )] = ['%s-%s-reagent' % (dbse, 'allyl'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 27 )] = dict(zip(ACTV['%s-%s' % (dbse, 27)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 28 )] = ['%s-%s-reagent' % (dbse, 'cfch2'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'chfch2'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 28 )] = dict(zip(ACTV['%s-%s' % (dbse, 28)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 29 )] = ['%s-%s-reagent' % (dbse, 'ch2cho'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3cho'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 29 )] = dict(zip(ACTV['%s-%s' % (dbse, 29)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 30 )] = ['%s-%s-reagent' % (dbse, 'ch2cn'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 30 )] = dict(zip(ACTV['%s-%s' % (dbse, 30)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 31 )] = ['%s-%s-reagent' % (dbse, 'ch2cch'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3cch'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 31 )] = dict(zip(ACTV['%s-%s' % (dbse, 31)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 32 )] = ['%s-%s-reagent' % (dbse, 'ch2ccn'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch2chcn'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 32 )] = dict(zip(ACTV['%s-%s' % (dbse, 32)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 33 )] = ['%s-%s-reagent' % (dbse, 'allyl'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'ch3chch2'),
'%s-%s-reagent' % (dbse, 'c2h') ]
RXNM['%s-%s' % (dbse, 33 )] = dict(zip(ACTV['%s-%s' % (dbse, 33)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 34 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
'%s-%s-reagent' % (dbse, 'isobutane'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 't-butyl') ]
RXNM['%s-%s' % (dbse, 34 )] = dict(zip(ACTV['%s-%s' % (dbse, 34)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 35 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
'%s-%s-reagent' % (dbse, 'h2'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 'h') ]
RXNM['%s-%s' % (dbse, 35 )] = dict(zip(ACTV['%s-%s' % (dbse, 35)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 36 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
'%s-%s-reagent' % (dbse, 'c2h4'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 'c2h3') ]
RXNM['%s-%s' % (dbse, 36 )] = dict(zip(ACTV['%s-%s' % (dbse, 36)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 37 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
'%s-%s-reagent' % (dbse, 'isobutane'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 't-butyl') ]
RXNM['%s-%s' % (dbse, 37 )] = dict(zip(ACTV['%s-%s' % (dbse, 37)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 38 )] = ['%s-%s-reagent' % (dbse, 'c2h'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 'c2h2'),
'%s-%s-reagent' % (dbse, 'c6h5') ]
RXNM['%s-%s' % (dbse, 38 )] = dict(zip(ACTV['%s-%s' % (dbse, 38)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 39 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
'%s-%s-reagent' % (dbse, 'ch4'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 'ch3') ]
RXNM['%s-%s' % (dbse, 39 )] = dict(zip(ACTV['%s-%s' % (dbse, 39)], [-1,-1,1,1]))
ACTV['%s-%s' % (dbse, 40 )] = ['%s-%s-reagent' % (dbse, 'c6h5'),
'%s-%s-reagent' % (dbse, 'ch3cn'),
'%s-%s-reagent' % (dbse, 'c6h6'),
'%s-%s-reagent' % (dbse, 'ch2cn') ]
RXNM['%s-%s' % (dbse, 40 )] = dict(zip(ACTV['%s-%s' % (dbse, 40)], [-1,-1,1,1]))
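# --- Illustrative sketch (an assumption, not part of HTR40.py): ACTV lists
# the active reagents of each reaction and RXNM maps them to stoichiometric
# coefficients, so a reaction energy is the coefficient-weighted sum of
# per-reagent energies. `energies` is a hypothetical dict keyed like ACTV.
def reaction_energy(rxn, energies):
    """Return sum(coefficient * energy) over the active reagents of `rxn`."""
    key = '%s-%s' % (dbse, rxn)
    return sum(RXNM[key][rgt] * energies[rgt] for rgt in ACTV[key])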
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, 1 )] = 0.000
BIND['%s-%s' % (dbse, 2 )] = 0.000
BIND['%s-%s' % (dbse, 3 )] = 0.000
BIND['%s-%s' % (dbse, 4 )] = 0.000
BIND['%s-%s' % (dbse, 5 )] = 0.000
BIND['%s-%s' % (dbse, 6 )] = 0.000
BIND['%s-%s' % (dbse, 7 )] = 0.000
BIND['%s-%s' % (dbse, 8 )] = 0.000
BIND['%s-%s' % (dbse, 9 )] = 0.000
BIND['%s-%s' % (dbse, 10 )] = 0.000
BIND['%s-%s' % (dbse, 11 )] = 0.000
BIND['%s-%s' % (dbse, 12 )] = 0.000
BIND['%s-%s' % (dbse, 13 )] = 0.000
BIND['%s-%s' % (dbse, 14 )] = 0.000
BIND['%s-%s' % (dbse, 15 )] = 0.000
BIND['%s-%s' % (dbse, 16 )] = 0.000
BIND['%s-%s' % (dbse, 17 )] = 0.000
BIND['%s-%s' % (dbse, 18 )] = 0.000
BIND['%s-%s' % (dbse, 19 )] = 0.000
BIND['%s-%s' % (dbse, 20 )] = 0.000
BIND['%s-%s' % (dbse, 21 )] = 0.000
BIND['%s-%s' % (dbse, 22 )] = 0.000
BIND['%s-%s' % (dbse, 23 )] = 0.000
BIND['%s-%s' % (dbse, 24 )] = 0.000
BIND['%s-%s' % (dbse, 25 )] = 0.000
BIND['%s-%s' % (dbse, 26 )] = 0.000
BIND['%s-%s' % (dbse, 27 )] = 0.000
BIND['%s-%s' % (dbse, 28 )] = 0.000
BIND['%s-%s' % (dbse, 29 )] = 0.000
BIND['%s-%s' % (dbse, 30 )] = 0.000
BIND['%s-%s' % (dbse, 31 )] = 0.000
BIND['%s-%s' % (dbse, 32 )] = 0.000
BIND['%s-%s' % (dbse, 33 )] = 0.000
BIND['%s-%s' % (dbse, 34 )] = 0.000
BIND['%s-%s' % (dbse, 35 )] = 0.000
BIND['%s-%s' % (dbse, 36 )] = 0.000
BIND['%s-%s' % (dbse, 37 )] = 0.000
BIND['%s-%s' % (dbse, 38 )] = 0.000
BIND['%s-%s' % (dbse, 39 )] = 0.000
BIND['%s-%s' % (dbse, 40 )] = 0.000
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1 )] = """Reaction 1 """
TAGL['%s-%s' % (dbse, 2 )] = """Reaction 2 """
TAGL['%s-%s' % (dbse, 3 )] = """Reaction 3 """
TAGL['%s-%s' % (dbse, 4 )] = """Reaction 4 """
TAGL['%s-%s' % (dbse, 5 )] = """Reaction 5 """
TAGL['%s-%s' % (dbse, 6 )] = """Reaction 6 """
TAGL['%s-%s' % (dbse, 7 )] = """Reaction 7 """
TAGL['%s-%s' % (dbse, 8 )] = """Reaction 8 """
TAGL['%s-%s' % (dbse, 9 )] = """Reaction 9 """
TAGL['%s-%s' % (dbse, 10 )] = """Reaction 10 """
TAGL['%s-%s' % (dbse, 11 )] = """Reaction 11 """
TAGL['%s-%s' % (dbse, 12 )] = """Reaction 12 """
TAGL['%s-%s' % (dbse, 13 )] = """Reaction 13 """
TAGL['%s-%s' % (dbse, 14 )] = """Reaction 14 """
TAGL['%s-%s' % (dbse, 15 )] = """Reaction 15 """
TAGL['%s-%s' % (dbse, 16 )] = """Reaction 16 """
TAGL['%s-%s' % (dbse, 17 )] = """Reaction 17 """
TAGL['%s-%s' % (dbse, 18 )] = """Reaction 18 """
TAGL['%s-%s' % (dbse, 19 )] = """Reaction 19 """
TAGL['%s-%s' % (dbse, 20 )] = """Reaction 20 """
TAGL['%s-%s' % (dbse, 21 )] = """Reaction 21 """
TAGL['%s-%s' % (dbse, 22 )] = """Reaction 22 """
TAGL['%s-%s' % (dbse, 23 )] = """Reaction 23 """
TAGL['%s-%s' % (dbse, 24 )] = """Reaction 24 """
TAGL['%s-%s' % (dbse, 25 )] = """Reaction 25 """
TAGL['%s-%s' % (dbse, 26 )] = """Reaction 26 """
TAGL['%s-%s' % (dbse, 27 )] = """Reaction 27 """
TAGL['%s-%s' % (dbse, 28 )] = """Reaction 28 """
TAGL['%s-%s' % (dbse, 29 )] = """Reaction 29 """
TAGL['%s-%s' % (dbse, 30 )] = """Reaction 30 """
TAGL['%s-%s' % (dbse, 31 )] = """Reaction 31 """
TAGL['%s-%s' % (dbse, 32 )] = """Reaction 32 """
TAGL['%s-%s' % (dbse, 33 )] = """Reaction 33 """
TAGL['%s-%s' % (dbse, 34 )] = """Reaction 34 """
TAGL['%s-%s' % (dbse, 35 )] = """Reaction 35 """
TAGL['%s-%s' % (dbse, 36 )] = """Reaction 36 """
TAGL['%s-%s' % (dbse, 37 )] = """Reaction 37 """
TAGL['%s-%s' % (dbse, 38 )] = """Reaction 38 """
TAGL['%s-%s' % (dbse, 39 )] = """Reaction 39 """
TAGL['%s-%s' % (dbse, 40 )] = """Reaction 40 """
TAGL['%s-%s-reagent' % (dbse, 't-butyl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'cfch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h4' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2chcn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch4' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c2h3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'allyl' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3cch' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'h2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch3chch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'chfch2' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'h' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'isobutane' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cho' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2cch' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'ch2ccn' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c6h6' )] = """ """
TAGL['%s-%s-reagent' % (dbse, 'c6h5' )] = """ """
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-%s' % (dbse, 't-butyl', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000073 -0.17324401 0.00000000
C -1.49313671 0.03293053 0.00000000
C 0.74656816 0.03293930 1.29309432
C 0.74656816 0.03293930 -1.29309432
H -1.75412591 1.11750215 0.00000000
H 0.87705536 1.11751245 1.51911455
H 0.87705536 1.11751245 -1.51911455
H -1.96447858 -0.41104829 0.89724346
H -1.96447858 -0.41104829 -0.89724346
H 1.75927771 -0.41103277 1.25266784
H 0.20520649 -0.41104076 2.14991136
H 0.20520649 -0.41104076 -2.14991136
H 1.75927771 -0.41103277 -1.25266784
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'cfch2', 'reagent')] = qcdb.Molecule("""
0 2
F 1.18236000 0.12888000 0.00008000
C -0.00743000 -0.42937000 -0.00017000
C -1.20008000 0.11938000 -0.00004000
H -2.08423000 -0.50156000 0.00065000
H -1.31194000 1.20159000 -0.00015000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h2', 'reagent')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 0.60249005
C 0.00000000 0.00000000 -0.60249005
H 0.00000000 -0.00000000 1.66141025
H 0.00000000 -0.00000000 -1.66141025
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cho', 'reagent')] = qcdb.Molecule("""
0 1
H 1.15548000 -1.23749000 -0.00007000
C 1.16885000 -0.14776000 -0.00001000
C -0.23563000 0.39721000 -0.00001000
H -0.30509000 1.50872000 0.00003000
H 1.70764000 0.22227000 0.87912000
H 1.70778000 0.22245000 -0.87897000
O -1.23314000 -0.27658000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cn', 'reagent')] = qcdb.Molecule("""
0 2
C 0.18728000 0.00000000 -0.00001000
N 1.35587000 0.00000000 0.00000000
C -1.19103000 0.00000000 0.00000000
H -1.73431000 0.93522000 0.00001000
H -1.73432000 -0.93521000 0.00001000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h4', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66741206
C 0.00000000 0.00000000 -0.66741206
H -0.00000000 0.92046521 1.22998610
H 0.00000000 -0.92046521 1.22998610
H -0.00000000 0.92046521 -1.22998610
H 0.00000000 -0.92046521 -1.22998610
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2chcn', 'reagent')] = qcdb.Molecule("""
0 1
H -2.62698000 0.00343000 0.00128000
C -1.60538000 -0.35565000 0.00019000
C -0.58353000 0.50215000 -0.00012000
C 0.78296000 0.08953000 -0.00091000
N 1.89484000 -0.22376000 0.00060000
H -1.45042000 -1.42771000 -0.00098000
H -0.75076000 1.57438000 0.00050000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000000
C 1.21283562 0.00000000 0.00000000
H -1.05818189 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch4', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.00000000
H 1.08613677 0.00000000 0.00000000
H -0.36204538 1.02401965 0.00000000
H -0.36204538 -0.51200982 -0.88682703
H -0.36204538 -0.51200982 0.88682703
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c2h3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.02607811 0.69573733 0.00000000
C 0.02857044 -0.62089246 0.00000000
H -0.70268837 1.48374496 0.00000000
H -0.89678124 -1.18847800 0.00000000
H 0.94877872 -1.18643193 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cn', 'reagent')] = qcdb.Molecule("""
0 1
H 1.55303000 -0.19362000 1.00615000
C 1.17602000 0.00000000 0.00000000
C -0.28096000 0.00000000 -0.00001000
N -1.43280000 0.00000000 0.00000000
H 1.55307000 0.96815000 -0.33536000
H 1.55311000 -0.77453000 -0.67072000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'allyl', 'reagent')] = qcdb.Molecule("""
0 2
H 1.29620000 1.27815000 0.00032000
C 1.22748000 0.19570000 0.00008000
C 0.00002000 -0.44151000 -0.00012000
C -1.22747000 0.19573000 -0.00006000
H -2.15453000 -0.36307000 0.00092000
H 2.15460000 -0.36295000 -0.00001000
H -0.00013000 -1.52978000 -0.00012000
H -1.29630000 1.27818000 -0.00046000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3cch', 'reagent')] = qcdb.Molecule("""
0 1
H 1.62987000 -0.22419000 0.99609000
C 1.23812000 0.00000000 -0.00001000
C -0.21922000 -0.00003000 0.00000000
C -1.42012000 0.00006000 0.00007000
H -2.48217000 -0.00018000 -0.00029000
H 1.62978000 0.97478000 -0.30390000
H 1.62986000 -0.75054000 -0.69223000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3', 'reagent')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000031
H -0.00000000 0.00000000 1.07554864
H -0.00000000 0.93145124 -0.53777618
H 0.00000000 -0.93145124 -0.53777618
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'h2', 'reagent')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 -0.37169941
H 0.00000000 0.00000000 0.37169941
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch3chch2', 'reagent')] = qcdb.Molecule("""
0 1
H -1.80765000 -0.15368000 -0.87813000
C -1.23352000 0.16234000 0.00003000
C 0.13465000 -0.45362000 -0.00010000
C 1.28048000 0.22043000 -0.00006000
H 2.23888000 -0.28641000 0.00025000
H -1.18210000 1.25365000 -0.00053000
H -1.80711000 -0.15289000 0.87881000
H 0.16668000 -1.54201000 0.00023000
H 1.30167000 1.30642000 0.00021000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'chfch2', 'reagent')] = qcdb.Molecule("""
0 1
F 1.15776000 -0.22307000 0.00001000
C -0.02076000 0.43302000 -0.00002000
C -1.18005000 -0.19843000 0.00000000
H 0.11660000 1.50810000 0.00003000
H -2.09896000 0.37141000 0.00003000
H -1.23262000 -1.27944000 -0.00001000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'h', 'reagent')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'isobutane', 'reagent')] = qcdb.Molecule("""
0 1
C 0.00000090 -0.36984652 0.00000000
H 0.00000133 -1.47997543 0.00000000
C -1.46235985 0.10584355 0.00000000
C 0.73117989 0.10584959 1.26644108
C 0.73117989 0.10584959 -1.26644108
H -1.50901508 1.21298165 0.00000000
H 0.75449342 1.21298769 1.30684890
H 0.75449342 1.21298769 -1.30684890
H -2.00230292 -0.25604001 0.89527086
H -2.00230292 -0.25604001 -0.89527086
H 1.77648250 -0.25602315 1.28640747
H 0.22582896 -0.25604150 2.18168063
H 0.22582896 -0.25604150 -2.18168063
H 1.77648250 -0.25602315 -1.28640747
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cho', 'reagent')] = qcdb.Molecule("""
0 2
H 1.26423000 -1.25075000 -0.00022000
C 1.16786000 -0.17142000 -0.00004000
C -0.13387000 0.40647000 -0.00006000
H -0.18501000 1.51210000 -0.00018000
H 2.05914000 0.44514000 0.00051000
O -1.16779000 -0.26460000 0.00006000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2cch', 'reagent')] = qcdb.Molecule("""
0 2
C 0.11561000 -0.00003000 -0.00001000
C 1.33791000 0.00003000 -0.00001000
H 2.40011000 -0.00005000 0.00008000
C -1.25132000 0.00001000 -0.00001000
H -1.80663000 0.93004000 0.00004000
H -1.80669000 -0.93000000 0.00004000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'ch2ccn', 'reagent')] = qcdb.Molecule("""
0 2
C -1.80740000 0.05630000 0.00027000
C -0.52077000 -0.14829000 -0.00057000
C 0.80722000 -0.01017000 0.00028000
N 1.98158000 0.04542000 0.00000000
H -2.22720000 1.06314000 -0.00029000
H -2.51811000 -0.76812000 0.00035000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c6h6', 'reagent')] = qcdb.Molecule("""
0 1
C -0.00000000 1.41066086 0.00000000
C 1.22166790 0.70533038 -0.00000000
C 1.22166790 -0.70533038 0.00000000
C 0.00000000 -1.41066086 0.00000000
C -1.22166790 -0.70533038 0.00000000
C -1.22166790 0.70533038 0.00000000
H -0.00000000 2.50726822 0.00000000
H 2.17135800 1.25363362 0.00000000
H 2.17135800 -1.25363362 0.00000000
H 0.00000000 -2.50726822 0.00000000
H -2.17135800 -1.25363362 0.00000000
H -2.17135800 1.25363362 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'c6h5', 'reagent')] = qcdb.Molecule("""
0 2
C -0.02911138 1.44968932 0.00000000
C 1.19145514 0.72918056 0.00000000
C 1.18256095 -0.68275137 0.00000000
C -0.03576037 -1.39642075 0.00000000
C -1.27002298 -0.69963430 0.00000000
C -1.20569095 0.69610546 0.00000000
H -0.03893619 2.54557277 0.00000000
H 2.14438286 1.27284078 0.00000000
H 2.13198633 -1.23090189 0.00000000
H -0.03011941 -2.49351046 0.00000000
H -2.22399795 -1.23906809 0.00000000
units angstrom
""")
|
kratman/psi4public
|
psi4/share/psi4/databases/HTR40.py
|
Python
|
gpl-2.0
| 41,536
|
[
"Psi4"
] |
98f66fdda7c86ac88c84517d6d5c09e2ffd945bf650167fd0cfd2ba1cf8e2e0b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2010-2015
# Christian Kohlöffel
# Jean-Paul Schouwstra
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
import os
import sys
from math import degrees, radians
from copy import copy, deepcopy
import logging
import argparse
import subprocess
import tempfile
from core.point import Point
from core.layercontent import LayerContent, Layers, Shapes
from core.entitycontent import EntityContent
from core.linegeo import LineGeo
from core.holegeo import HoleGeo
from core.project import Project
from globals.config import MyConfig
import globals.globals as g
from globals.logger import LoggerClass
from gui.treehandling import TreeHandler
from gui.popupdialog import PopUpDialog
from gui.aboutdialog import AboutDialog
from dxfimport.importer import ReadDXF
from postpro.postprocessor import MyPostProcessor
from postpro.tspoptimisation import TspOptimization
from globals.six import text_type, PY2
import globals.constants as c
if c.PYQT5notPYQT4:
from PyQt5.QtWidgets import QMainWindow, QGraphicsView, QFileDialog, QApplication, QMessageBox
from PyQt5.QtGui import QSurfaceFormat
from PyQt5 import QtCore
getOpenFileName = QFileDialog.getOpenFileName
getSaveFileName = QFileDialog.getSaveFileName
file_str = lambda filename: filename
else:
from PyQt4.QtGui import QMainWindow, QGraphicsView, QFileDialog, QApplication, QMessageBox
from PyQt4 import QtCore
getOpenFileName = QFileDialog.getOpenFileNameAndFilter
getSaveFileName = QFileDialog.getSaveFileNameAndFilter
file_str = lambda filename: unicode(filename.toUtf8(), encoding="utf-8")
if PY2:
str_encode = lambda exstr: exstr.encode('utf-8')
str_decode = lambda filename: filename.decode("utf-8")
else:
str_encode = lambda exstr: exstr
str_decode = lambda filename: filename
logger = logging.getLogger()
# Get folder of the main instance and write into globals
g.folder = os.path.dirname(os.path.abspath(sys.argv[0])).replace("\\", "/")
if os.path.islink(sys.argv[0]):
g.folder = os.path.dirname(os.readlink(sys.argv[0]))
class MainWindow(QMainWindow):
"""Main Class"""
def __init__(self, app):
"""
Initialization of the Main window. This is directly called after the
Logger has been initialized. The Function loads the GUI, creates the
used Classes and connects the actions to the GUI.
"""
QMainWindow.__init__(self)
self.app = app
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.canvas = self.ui.canvas
if g.config.mode3d:
self.canvas_scene = self.canvas
else:
self.canvas_scene = None
self.TreeHandler = TreeHandler(self.ui)
self.MyPostProcessor = MyPostProcessor()
self.d2g = Project(self)
self.createActions()
self.connectToolbarToConfig()
self.filename = ""
self.valuesDXF = None
self.shapes = Shapes([])
self.entityRoot = None
self.layerContents = Layers([])
self.newNumber = 1
self.cont_dx = 0.0
self.cont_dy = 0.0
self.cont_rotate = 0.0
self.cont_scale = 1.0
# self.readSettings()
def tr(self, string_to_translate):
"""
Translate a string using the QCoreApplication translation framework
@param: string_to_translate: a unicode string
@return: the translated unicode string if it was possible to translate
"""
return text_type(QtCore.QCoreApplication.translate('MainWindow',
string_to_translate))
def createActions(self):
"""
Create the actions of the main toolbar.
@purpose: Links the callbacks to the actions in the menu
"""
# File
self.ui.actionOpen.triggered.connect(self.open)
self.ui.actionReload.triggered.connect(self.reload)
self.ui.actionSaveProjectAs.triggered.connect(self.saveProject)
self.ui.actionClose.triggered.connect(self.close)
# Export
self.ui.actionOptimizePaths.triggered.connect(self.optimizeTSP)
self.ui.actionExportShapes.triggered.connect(self.exportShapes)
self.ui.actionOptimizeAndExportShapes.triggered.connect(self.optimizeAndExportShapes)
# View
self.ui.actionShowPathDirections.triggered.connect(self.setShowPathDirections)
self.ui.actionShowDisabledPaths.triggered.connect(self.setShowDisabledPaths)
self.ui.actionLiveUpdateExportRoute.triggered.connect(self.liveUpdateExportRoute)
self.ui.actionDeleteG0Paths.triggered.connect(self.deleteG0Paths)
self.ui.actionAutoscale.triggered.connect(self.canvas.autoscale)
if g.config.mode3d:
self.ui.actionTopView.triggered.connect(self.canvas.topView)
self.ui.actionIsometricView.triggered.connect(self.canvas.isometricView)
# Options
self.ui.actionTolerances.triggered.connect(self.setTolerances)
self.ui.actionRotateAll.triggered.connect(self.rotateAll)
self.ui.actionScaleAll.triggered.connect(self.scaleAll)
self.ui.actionMoveWorkpieceZero.triggered.connect(self.moveWorkpieceZero)
self.ui.actionSplitLineSegments.triggered.connect(self.d2g.small_reload)
self.ui.actionAutomaticCutterCompensation.triggered.connect(self.d2g.small_reload)
self.ui.actionMilling.triggered.connect(self.setMachineTypeToMilling)
self.ui.actionDragKnife.triggered.connect(self.setMachineTypeToDragKnife)
self.ui.actionLathe.triggered.connect(self.setMachineTypeToLathe)
self.ui.actionLaser_Cutter.triggered.connect(self.setMachineTypeToLaserCutter)
# Help
self.ui.actionAbout.triggered.connect(self.about)
def connectToolbarToConfig(self, project=False):
# View
if not project:
self.ui.actionShowDisabledPaths.setChecked(g.config.vars.General['show_disabled_paths'])
self.ui.actionLiveUpdateExportRoute.setChecked(g.config.vars.General['live_update_export_route'])
# Options
self.ui.actionSplitLineSegments.setChecked(g.config.vars.General['split_line_segments'])
self.ui.actionAutomaticCutterCompensation.setChecked(g.config.vars.General['automatic_cutter_compensation'])
self.updateMachineType()
def keyPressEvent(self, event):
"""
Rewritten KeyPressEvent to get other behavior while Shift is pressed.
@purpose: Changes to ScrollHandDrag while Control pressed
@param event: Event Parameters passed to function
"""
if event.isAutoRepeat():
return
if event.key() == QtCore.Qt.Key_Control:
self.canvas.isMultiSelect = True
elif event.key() == QtCore.Qt.Key_Shift:
if g.config.mode3d:
self.canvas.isPanning = True
self.canvas.setCursor(QtCore.Qt.OpenHandCursor)
else:
self.canvas.setDragMode(QGraphicsView.ScrollHandDrag)
elif event.key() == QtCore.Qt.Key_Alt:
if g.config.mode3d:
self.canvas.isRotating = True
self.canvas.setCursor(QtCore.Qt.PointingHandCursor)
def keyReleaseEvent(self, event):
"""
Rewritten KeyReleaseEvent to get other behavior while Shift is pressed.
@purpose: Changes to RubberBandDrag while Control released
@param event: Event Parameters passed to function
"""
if event.key() == QtCore.Qt.Key_Control:
self.canvas.isMultiSelect = False
elif event.key() == QtCore.Qt.Key_Shift:
if g.config.mode3d:
self.canvas.isPanning = False
self.canvas.unsetCursor()
else:
self.canvas.setDragMode(QGraphicsView.NoDrag)
elif event.key() == QtCore.Qt.Key_Alt:
if g.config.mode3d:
self.canvas.isRotating = False
if -5 < self.canvas.rotX < 5 and\
-5 < self.canvas.rotY < 5 and\
-5 < self.canvas.rotZ < 5:
self.canvas.rotX = 0
self.canvas.rotY = 0
self.canvas.rotZ = 0
self.canvas.update()
self.canvas.unsetCursor()
def enableToolbarButtons(self, status=True):
# File
self.ui.actionReload.setEnabled(status)
self.ui.actionSaveProjectAs.setEnabled(status)
# Export
self.ui.actionOptimizePaths.setEnabled(status)
self.ui.actionExportShapes.setEnabled(status)
self.ui.actionOptimizeAndExportShapes.setEnabled(status)
# View
self.ui.actionShowPathDirections.setEnabled(status)
self.ui.actionShowDisabledPaths.setEnabled(status)
self.ui.actionLiveUpdateExportRoute.setEnabled(status)
self.ui.actionAutoscale.setEnabled(status)
if g.config.mode3d:
self.ui.actionTopView.setEnabled(status)
self.ui.actionIsometricView.setEnabled(status)
# Options
self.ui.actionTolerances.setEnabled(status)
self.ui.actionRotateAll.setEnabled(status)
self.ui.actionScaleAll.setEnabled(status)
self.ui.actionMoveWorkpieceZero.setEnabled(status)
def deleteG0Paths(self):
"""
Deletes the optimisation paths from the scene.
"""
self.setCursor(QtCore.Qt.WaitCursor)
self.app.processEvents()
self.canvas_scene.delete_opt_paths()
self.ui.actionDeleteG0Paths.setEnabled(False)
self.canvas_scene.update()
self.unsetCursor()
def exportShapes(self, status=False, saveas=None):
"""
This function is called by the menu "Export/Export Shapes". It may open
a Save Dialog if used without LinuxCNC integration. Otherwise it's
possible to select multiple postprocessor files, which are located
in the folder.
"""
self.setCursor(QtCore.Qt.WaitCursor)
self.app.processEvents()
logger.debug(self.tr('Export the enabled shapes'))
# Get the export order from the QTreeView
self.TreeHandler.updateExportOrder()
self.updateExportRoute()
logger.debug(self.tr("Sorted layers:"))
for i, layer in enumerate(self.layerContents.non_break_layer_iter()):
logger.debug("LayerContents[%i] = %s" % (i, layer))
if not g.config.vars.General['write_to_stdout']:
# Get the name of the File to export
if not saveas:
MyFormats = ""
for i in range(len(self.MyPostProcessor.output_format)):
name = "%s " % (self.MyPostProcessor.output_text[i])
format_ = "(*%s);;" % (self.MyPostProcessor.output_format[i])
MyFormats = MyFormats + name + format_
filename = self.showSaveDialog(self.tr('Export to file'), MyFormats)
save_filename = file_str(filename[0])
else:
filename = [None, None]
save_filename = saveas
# If Cancel was pressed
if not save_filename:
self.unsetCursor()
return
(beg, ende) = os.path.split(save_filename)
(fileBaseName, fileExtension) = os.path.splitext(ende)
pp_file_nr = 0
for i in range(len(self.MyPostProcessor.output_format)):
name = "%s " % (self.MyPostProcessor.output_text[i])
format_ = "(*%s)" % (self.MyPostProcessor.output_format[i])
MyFormats = name + format_
if filename[1] == MyFormats:
pp_file_nr = i
if fileExtension != self.MyPostProcessor.output_format[pp_file_nr]:
if not QtCore.QFile.exists(save_filename):
save_filename += self.MyPostProcessor.output_format[pp_file_nr]
self.MyPostProcessor.getPostProVars(pp_file_nr)
else:
save_filename = ""
self.MyPostProcessor.getPostProVars(0)
"""
Export will be performed according to LayerContents and their order
is given in this variable too.
"""
self.MyPostProcessor.exportShapes(self.filename,
save_filename,
self.layerContents)
self.unsetCursor()
if g.config.vars.General['write_to_stdout']:
self.close()
def optimizeAndExportShapes(self):
"""
Optimize the tool path, then export the shapes
"""
self.optimizeTSP()
self.exportShapes()
def updateExportRoute(self):
"""
Update the drawing of the export route
"""
self.canvas_scene.delete_opt_paths()
self.canvas_scene.addexproutest()
for LayerContent in self.layerContents.non_break_layer_iter():
if len(LayerContent.exp_order) > 0:
self.canvas_scene.addexproute(LayerContent.exp_order, LayerContent.nr)
if len(self.canvas_scene.routearrows) > 0:
self.ui.actionDeleteG0Paths.setEnabled(True)
self.canvas_scene.addexprouteen()
self.canvas_scene.update()
def optimizeTSP(self):
"""
Method is called to optimize the order of the shapes. This is performed
by solving the TSP Problem.
"""
self.setCursor(QtCore.Qt.WaitCursor)
self.app.processEvents()
logger.debug(self.tr('Optimize order of enabled shapes per layer'))
self.canvas_scene.delete_opt_paths()
# Get the export order from the QTreeView
logger.debug(self.tr('Updating order according to TreeView'))
self.TreeHandler.updateExportOrder()
self.canvas_scene.addexproutest()
for LayerContent in self.layerContents.non_break_layer_iter():
# Initial values for the Lists to export.
shapes_to_write = []
shapes_fixed_order = []
shapes_st_en_points = []
# Check all shapes of Layer which shall be exported and create List for it.
logger.debug(self.tr("Nr. of Shapes %s; Nr. of Shapes in Route %s")
% (len(LayerContent.shapes), len(LayerContent.exp_order)))
logger.debug(self.tr("Export Order for start: %s") % LayerContent.exp_order)
for shape_nr in range(len(LayerContent.exp_order)):
if not self.shapes[LayerContent.exp_order[shape_nr]].send_to_TSP:
shapes_fixed_order.append(shape_nr)
shapes_to_write.append(shape_nr)
shapes_st_en_points.append(self.shapes[LayerContent.exp_order[shape_nr]].get_start_end_points())
# Perform Export only if the Number of shapes to export is bigger than 0
if len(shapes_to_write) > 0:
# Calculate the number of iterations
iter_ = min(g.config.vars.Route_Optimisation['max_iterations'], len(shapes_to_write)*50)
# Adding the Start and End Points to the List.
x_st = g.config.vars.Plane_Coordinates['axis1_start_end']
y_st = g.config.vars.Plane_Coordinates['axis2_start_end']
start = Point(x_st, y_st)
ende = Point(x_st, y_st)
shapes_st_en_points.append([start, ende])
TSPs = TspOptimization(shapes_st_en_points, shapes_fixed_order)
logger.info(self.tr("TSP start values initialised for Layer %s") % LayerContent.name)
logger.debug(self.tr("Shapes to write: %s") % shapes_to_write)
logger.debug(self.tr("Fixed order: %s") % shapes_fixed_order)
for it_nr in range(iter_):
# Only show each 50th step.
if it_nr % 50 == 0:
TSPs.calc_next_iteration()
new_exp_order = [LayerContent.exp_order[nr] for nr in TSPs.opt_route[1:]]
logger.debug(self.tr("TSP done with result: %s") % TSPs)
LayerContent.exp_order = new_exp_order
self.canvas_scene.addexproute(LayerContent.exp_order, LayerContent.nr)
logger.debug(self.tr("New Export Order after TSP: %s") % new_exp_order)
self.app.processEvents()
else:
LayerContent.exp_order = []
if len(self.canvas_scene.routearrows) > 0:
self.ui.actionDeleteG0Paths.setEnabled(True)
self.canvas_scene.addexprouteen()
# Update order in the treeView, according to path calculation done by the TSP
self.TreeHandler.updateTreeViewOrder()
self.canvas_scene.update()
self.unsetCursor()
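# --- Illustrative sketch (an assumption, not the TspOptimization algorithm
# in postpro/tspoptimisation.py): a greedy nearest-neighbour ordering over
# shape start points conveys the flavour of the route optimisation above.
@staticmethod
def _greedy_route_sketch(points):
    """Order 2D points by repeatedly hopping to the nearest unvisited one."""
    if not points:
        return []
    route, remaining = [], list(range(len(points)))
    current = points[0]
    while remaining:
        nxt = min(remaining,
                  key=lambda i: (points[i].x - current.x) ** 2 + (points[i].y - current.y) ** 2)
        route.append(nxt)
        current = points[nxt]
        remaining.remove(nxt)
    return route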
def automaticCutterCompensation(self):
if self.ui.actionAutomaticCutterCompensation.isEnabled() and\
self.ui.actionAutomaticCutterCompensation.isChecked():
for layerContent in self.layerContents.non_break_layer_iter():
if layerContent.automaticCutterCompensationEnabled():
outside_compensation = True
shapes_left = layerContent.shapes
while len(shapes_left) > 0:
shapes_left = [shape for shape in shapes_left
if not self.ifNotContainedChangeCutCor(shape, shapes_left, outside_compensation)]
outside_compensation = not outside_compensation
self.canvas_scene.update()
def ifNotContainedChangeCutCor(self, shape, shapes_left, outside_compensation):
for otherShape in shapes_left:
if shape != otherShape and\
otherShape.topLeft.x < shape.topLeft.x and shape.bottomRight.x < otherShape.bottomRight.x and\
otherShape.bottomRight.y < shape.bottomRight.y and shape.topLeft.y < otherShape.topLeft.y:
return False
if outside_compensation == shape.cw:
shape.cut_cor = 41
else:
shape.cut_cor = 42
self.canvas_scene.repaint_shape(shape)
return True
def showSaveDialog(self, title, MyFormats):
"""
This function is called by the menu "Export/Export Shapes" of the main toolbar.
It creates the selection dialog for the exporter
@return: Returns the filename of the selected file.
"""
(beg, ende) = os.path.split(self.filename)
(fileBaseName, fileExtension) = os.path.splitext(ende)
default_name = os.path.join(g.config.vars.Paths['output_dir'], fileBaseName)
selected_filter = self.MyPostProcessor.output_format[0]
filename = getSaveFileName(self,
title, default_name,
MyFormats, selected_filter)
logger.info(self.tr("File: %s selected") % filename[0])
return filename
def about(self):
"""
This function is called by the menu "Help/About" of the main toolbar and
creates the About Window
"""
message = self.tr("<html>"
"<h2><center>You are using</center></h2>"
"<body bgcolor="\
"<center><img src=':images/dxf2gcode_logo.png' border='1' color='white'></center></body>"
"<h2>Version:</h2>"
"<body>%s: %s<br>"
"Last change: %s<br>"
"Changed by: %s<br></body>"
"<h2>Where to get help:</h2>"
"For more information and updates, "
"please visit "
"<a href='http://sourceforge.net/projects/dxf2gcode/'>http://sourceforge.net/projects/dxf2gcode/</a><br>"
"For any questions on how to use dxf2gcode please use the "
"<a href='https://groups.google.com/forum/?fromgroups#!forum/dxf2gcode-users'>mailing list</a><br>"
"To log bugs, or request features please use the "
"<a href='http://sourceforge.net/projects/dxf2gcode/tickets/'>issue tracking system</a><br>"
"<h2>License and copyright:</h2>"
"<body>This program is written in Python and is published under the "
"<a href='http://www.gnu.org/licenses/'>GNU GPLv3 license.</a><br>"
"</body></html>") % (c.VERSION, c.REVISION, c.DATE, c.AUTHOR)
AboutDialog(title=self.tr("About DXF2GCODE"), message=message)
def setShowPathDirections(self):
"""
This function is called by the menu "Show all path directions" of the
main and forwards the call to Canvas.setShow_path_direction()
"""
flag = self.ui.actionShowPathDirections.isChecked()
self.canvas.setShowPathDirections(flag)
self.canvas_scene.update()
def setShowDisabledPaths(self):
"""
This function is called by the menu "Show disabled paths" of the
main and forwards the call to Canvas.setShow_disabled_paths()
"""
flag = self.ui.actionShowDisabledPaths.isChecked()
self.canvas_scene.setShowDisabledPaths(flag)
self.canvas_scene.update()
def liveUpdateExportRoute(self):
"""
This function is called by the menu "Live update tool path" of the
main and forwards the call to TreeHandler.setUpdateExportRoute()
"""
flag = self.ui.actionLiveUpdateExportRoute.isChecked()
self.TreeHandler.setLiveUpdateExportRoute(flag)
def setTolerances(self):
title = self.tr('Contour tolerances')
units = "in" if g.config.metric == 0 else "mm"
label = [self.tr("Tolerance for common points [%s]:") % units,
self.tr("Tolerance for curve fitting [%s]:") % units]
value = [g.config.point_tolerance,
g.config.fitting_tolerance]
logger.debug(self.tr("set Tolerances"))
SetTolDialog = PopUpDialog(title, label, value)
if SetTolDialog.result is None:
return
g.config.point_tolerance = float(SetTolDialog.result[0])
g.config.fitting_tolerance = float(SetTolDialog.result[1])
self.d2g.reload() # set tolerances requires a complete reload
def scaleAll(self):
title = self.tr('Scale Contour')
label = [self.tr("Scale Contour by factor:")]
value = [self.cont_scale]
ScaEntDialog = PopUpDialog(title, label, value)
if ScaEntDialog.result is None:
return
self.cont_scale = float(ScaEntDialog.result[0])
self.entityRoot.sca = self.cont_scale
self.d2g.small_reload()
def rotateAll(self):
title = self.tr('Rotate Contour')
label = [self.tr("Rotate Contour by deg:")] # TODO should we support radians for drawing unit non metric?
value = [degrees(self.cont_rotate)]
RotEntDialog = PopUpDialog(title, label, value)
if RotEntDialog.result is None:
return
self.cont_rotate = radians(float(RotEntDialog.result[0]))
self.entityRoot.rot = self.cont_rotate
self.d2g.small_reload()
def moveWorkpieceZero(self):
"""
This function is called when the Option=>Move WP Zero Menu is clicked.
"""
title = self.tr('Workpiece zero offset')
units = "[in]" if g.config.metric == 0 else "[mm]"
label = [self.tr("Offset %s axis %s:") % (g.config.vars.Axis_letters['ax1_letter'], units),
self.tr("Offset %s axis %s:") % (g.config.vars.Axis_letters['ax2_letter'], units)]
value = [self.cont_dx, self.cont_dy]
MoveWpzDialog = PopUpDialog(title, label, value, True)
if MoveWpzDialog.result is None:
return
if MoveWpzDialog.result == 'Auto':
minx = sys.float_info.max
miny = sys.float_info.max
for shape in self.shapes:
if not shape.isDisabled():
minx = min(minx, shape.topLeft.x)
miny = min(miny, shape.bottomRight.y)
self.cont_dx = self.entityRoot.p0.x - minx
self.cont_dy = self.entityRoot.p0.y - miny
else:
self.cont_dx = float(MoveWpzDialog.result[0])
self.cont_dy = float(MoveWpzDialog.result[1])
self.entityRoot.p0.x = self.cont_dx
self.entityRoot.p0.y = self.cont_dy
self.d2g.small_reload()
def setMachineTypeToMilling(self):
g.config.machine_type = 'milling'
self.updateMachineType()
self.d2g.small_reload()
def setMachineTypeToDragKnife(self):
g.config.machine_type = 'drag_knife'
self.updateMachineType()
self.d2g.small_reload()
def setMachineTypeToLathe(self):
g.config.machine_type = 'lathe'
self.updateMachineType()
self.d2g.small_reload()
def setMachineTypeToLaserCutter(self):
g.config.machine_type = 'laser_cutter'
self.updateMachineType()
self.d2g.small_reload()
def updateMachineType(self):
if g.config.machine_type == 'milling':
self.ui.actionAutomaticCutterCompensation.setEnabled(True)
self.ui.actionMilling.setChecked(True)
self.ui.actionDragKnife.setChecked(False)
self.ui.actionLathe.setChecked(False)
self.ui.actionLaser_Cutter.setChecked(False)
self.ui.label_9.setText(self.tr("Z Infeed depth"))
self.ui.label_15.setText(self.tr("Not Available"))
self.ui.label_16.setText(self.tr("Not Available"))
elif g.config.machine_type == 'lathe':
self.ui.actionAutomaticCutterCompensation.setEnabled(False)
self.ui.actionMilling.setChecked(False)
self.ui.actionDragKnife.setChecked(False)
self.ui.actionLathe.setChecked(True)
self.ui.actionLaser_Cutter.setChecked(False)
self.ui.label_9.setText(self.tr("No Z-Axis for lathe"))
self.ui.label_15.setText(self.tr("Not Available"))
self.ui.label_16.setText(self.tr("Not Available"))
elif g.config.machine_type == "drag_knife":
self.ui.actionAutomaticCutterCompensation.setEnabled(False)
self.ui.actionMilling.setChecked(False)
self.ui.actionDragKnife.setChecked(True)
self.ui.actionLathe.setChecked(False)
self.ui.actionLaser_Cutter.setChecked(False)
self.ui.label_9.setText(self.tr("Z Drag depth"))
self.ui.label_15.setText(self.tr("Not Available"))
self.ui.label_16.setText(self.tr("Not Available"))
elif g.config.machine_type == 'laser_cutter':
self.ui.actionAutomaticCutterCompensation.setEnabled(True)
self.ui.actionMilling.setChecked(False)
self.ui.actionDragKnife.setChecked(False)
self.ui.actionLathe.setChecked(False)
self.ui.actionLaser_Cutter.setChecked(True)
self.ui.label_15.setText(self.tr("Laser Power"))
self.ui.label_16.setText(self.tr("Laser Pulses Per mm"))
self.ui.label_10.setText(self.tr("Not Available"))
self.ui.label_9.setText(self.tr("Not Available"))
self.ui.label_8.setText(self.tr("Not Available"))
self.ui.label_6.setText(self.tr("Not Available"))
self.ui.label_5.setText(self.tr("Not Available"))
self.ui.label_14.setText(self.tr("Not Available"))
def open(self):
"""
This function is called by the menu "File/Load File" of the main toolbar.
It creates the file selection dialog and calls the load function to
load the selected file.
"""
self.OpenFileDialog(self.tr("Open file"))
# If there is something to load then call the load function callback
if self.filename:
self.cont_dx = 0.0
self.cont_dy = 0.0
self.cont_rotate = 0.0
self.cont_scale = 1.0
self.load()
def OpenFileDialog(self, title):
self.filename, _ = getOpenFileName(self,
title,
g.config.vars.Paths['import_dir'],
self.tr("All supported files (*.dxf *.ps *.pdf *%s);;"
"DXF files (*.dxf);;"
"PS files (*.ps);;"
"PDF files (*.pdf);;"
"Project files (*%s);;"
"All types (*.*)") % (c.PROJECT_EXTENSION, c.PROJECT_EXTENSION))
# If there is something to load then call the load function callback
if self.filename:
self.filename = file_str(self.filename)
logger.info(self.tr("File: %s selected") % self.filename)
def load(self, plot=True):
"""
Loads the file given by self.filename. Also calls the command to
make the plot.
@param plot: if it should plot
"""
if not QtCore.QFile.exists(self.filename):
logger.info(self.tr("Cannot locate file: %s") % self.filename)
self.OpenFileDialog(self.tr("Manually open file: %s") % self.filename)
if not self.filename:
return False # cancelled
self.setCursor(QtCore.Qt.WaitCursor)
self.setWindowTitle("DXF2GCODE - [%s]" % self.filename)
self.canvas.resetAll()
self.app.processEvents()
(name, ext) = os.path.splitext(self.filename)
if ext.lower() == c.PROJECT_EXTENSION:
self.loadProject(self.filename)
return True # kill this load operation - we opened a new one
if ext.lower() == ".ps" or ext.lower() == ".pdf":
logger.info(self.tr("Sending Postscript/PDF to pstoedit"))
# Keep the original PS/PDF path, then create a temporary DXF file
# which will be read by the program after conversion
ps_filename = os.path.normcase(self.filename)
self.filename = os.path.join(tempfile.gettempdir(), 'dxf2gcode_temp.dxf')
pstoedit_cmd = g.config.vars.Filters['pstoedit_cmd']
pstoedit_opt = g.config.vars.Filters['pstoedit_opt']
cmd = [('%s' % pstoedit_cmd)] + pstoedit_opt + [('%s' % ps_filename), ('%s' % self.filename)]
logger.debug(cmd)
try:
# check_call raises subprocess.CalledProcessError if the return code is non-zero
subprocess.check_call(cmd)
except OSError as e:
logger.error(e.strerror)
self.unsetCursor()
QMessageBox.critical(self,
"ERROR",
self.tr("Please make sure you have installed pstoedit, and configured it in the config file."))
return True
logger.info(self.tr('Loading file: %s') % self.filename)
self.valuesDXF = ReadDXF(self.filename)
# Output the information in the text window
logger.info(self.tr('Loaded layers: %s') % len(self.valuesDXF.layers))
logger.info(self.tr('Loaded blocks: %s') % len(self.valuesDXF.blocks.Entities))
for i in range(len(self.valuesDXF.blocks.Entities)):
layers = self.valuesDXF.blocks.Entities[i].get_used_layers()
logger.info(self.tr('Block %i includes %i Geometries, reduced to %i Contours, used layers: %s')
% (i, len(self.valuesDXF.blocks.Entities[i].geo), len(self.valuesDXF.blocks.Entities[i].cont), layers))
layers = self.valuesDXF.entities.get_used_layers()
insert_nr = self.valuesDXF.entities.get_insert_nr()
logger.info(self.tr('Loaded %i entity geometries; reduced to %i contours; used layers: %s; number of inserts %i')
% (len(self.valuesDXF.entities.geo), len(self.valuesDXF.entities.cont), layers, insert_nr))
if g.config.metric == 0:
logger.info(self.tr("Drawing units: inches"))
distance = self.tr("[in]")
speed = self.tr("[IPM]")
else:
logger.info(self.tr("Drawing units: millimeters"))
distance = self.tr("[mm]")
speed = self.tr("[mm/min]")
self.ui.unitLabel_3.setText(distance)
self.ui.unitLabel_4.setText(distance)
self.ui.unitLabel_5.setText(distance)
self.ui.unitLabel_6.setText(distance)
self.ui.unitLabel_7.setText(distance)
self.ui.unitLabel_8.setText(speed)
self.ui.unitLabel_9.setText(speed)
self.makeShapes()
if plot:
self.plot()
return True
def plot(self):
# Populate the treeViews
self.TreeHandler.buildEntitiesTree(self.entityRoot)
self.TreeHandler.buildLayerTree(self.layerContents)
# Paint the canvas
if not g.config.mode3d:
self.canvas_scene = MyGraphicsScene()
self.canvas.setScene(self.canvas_scene)
self.canvas_scene.plotAll(self.shapes)
self.setShowPathDirections()
self.setShowDisabledPaths()
self.liveUpdateExportRoute()
if not g.config.mode3d:
self.canvas.show()
self.canvas.setFocus()
self.canvas.autoscale()
# After all is plotted enable the Menu entities
self.enableToolbarButtons()
self.automaticCutterCompensation()
self.unsetCursor()
def reload(self):
"""
This function is called by the menu "File/Reload File" of the main toolbar.
It reloads the previously loaded file (if any)
"""
if self.filename:
logger.info(self.tr("Reloading file: %s") % self.filename)
self.load()
def makeShapes(self):
self.entityRoot = EntityContent(nr=0, name='Entities', parent=None,
p0=Point(self.cont_dx, self.cont_dy), pb=Point(),
sca=[self.cont_scale, self.cont_scale, self.cont_scale], rot=self.cont_rotate)
self.layerContents = Layers([])
self.shapes = Shapes([])
self.makeEntityShapes(self.entityRoot)
for layerContent in self.layerContents:
layerContent.overrideDefaults()
self.layerContents.sort(key=lambda x: x.nr)
self.newNumber = len(self.shapes)
def makeEntityShapes(self, parent, layerNr=-1):
"""
This method is called prior to plotting the shapes. It creates
all shape classes which are plotted into the canvas.
@param parent: The parent of a shape is always an Entity. It may be the root
or, if it is a Block, this is the Block.
"""
if parent.name == "Entities":
entities = self.valuesDXF.entities
else:
ent_nr = self.valuesDXF.Get_Block_Nr(parent.name)
entities = self.valuesDXF.blocks.Entities[ent_nr]
# Assigning the geometries in the variables geos & contours in cont
ent_geos = entities.geo
# Loop for the number of contours
for cont in entities.cont:
# Query if it is in the contour of an insert or of a block
if ent_geos[cont.order[0][0]].Typ == "Insert":
ent_geo = ent_geos[cont.order[0][0]]
# Assign the base point for the block
new_ent_nr = self.valuesDXF.Get_Block_Nr(ent_geo.BlockName)
new_entities = self.valuesDXF.blocks.Entities[new_ent_nr]
pb = new_entities.basep
# Scaling, etc. assign the block
p0 = ent_geos[cont.order[0][0]].Point
sca = ent_geos[cont.order[0][0]].Scale
rot = ent_geos[cont.order[0][0]].rot
# Creating the new EntityContent for the insert
newEntityContent = EntityContent(nr=0,
name=ent_geo.BlockName,
parent=parent,
p0=p0,
pb=pb,
sca=sca,
rot=rot)
parent.append(newEntityContent)
self.makeEntityShapes(newEntityContent, ent_geo.Layer_Nr)
else:
# Loop for the number of geometries
tmp_shape = Shape(len(self.shapes),
cont.closed,
parent)
for ent_geo_nr in range(len(cont.order)):
ent_geo = ent_geos[cont.order[ent_geo_nr][0]]
if cont.order[ent_geo_nr][1]:
ent_geo.geo.reverse()
for geo in ent_geo.geo:
geo = copy(geo)
geo.reverse()
self.append_geo_to_shape(tmp_shape, geo)
ent_geo.geo.reverse()
else:
for geo in ent_geo.geo:
self.append_geo_to_shape(tmp_shape, copy(geo))
if len(tmp_shape.geos) > 0:
# All shapes have to be CW direction.
tmp_shape.AnalyseAndOptimize()
self.shapes.append(tmp_shape)
if g.config.vars.Import_Parameters['insert_at_block_layer'] and layerNr != -1:
self.addtoLayerContents(tmp_shape, layerNr)
else:
self.addtoLayerContents(tmp_shape, ent_geo.Layer_Nr)
parent.append(tmp_shape)
if not g.config.mode3d:
# Connect the shapeSelectionChanged and enableDisableShape signals to our treeView,
# so that selections of the shapes are reflected on the treeView
tmp_shape.setSelectionChangedCallback(self.TreeHandler.updateShapeSelection)
tmp_shape.setEnableDisableCallback(self.TreeHandler.updateShapeEnabling)
def append_geo_to_shape(self, shape, geo):
if -1e-5 <= geo.length < 1e-5: # TODO adjust import for this
return
if self.ui.actionSplitLineSegments.isChecked():
if isinstance(geo, LineGeo):
diff = (geo.Pe - geo.Ps) / 2.0
geo_b = deepcopy(geo)
geo_a = deepcopy(geo)
geo_b.Pe -= diff
geo_a.Ps += diff
shape.append(geo_b)
shape.append(geo_a)
else:
shape.append(geo)
else:
shape.append(geo)
if isinstance(geo, HoleGeo):
shape.type = 'Hole'
shape.closed = 1 # TODO adjust import for holes?
if g.config.machine_type == 'drag_knife':
shape.disabled = True
shape.allowedToChange = False
def addtoLayerContents(self, shape, lay_nr):
# Check if the layer already exists and add shape if it is.
for LayCon in self.layerContents:
if LayCon.nr == lay_nr:
LayCon.shapes.append(shape)
shape.parentLayer = LayCon
return
# If the Layer does not exist create a new one.
LayerName = self.valuesDXF.layers[lay_nr].name
self.layerContents.append(LayerContent(lay_nr, LayerName, [shape]))
shape.parentLayer = self.layerContents[-1]
def loadProject(self, filename):
"""
Load all variables from file
"""
# since Py3 no longer has execfile, we read the file manually
with open(filename, 'r') as file_:
str_ = file_.read()
self.d2g.load(str_)
def saveProject(self):
"""
Save all variables to file
"""
prj_filename = self.showSaveDialog(self.tr('Save project to file'), "Project files (*%s)" % c.PROJECT_EXTENSION)
save_prj_filename = file_str(prj_filename[0])
# If Cancel was pressed
if not save_prj_filename:
return
(beg, ende) = os.path.split(save_prj_filename)
(fileBaseName, fileExtension) = os.path.splitext(ende)
if fileExtension != c.PROJECT_EXTENSION:
if not QtCore.QFile.exists(save_prj_filename):
save_prj_filename += c.PROJECT_EXTENSION
pyCode = self.d2g.export()
try:
# File open and write
f = open(save_prj_filename, "w")
f.write(str_encode(pyCode))
f.close()
logger.info(self.tr("Save project to FILE was successful"))
except IOError:
QMessageBox.warning(g.window,
self.tr("Warning during Save Project As"),
self.tr("Cannot Save the File"))
def closeEvent(self, e):
logger.debug(self.tr("Closing"))
# self.writeSettings()
e.accept()
def readSettings(self):
settings = QtCore.QSettings("dxf2gcode", "dxf2gcode")
settings.beginGroup("MainWindow")
self.resize(settings.value("size", QtCore.QSize(800, 600)).toSize())
self.move(settings.value("pos", QtCore.QPoint(200, 200)).toPoint())
settings.endGroup()
def writeSettings(self):
settings = QtCore.QSettings("dxf2gcode", "dxf2gcode")
settings.beginGroup("MainWindow")
settings.setValue("size", self.size())
settings.setValue("pos", self.pos())
settings.endGroup()
if __name__ == "__main__":
"""
The main function which is executed after program start.
"""
Log = LoggerClass(logger)
g.config = MyConfig()
Log.set_console_handler_loglevel()
Log.add_file_logger()
app = QApplication(sys.argv)
# Get local language and install if available.
locale = QtCore.QLocale.system().name()
logger.debug("locale: %s" %locale)
translator = QtCore.QTranslator()
if translator.load("dxf2gcode_" + locale, "./i18n"):
app.installTranslator(translator)
# Delay imports - needs to be done after logger and config initialization; and before the main window
if c.PYQT5notPYQT4:
from dxf2gcode_ui5 import Ui_MainWindow
else:
from dxf2gcode_ui4 import Ui_MainWindow
if g.config.mode3d:
from core.shape import Shape
# multi-sampling has been introduced in PyQt5
fmt = QSurfaceFormat()
fmt.setSamples(4)
QSurfaceFormat.setDefaultFormat(fmt)
else:
from gui.canvas2d import MyGraphicsScene
from gui.canvas2d import ShapeGUI as Shape
window = MainWindow(app)
g.window = window
Log.add_window_logger(window.ui.messageBox)
# command line options
parser = argparse.ArgumentParser()
parser.add_argument("filename", nargs="?")
# parser.add_argument("-f", "--file", dest = "filename",
# help = "read data from FILENAME")
parser.add_argument("-e", "--export", dest="export_filename",
help="export data to FILENAME")
parser.add_argument("-q", "--quiet", action="store_true",
dest="quiet", help="no GUI")
# parser.add_option("-v", "--verbose",
# action = "store_true", dest = "verbose")
options = parser.parse_args()
# (options, args) = parser.parse_args()
logger.debug("Started with following options:\n%s" % parser)
if not options.quiet:
window.show()
if options.filename is not None:
window.filename = str_decode(options.filename)
window.load()
if options.export_filename is not None:
window.exportShapes(None, options.export_filename)
if not options.quiet:
# It's exec_ because exec is a reserved word in Python
sys.exit(app.exec_())
|
Poofjunior/dxf2gcode
|
dxf2gcode.py
|
Python
|
gpl-3.0
| 46,284
|
[
"VisIt"
] |
b6387b1a9da68a0dde60bf0f243702ba2352986c46086f99d3653c3468945bd0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import time
from trac.admin.api import console_date_format
from trac.core import TracError, Component, implements
from trac.util import hex_entropy
from trac.util.text import print_table
from trac.util.translation import _
from trac.util.datefmt import format_date, parse_date, to_datetime, \
to_timestamp
from trac.admin.api import IAdminCommandProvider, AdminCommandError
UPDATE_INTERVAL = 3600 * 24 # Update session last_visit time stamp after 1 day
PURGE_AGE = 3600 * 24 * 90 # Purge session after 90 days idle
COOKIE_KEY = 'trac_session'
# Note: as we often manipulate both the `session` and the
# `session_attribute` tables, there's a possibility of table
# deadlocks (#9705). We try to prevent them to happen by always
# accessing the tables in the same order within the transaction,
# first `session`, then `session_attribute`.
class DetachedSession(dict):
def __init__(self, env, sid):
dict.__init__(self)
self.env = env
self.sid = None
if sid:
self.get_session(sid, authenticated=True)
else:
self.authenticated = False
self.last_visit = 0
self._new = True
self._old = {}
def __setitem__(self, key, value):
dict.__setitem__(self, key, unicode(value))
def get_session(self, sid, authenticated=False):
self.env.log.debug("Retrieving session for ID %r", sid)
with self.env.db_query as db:
self.sid = sid
self.authenticated = authenticated
self.clear()
for last_visit, in db("""
SELECT last_visit FROM session
WHERE sid=%s AND authenticated=%s
""", (sid, int(authenticated))):
self._new = False
self.last_visit = int(last_visit or 0)
self.update(db("""
SELECT name, value FROM session_attribute
WHERE sid=%s and authenticated=%s
""", (sid, int(authenticated))))
self._old = self.copy()
break
else:
self.last_visit = 0
self._new = True
self._old = {}
def save(self):
items = self.items()
if not self._old and not items:
# The session doesn't have associated data, so there's no need to
# persist it
return
authenticated = int(self.authenticated)
now = int(time.time())
# We can't do the session management in one big transaction,
# as the intertwined changes to both the session and
# session_attribute tables are prone to deadlocks (#9705).
        # Therefore we first save the current session, then we
# eventually purge the tables.
session_saved = False
with self.env.db_transaction as db:
# Try to save the session if it's a new one. A failure to
# do so is not critical but we nevertheless skip the
# following steps.
if self._new:
self.last_visit = now
self._new = False
# The session might already exist even if _new is True since
# it could have been created by a concurrent request (#3563).
try:
db("""INSERT INTO session (sid, last_visit, authenticated)
VALUES (%s,%s,%s)
""", (self.sid, self.last_visit, authenticated))
except self.env.db_exc.IntegrityError:
self.env.log.warning('Session %s already exists', self.sid)
db.rollback()
return
# Remove former values for session_attribute and save the
# new ones. The last concurrent request to do so "wins".
if self._old != self:
if not items and not authenticated:
# No need to keep around empty unauthenticated sessions
db("DELETE FROM session WHERE sid=%s AND authenticated=0",
(self.sid,))
db("""DELETE FROM session_attribute
WHERE sid=%s AND authenticated=%s
""", (self.sid, authenticated))
self._old = dict(self.items())
# The session variables might already have been updated by a
# concurrent request.
try:
db.executemany("""
INSERT INTO session_attribute
(sid,authenticated,name,value)
VALUES (%s,%s,%s,%s)
""", [(self.sid, authenticated, k, v)
for k, v in items])
except self.env.db_exc.IntegrityError:
self.env.log.warning('Attributes for session %s already '
'updated', self.sid)
db.rollback()
return
session_saved = True
# Purge expired sessions. We do this only when the session was
# changed as to minimize the purging.
if session_saved and now - self.last_visit > UPDATE_INTERVAL:
self.last_visit = now
mintime = now - PURGE_AGE
with self.env.db_transaction as db:
# Update the session last visit time if it is over an
# hour old, so that session doesn't get purged
self.env.log.info("Refreshing session %s", self.sid)
db("""UPDATE session SET last_visit=%s
WHERE sid=%s AND authenticated=%s
""", (self.last_visit, self.sid, authenticated))
            self.env.log.debug('Purging old, expired sessions.')
db("""DELETE FROM session_attribute
WHERE authenticated=0 AND sid IN (
SELECT sid FROM session
WHERE authenticated=0 AND last_visit < %s
)
""", (mintime,))
# Avoid holding locks on lot of rows on both session_attribute
# and session tables
with self.env.db_transaction as db:
db("""
DELETE FROM session
WHERE authenticated=0 AND last_visit < %s
""", (mintime,))
class Session(DetachedSession):
"""Basic session handling and per-session storage."""
def __init__(self, env, req):
super(Session, self).__init__(env, None)
self.req = req
if req.authname == 'anonymous':
if not req.incookie.has_key(COOKIE_KEY):
self.sid = hex_entropy(24)
self.bake_cookie()
else:
sid = req.incookie[COOKIE_KEY].value
self.get_session(sid)
else:
if req.incookie.has_key(COOKIE_KEY):
sid = req.incookie[COOKIE_KEY].value
self.promote_session(sid)
self.get_session(req.authname, authenticated=True)
def bake_cookie(self, expires=PURGE_AGE):
assert self.sid, 'Session ID not set'
self.req.outcookie[COOKIE_KEY] = self.sid
self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/'
self.req.outcookie[COOKIE_KEY]['expires'] = expires
if self.env.secure_cookies:
self.req.outcookie[COOKIE_KEY]['secure'] = True
def get_session(self, sid, authenticated=False):
refresh_cookie = False
if self.sid and sid != self.sid:
refresh_cookie = True
super(Session, self).get_session(sid, authenticated)
if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL:
refresh_cookie = True
# Refresh the session cookie if this is the first visit after a day
if not authenticated and refresh_cookie:
self.bake_cookie()
def change_sid(self, new_sid):
assert self.req.authname == 'anonymous', \
'Cannot change ID of authenticated session'
assert new_sid, 'Session ID cannot be empty'
if new_sid == self.sid:
return
with self.env.db_transaction as db:
if db("SELECT sid FROM session WHERE sid=%s", (new_sid,)):
raise TracError(_("Session '%(id)s' already exists. "
"Please choose a different session ID.",
id=new_sid),
_("Error renaming session"))
self.env.log.debug("Changing session ID %s to %s", self.sid,
new_sid)
db("UPDATE session SET sid=%s WHERE sid=%s AND authenticated=0",
(new_sid, self.sid))
db("""UPDATE session_attribute SET sid=%s
WHERE sid=%s and authenticated=0
""", (new_sid, self.sid))
self.sid = new_sid
self.bake_cookie()
def promote_session(self, sid):
"""Promotes an anonymous session to an authenticated session, if there
is no preexisting session data for that user name.
"""
assert self.req.authname != 'anonymous', \
"Cannot promote session of anonymous user"
with self.env.db_transaction as db:
authenticated_flags = [authenticated for authenticated, in db(
"SELECT authenticated FROM session WHERE sid=%s OR sid=%s",
(sid, self.req.authname))]
if len(authenticated_flags) == 2:
# There's already an authenticated session for the user,
# we simply delete the anonymous session
db("DELETE FROM session WHERE sid=%s AND authenticated=0",
(sid,))
db("""DELETE FROM session_attribute
WHERE sid=%s AND authenticated=0
""", (sid,))
elif len(authenticated_flags) == 1:
if not authenticated_flags[0]:
                    # Update the anonymous session records so the session ID
# becomes the user name, and set the authenticated flag.
self.env.log.debug("Promoting anonymous session %s to "
"authenticated session for user %s",
sid, self.req.authname)
db("""UPDATE session SET sid=%s, authenticated=1
WHERE sid=%s AND authenticated=0
""", (self.req.authname, sid))
db("""UPDATE session_attribute SET sid=%s, authenticated=1
WHERE sid=%s
""", (self.req.authname, sid))
else:
# We didn't have an anonymous session for this sid. The
# authenticated session might have been inserted between the
# SELECT above and here, so we catch the error.
try:
db("""INSERT INTO session (sid, last_visit, authenticated)
VALUES (%s, %s, 1)
""", (self.req.authname, int(time.time())))
except self.env.db_exc.IntegrityError:
self.env.log.warning('Authenticated session for %s '
'already exists', self.req.authname)
db.rollback()
self._new = False
self.sid = sid
self.bake_cookie(0) # expire the cookie
class SessionAdmin(Component):
"""trac-admin command provider for session management"""
implements(IAdminCommandProvider)
def get_admin_commands(self):
yield ('session list', '[sid[:0|1]] [...]',
"""List the name and email for the given sids
Specifying the sid 'anonymous' lists all unauthenticated
sessions, and 'authenticated' all authenticated sessions.
'*' lists all sessions, and is the default if no sids are
given.
An sid suffix ':0' operates on an unauthenticated session with
the given sid, and a suffix ':1' on an authenticated session
(the default).""",
self._complete_list, self._do_list)
yield ('session add', '<sid[:0|1]> [name] [email]',
"""Create a session for the given sid
Populates the name and email attributes for the given session.
Adding a suffix ':0' to the sid makes the session
unauthenticated, and a suffix ':1' makes it authenticated (the
default if no suffix is specified).""",
None, self._do_add)
yield ('session set', '<name|email> <sid[:0|1]> <value>',
"""Set the name or email attribute of the given sid
An sid suffix ':0' operates on an unauthenticated session with
the given sid, and a suffix ':1' on an authenticated session
(the default).""",
self._complete_set, self._do_set)
yield ('session delete', '<sid[:0|1]> [...]',
"""Delete the session of the specified sid
An sid suffix ':0' operates on an unauthenticated session with
the given sid, and a suffix ':1' on an authenticated session
(the default). Specifying the sid 'anonymous' will delete all
anonymous sessions.""",
self._complete_delete, self._do_delete)
yield ('session purge', '<age>',
"""Purge all anonymous sessions older than the given age
Age may be specified as a relative time like "90 days ago", or
in YYYYMMDD format.""",
None, self._do_purge)
def _split_sid(self, sid):
if sid.endswith(':0'):
return (sid[:-2], 0)
elif sid.endswith(':1'):
return (sid[:-2], 1)
else:
return (sid, 1)
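        # Illustrative examples: 'joe:0' -> ('joe', 0), 'joe:1' -> ('joe', 1),
        # and a bare 'joe' defaults to the authenticated form ('joe', 1).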
def _get_sids(self):
rows = self.env.db_query("SELECT sid, authenticated FROM session")
return ['%s:%d' % (sid, auth) for sid, auth in rows]
def _get_list(self, sids):
all_anon = 'anonymous' in sids or '*' in sids
all_auth = 'authenticated' in sids or '*' in sids
sids = set(self._split_sid(sid) for sid in sids
if sid not in ('anonymous', 'authenticated', '*'))
rows = self.env.db_query("""
SELECT DISTINCT s.sid, s.authenticated, s.last_visit,
n.value, e.value
FROM session AS s
LEFT JOIN session_attribute AS n
ON (n.sid=s.sid AND n.authenticated=s.authenticated
AND n.name='name')
LEFT JOIN session_attribute AS e
ON (e.sid=s.sid AND e.authenticated=s.authenticated
AND e.name='email')
ORDER BY s.sid, s.authenticated
""")
for sid, authenticated, last_visit, name, email in rows:
            if (all_anon and not authenticated) or (all_auth and authenticated) \
                    or (sid, authenticated) in sids:
yield (sid, authenticated, last_visit, name, email)
def _complete_list(self, args):
all_sids = self._get_sids() + ['*', 'anonymous', 'authenticated']
return set(all_sids) - set(args)
def _complete_set(self, args):
if len(args) == 1:
return ['name', 'email']
elif len(args) == 2:
return self._get_sids()
def _complete_delete(self, args):
all_sids = self._get_sids() + ['anonymous']
return set(all_sids) - set(args)
def _do_list(self, *sids):
if not sids:
sids = ['*']
print_table([(r[0], r[1], format_date(to_datetime(r[2]),
console_date_format),
r[3], r[4])
for r in self._get_list(sids)],
[_('SID'), _('Auth'), _('Last Visit'), _('Name'),
_('Email')])
def _do_add(self, sid, name=None, email=None):
sid, authenticated = self._split_sid(sid)
with self.env.db_transaction as db:
try:
db("INSERT INTO session VALUES (%s, %s, %s)",
(sid, authenticated, int(time.time())))
except Exception:
raise AdminCommandError(_("Session '%(sid)s' already exists",
sid=sid))
if name is not None:
db("INSERT INTO session_attribute VALUES (%s,%s,'name',%s)",
(sid, authenticated, name))
if email is not None:
db("INSERT INTO session_attribute VALUES (%s,%s,'email',%s)",
(sid, authenticated, email))
def _do_set(self, attr, sid, val):
if attr not in ('name', 'email'):
raise AdminCommandError(_("Invalid attribute '%(attr)s'",
attr=attr))
sid, authenticated = self._split_sid(sid)
with self.env.db_transaction as db:
if not db("""SELECT sid FROM session
WHERE sid=%s AND authenticated=%s""",
(sid, authenticated)):
raise AdminCommandError(_("Session '%(sid)s' not found",
sid=sid))
db("""
DELETE FROM session_attribute
WHERE sid=%s AND authenticated=%s AND name=%s
""", (sid, authenticated, attr))
db("INSERT INTO session_attribute VALUES (%s, %s, %s, %s)",
(sid, authenticated, attr, val))
def _do_delete(self, *sids):
with self.env.db_transaction as db:
for sid in sids:
sid, authenticated = self._split_sid(sid)
if sid == 'anonymous':
db("DELETE FROM session WHERE authenticated=0")
db("DELETE FROM session_attribute WHERE authenticated=0")
else:
db("""
DELETE FROM session
WHERE sid=%s AND authenticated=%s
""", (sid, authenticated))
db("""
DELETE FROM session_attribute
WHERE sid=%s AND authenticated=%s
""", (sid, authenticated))
def _do_purge(self, age):
when = parse_date(age)
with self.env.db_transaction as db:
ts = to_timestamp(when)
db("""
DELETE FROM session
WHERE authenticated=0 AND last_visit<%s
""", (ts,))
db("""
DELETE FROM session_attribute
WHERE authenticated=0
AND sid NOT IN (SELECT sid FROM session
WHERE authenticated=0)
""")
|
moreati/trac-gitsvn
|
trac/web/session.py
|
Python
|
bsd-3-clause
| 20,065
|
[
"VisIt"
] |
f8e5129799a4d6cae087d78b4bc494cea5e0df13097b0a6812fcf43c2acf3403
|
from models import Base, CityModel, StateModel, UserModel, VisitModel
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql://:@localhost/test', echo=False)
# bind our db models' metadata (Base) to the engine
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
class CityService(object):
''' This is a CRUD service class for the City model.'''
def __init__(self):
self.session = DBSession()
def read_all(self):
try:
data = [city.to_json() for city in self.session.query(CityModel).all()]
return (True, data)
except Exception as x:
return (False, x)
def read(self, id=None, name=None):
try:
city = self.session.query(CityModel).filter(
(CityModel.id == id) |
(CityModel.name == name)
).first()
return (True, city)
except Exception as x:
return (False, x)
# def create(self, name, state_id, status, latitude, longitude):
# try:
# # insert new object
# city = CityModel(name=name, state_id=state_id, status=status,
# latitude=latitude, longitude=longitude)
# self.session.add(city)
# self.session.commit()
# # refresh new object with DB state
# self.session.refresh(city)
# return (True, city)
# except Exception as x:
# return (False, x)
# def update(self, id, name, state_id, status, latitude, longitude):
# try:
# city = self.session.query(CityModel).filter(CityModel.id == id).one()
# if name is not None:
# city.name = name
# if state_id is not None:
# city.state_id = state_id
# if status is not None:
# city.status = status
# if latitude is not None:
# city.latitude = latitude
# if longitude is not None:
# city.longitude = longitude
# self.session.add(city)
# self.session.commit()
# # refresh new object with DB state
# self.session.refresh(city)
# return (True, city)
# except Exception as x:
# return (False, x)
# def delete(self, id):
# try:
# city = self.session.query(CityModel).filter(CityModel.id == id).one()
# self.session.delete(city)
# self.session.commit()
# return (True, city)
# except Exception as x:
# return (False, x)
class StateService(object):
''' This is a CRUD service class for the State model.'''
def __init__(self):
self.session = DBSession()
def read_all(self):
try:
data = [state.to_json() for state in self.session.query(StateModel).all()]
return (True, data)
except Exception as x:
return (False, x)
def read(self, id, name):
try:
state = self.session.query(StateModel).filter(
(StateModel.id == id) |
(StateModel.name == name) |
(StateModel.abbreviation == name)
).one()
return (True, state)
except Exception as x:
return (False, x)
# def create(self, name, abbreviation):
# try:
# # insert new object
# state = StateModel(name=name, abbreviation=abbreviation)
# self.session.add(state)
# self.session.commit()
# # refresh new object with DB state
# self.session.refresh(state)
# return (True, state)
# except Exception as x:
# return (False, x)
# def update(self, id, name, abbreviation):
# try:
# state = self.session.query(StateModel).filter(StateModel.id == id).one()
# if name is not None:
# state.name = name
# if abbreviation is not None:
# state.abbreviation = abbreviation
# self.session.add(state)
# self.session.commit()
# # refresh new object with DB state
# self.session.refresh(state)
# return (True, state)
# except Exception as x:
# return (False, x)
# def delete(self, id):
# try:
# state = self.session.query(StateModel).filter(StateModel.id == id).one()
# self.session.delete(state)
# self.session.commit()
# return (True, state)
# except Exception as x:
# return (False, x)
class StateCityService(object):
def __init__(self):
self.session = DBSession()
def read(self, state):
try:
state = self.session.query(StateModel).filter(
(StateModel.name == state) |
(StateModel.abbreviation == state)).one()
cities = self.session.query(CityModel).filter(CityModel.state_id == state.id).all()
data = [city.to_json() for city in cities]
return (True, data)
except Exception as x:
return (False, x)
class UserService(object):
''' This is a CRUD service class for the User model.'''
def __init__(self):
self.session = DBSession()
def read_all(self):
try:
data = [user.to_json() for user in self.session.query(UserModel).all()]
return (True, data)
except Exception as x:
return (False, x)
def read(self, id):
try:
user = self.session.query(UserModel).filter(UserModel.id == id).one()
return (True, user)
except Exception as x:
return (False, x)
def create(self, first_name, last_name):
try:
# insert new user object
user = UserModel(first_name=first_name, last_name=last_name)
self.session.add(user)
self.session.commit()
# refresh new user object with DB state
self.session.refresh(user)
return (True, user)
except Exception as x:
return (False, x)
# def update(self, id, first_name, last_name):
# try:
# user = self.session.query(UserModel).filter(UserModel.id == id).one()
# if first_name is not None:
# user.first_name = first_name
# if last_name is not None:
# user.last_name = last_name
# self.session.add(user)
# self.session.commit()
# # refresh new user object with DB state
# self.session.refresh(user)
# return (True, user)
# except Exception as x:
# return (False, x)
# def delete(self, id):
# try:
# user = self.session.query(UserModel).filter(UserModel.id == id).one()
# self.session.delete(user)
# self.session.commit()
# return (True, user)
# except Exception as x:
# return (False, x)
class VisitService(object):
def __init__(self):
self.session = DBSession()
def read_all(self):
try:
visits = self.session.query(VisitModel).all()
data = [visit.to_json() for visit in visits]
return (True, data)
except Exception as x:
return (False, x)
def read(self, user_id):
try:
visits = self.session.query(VisitModel).filter(
(VisitModel.user_id == user_id)
).all()
data = [visit.to_json() for visit in visits]
return (True, data)
except Exception as x:
return (False, x)
def read_cities_visited(self, user_id):
try:
cities = (self.session.query(CityModel)
.join(VisitModel)
.filter(VisitModel.user_id == user_id)
.order_by(CityModel.name)).all()
data = [city.to_json() for city in cities]
return (True, data)
except Exception as x:
return (False, x)
def read_states_visited(self, user_id):
try:
states = (self.session.query(StateModel)
.join(VisitModel)
.filter(VisitModel.user_id == user_id)
.order_by(StateModel.name)).all()
data = [state.to_json() for state in states]
return (True, data)
except Exception as x:
return (False, x)
def create(self, user_id, state_id, city_id):
try:
visit = VisitModel(user_id=user_id, state_id=state_id, city_id=city_id)
self.session.add(visit)
self.session.commit()
self.session.refresh(visit)
return (True, visit)
except Exception as x:
return (False, x)
def delete(self, id):
try:
visit = self.session.query(VisitModel).filter(VisitModel.id == id).one()
self.session.delete(visit)
self.session.commit()
return (True, visit)
except Exception as x:
return (False, x)
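# Illustrative usage sketch (assumes the MySQL DSN above points at a populated
# schema; the names below are for demonstration only):
#
#   ok, cities = CityService().read_all()
#   if ok:
#       for c in cities:
#           print(c)
#   else:
#       print('query failed:', cities)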
|
boxofgoobers/rv-rest-api
|
services.py
|
Python
|
mit
| 7,553
|
[
"VisIt"
] |
ea280bbc295108525903aab3d8bfc95b02b306ab06426b64faa9b39eae2ed752
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @author:Hieda no Chiaki <i@wind.moe>
import logging
from src.md5 import md5
try:
import configparser
except ImportError:
import ConfigParser
logging.basicConfig(level=logging.INFO)
def uid():
try:
config = configparser.ConfigParser()
except:
config = ConfigParser.ConfigParser()
try:
config.read_file(open('./.env'))
except:
config.readfp(open('./.env'))
finally:
uid = config.get("Config", "UID")
key = config.get("Config", "KEY")
api = config.get("Config", "API")
return uid, key, api
def sign(_uid, _md5, _key, _title, _artist, _album):
    raw = _uid + _md5 + _title + _artist + _album + _key
    sign_string = md5(raw.encode('utf-8'), "str")
return sign_string
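# Illustrative only -- hypothetical values showing the expected argument order;
# a real caller would pass the md5 of the target file:
#   _uid, _key, _api = uid()
#   signature = sign(_uid, 'd41d8cd98f00b204e9800998ecf8427e', _key,
#                    'title', 'artist', 'album')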
if __name__ == "__main__":
pass
|
forblackking/PyBiu
|
src/sign.py
|
Python
|
apache-2.0
| 854
|
[
"MOE"
] |
28d33ba356faf44a9ad93fd4b1fd0bbdd995ab552395e0abdd62796516eb2913
|
# remotefilelogserver.py - server logic for a remotefilelog server
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import stat
import time
import zlib
from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
changegroup,
changelog,
context,
error,
extensions,
match,
pycompat,
scmutil,
store,
streamclone,
util,
wireprotoserver,
wireprototypes,
wireprotov1server,
)
from . import (
constants,
shallowutil,
)
_sshv1server = wireprotoserver.sshv1protocolhandler
def setupserver(ui, repo):
"""Sets up a normal Mercurial repo so it can serve files to shallow repos."""
onetimesetup(ui)
# don't send files to shallow clients during pulls
def generatefiles(
orig, self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
):
caps = self._bundlecaps or []
if constants.BUNDLE2_CAPABLITY in caps:
# only send files that don't match the specified patterns
includepattern = None
excludepattern = None
for cap in self._bundlecaps or []:
if cap.startswith(b"includepattern="):
includepattern = cap[len(b"includepattern=") :].split(b'\0')
elif cap.startswith(b"excludepattern="):
excludepattern = cap[len(b"excludepattern=") :].split(b'\0')
m = match.always()
if includepattern or excludepattern:
m = match.match(
repo.root, b'', None, includepattern, excludepattern
)
        changedfiles = [f for f in changedfiles if not m(f)]
return orig(
self, changedfiles, linknodes, commonrevs, source, *args, **kwargs
)
extensions.wrapfunction(
changegroup.cgpacker, b'generatefiles', generatefiles
)
onetime = False
def onetimesetup(ui):
"""Configures the wireprotocol for both clients and servers."""
global onetime
if onetime:
return
onetime = True
# support file content requests
wireprotov1server.wireprotocommand(
b'x_rfl_getflogheads', b'path', permission=b'pull'
)(getflogheads)
wireprotov1server.wireprotocommand(
b'x_rfl_getfiles', b'', permission=b'pull'
)(getfiles)
wireprotov1server.wireprotocommand(
b'x_rfl_getfile', b'file node', permission=b'pull'
)(getfile)
class streamstate(object):
match = None
shallowremote = False
noflatmf = False
state = streamstate()
def stream_out_shallow(repo, proto, other):
includepattern = None
excludepattern = None
raw = other.get(b'includepattern')
if raw:
includepattern = raw.split(b'\0')
raw = other.get(b'excludepattern')
if raw:
excludepattern = raw.split(b'\0')
oldshallow = state.shallowremote
oldmatch = state.match
oldnoflatmf = state.noflatmf
try:
state.shallowremote = True
state.match = match.always()
state.noflatmf = other.get(b'noflatmanifest') == b'True'
if includepattern or excludepattern:
state.match = match.match(
repo.root, b'', None, includepattern, excludepattern
)
streamres = wireprotov1server.stream(repo, proto)
# Force the first value to execute, so the file list is computed
# within the try/finally scope
first = next(streamres.gen)
second = next(streamres.gen)
def gen():
yield first
yield second
for value in streamres.gen:
yield value
return wireprototypes.streamres(gen())
finally:
state.shallowremote = oldshallow
state.match = oldmatch
state.noflatmf = oldnoflatmf
wireprotov1server.commands[b'stream_out_shallow'] = (
stream_out_shallow,
b'*',
)
# don't clone filelogs to shallow clients
def _walkstreamfiles(orig, repo, matcher=None):
if state.shallowremote:
# if we are shallow ourselves, stream our local commits
if shallowutil.isenabled(repo):
striplen = len(repo.store.path) + 1
readdir = repo.store.rawvfs.readdir
visit = [os.path.join(repo.store.path, b'data')]
while visit:
p = visit.pop()
for f, kind, st in readdir(p, stat=True):
fp = p + b'/' + f
if kind == stat.S_IFREG:
if not fp.endswith(b'.i') and not fp.endswith(
b'.d'
):
n = util.pconvert(fp[striplen:])
d = store.decodedir(n)
t = store.FILETYPE_OTHER
yield (t, d, n, st.st_size)
if kind == stat.S_IFDIR:
visit.append(fp)
if scmutil.istreemanifest(repo):
for (t, u, e, s) in repo.store.datafiles():
if u.startswith(b'meta/') and (
u.endswith(b'.i') or u.endswith(b'.d')
):
yield (t, u, e, s)
# Return .d and .i files that do not match the shallow pattern
match = state.match
if match and not match.always():
for (t, u, e, s) in repo.store.datafiles():
f = u[5:-2] # trim data/... and .i/.d
if not state.match(f):
yield (t, u, e, s)
for x in repo.store.topfiles():
if state.noflatmf and x[1][:11] == b'00manifest.':
continue
yield x
elif shallowutil.isenabled(repo):
# don't allow cloning from a shallow repo to a full repo
# since it would require fetching every version of every
# file in order to create the revlogs.
raise error.Abort(
_(b"Cannot clone from a shallow repo to a full repo.")
)
else:
for x in orig(repo, matcher):
yield x
extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
# expose remotefilelog capabilities
def _capabilities(orig, repo, proto):
caps = orig(repo, proto)
if shallowutil.isenabled(repo) or ui.configbool(
b'remotefilelog', b'server'
):
if isinstance(proto, _sshv1server):
# legacy getfiles method which only works over ssh
caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
caps.append(b'x_rfl_getflogheads')
caps.append(b'x_rfl_getfile')
return caps
extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
def _adjustlinkrev(orig, self, *args, **kwargs):
# When generating file blobs, taking the real path is too slow on large
# repos, so force it to just return the linkrev directly.
repo = self._repo
if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
return self._filelog.linkrev(self._filelog.rev(self._filenode))
return orig(self, *args, **kwargs)
extensions.wrapfunction(
context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
)
def _iscmd(orig, cmd):
if cmd == b'x_rfl_getfiles':
return False
return orig(cmd)
extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
def _loadfileblob(repo, cachepath, path, node):
filecachepath = os.path.join(cachepath, path, hex(node))
if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
filectx = repo.filectx(path, fileid=node)
if filectx.node() == repo.nullid:
repo.changelog = changelog.changelog(repo.svfs)
filectx = repo.filectx(path, fileid=node)
text = createfileblob(filectx)
# TODO configurable compression engines
text = zlib.compress(text)
# everything should be user & group read/writable
oldumask = os.umask(0o002)
try:
dirname = os.path.dirname(filecachepath)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
f = None
try:
f = util.atomictempfile(filecachepath, b"wb")
f.write(text)
except (IOError, OSError):
# Don't abort if the user only has permission to read,
# and not write.
pass
finally:
if f:
f.close()
finally:
os.umask(oldumask)
else:
with open(filecachepath, b"rb") as f:
text = f.read()
return text
def getflogheads(repo, proto, path):
"""A server api for requesting a filelog's heads"""
flog = repo.file(path)
heads = flog.heads()
return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
def getfile(repo, proto, file, node):
"""A server api for requesting a particular version of a file. Can be used
in batches to request many files at once. The return protocol is:
<errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
non-zero for an error.
data is a compressed blob with revlog flag and ancestors information. See
createfileblob for its content.
"""
if shallowutil.isenabled(repo):
return b'1\0' + _(b'cannot fetch remote files from shallow repo')
cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
if not cachepath:
cachepath = os.path.join(repo.path, b"remotefilelogcache")
node = bin(node.strip())
if node == repo.nullid:
return b'0\0'
return b'0\0' + _loadfileblob(repo, cachepath, file, node)
def getfiles(repo, proto):
"""A server api for requesting particular versions of particular files."""
if shallowutil.isenabled(repo):
raise error.Abort(_(b'cannot fetch remote files from shallow repo'))
if not isinstance(proto, _sshv1server):
raise error.Abort(_(b'cannot fetch remote files over non-ssh protocol'))
def streamer():
fin = proto._fin
cachepath = repo.ui.config(b"remotefilelog", b"servercachepath")
if not cachepath:
cachepath = os.path.join(repo.path, b"remotefilelogcache")
while True:
request = fin.readline()[:-1]
if not request:
break
node = bin(request[:40])
if node == repo.nullid:
yield b'0\n'
continue
path = request[40:]
text = _loadfileblob(repo, cachepath, path, node)
yield b'%d\n%s' % (len(text), text)
# it would be better to only flush after processing a whole batch
# but currently we don't know if there are more requests coming
proto._fout.flush()
return wireprototypes.streamres(streamer())
def createfileblob(filectx):
"""
format:
v0:
str(len(rawtext)) + '\0' + rawtext + ancestortext
v1:
'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
metalist := metalist + '\n' + meta | meta
meta := sizemeta | flagmeta
sizemeta := METAKEYSIZE + str(len(rawtext))
flagmeta := METAKEYFLAG + str(flag)
note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
length of 1.
"""
flog = filectx.filelog()
frev = filectx.filerev()
revlogflags = flog._revlog.flags(frev)
if revlogflags == 0:
# normal files
text = filectx.data()
else:
# lfs, read raw revision data
text = flog.rawdata(frev)
repo = filectx._repo
ancestors = [filectx]
try:
repo.forcelinkrev = True
ancestors.extend([f for f in filectx.ancestors()])
ancestortext = b""
for ancestorctx in ancestors:
parents = ancestorctx.parents()
p1 = repo.nullid
p2 = repo.nullid
if len(parents) > 0:
p1 = parents[0].filenode()
if len(parents) > 1:
p2 = parents[1].filenode()
copyname = b""
rename = ancestorctx.renamed()
if rename:
copyname = rename[0]
linknode = ancestorctx.node()
ancestortext += b"%s%s%s%s%s\0" % (
ancestorctx.filenode(),
p1,
p2,
linknode,
copyname,
)
finally:
repo.forcelinkrev = False
header = shallowutil.buildfileblobheader(len(text), revlogflags)
return b"%s\0%s%s" % (header, text, ancestortext)
def gcserver(ui, repo):
if not repo.ui.configbool(b"remotefilelog", b"server"):
return
neededfiles = set()
heads = repo.revs(b"heads(tip~25000:) - null")
cachepath = repo.vfs.join(b"remotefilelogcache")
for head in heads:
mf = repo[head].manifest()
for filename, filenode in pycompat.iteritems(mf):
filecachepath = os.path.join(cachepath, filename, hex(filenode))
neededfiles.add(filecachepath)
# delete unneeded older files
days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
expiration = time.time() - (days * 24 * 60 * 60)
progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
progress.update(0)
for root, dirs, files in os.walk(cachepath):
for file in files:
filepath = os.path.join(root, file)
progress.increment()
if filepath in neededfiles:
continue
stat = os.stat(filepath)
if stat.st_mtime < expiration:
os.remove(filepath)
progress.complete()
|
smmribeiro/intellij-community
|
plugins/hg4idea/testData/bin/hgext/remotefilelog/remotefilelogserver.py
|
Python
|
apache-2.0
| 14,542
|
[
"VisIt"
] |
b893af18cf0396d3d16e39aa1b8024307a6dd30bfbdc3c5bc668851e0285cefc
|
"""
neural_network v0.01
Feed forward neural network class library
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
#feed forward NN implementation
import math
def sigmoid(x):
return 1 / (1 + math.exp(-x))
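# Sanity note (illustrative): sigmoid(0) == 0.5 and sigmoid(x) approaches 1 as
# x grows; Neuron.set_inputs below inlines this same expression instead of
# calling this helper.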
class Neuron:
def __init__(self,id,num_inputs):
self.num_inputs = num_inputs
self.id = id + '_'+ str(self.num_inputs).zfill(3)
self.input_weights = list(range(self.num_inputs))
self.bias = 1
self.output = 0
def create_neuron(self):
return
def get_num_weights(self):
return self.num_inputs + 1
def get_weights(self):
return self.input_weights + [self.bias]
def set_weights(self,weights):
self.input_weights = weights[:-1]
self.bias = weights[-1]
return
def set_inputs(self,inputs):
#verify the inputs
#if len(inputs) != self.num_inputs:
# print "ERROR: incorrect number of required ("+str(self.num_inputs)+ ") inputs received: "+str(len(inputs))
# return
#calculate and set the output
#sum the inputs * weights
output = 0
for i in xrange(self.num_inputs):
output += inputs[i] * self.input_weights[i] #apply input weighting
output *= self.bias #apply the neuron bias
self.output = 1 / (1 + math.exp(-output)) #apply sigmoid
return self.output
def get_output(self):
return self.output
class NeuronLayer:
def __init__(self,id,num_neurons,num_inputs):
self.num_neurons = num_neurons
self.num_inputs = num_inputs
self.neurons = []
self.id = id.zfill(3)
def create_layer(self):
for i in xrange(self.num_neurons):
self.neurons.append(Neuron(self.id + '_' + str(i).zfill(3),self.num_inputs))
self.neurons[-1].create_neuron()
return
def get_neuron_ids(self):
nids = []
for n in self.neurons:
nids.append(n.id)
return nids
class NeuralNet:
def __init__(self,num_inputs,num_outputs,num_hidden_layers,num_neurons_per_hidden_layer):
#define input layer, hidden layer(s) and output layer
self.num_inputs = num_inputs #input layer neurons (each neuron takes only one input each)
self.num_outputs = num_outputs #output layer neurons, num of inputs are defined by num_neurons_per_layer
self.num_hidden_layers = num_hidden_layers
self.num_neurons_per_hidden_layer = num_neurons_per_hidden_layer
self.neuron_layers = []
self.create_network()
def create_network(self):
#build the input layer
#- number of input layer neurons equals the number of NN inputs with each neuron taking one NN input
self.neuron_layers = []
self.neuron_layers.append(NeuronLayer(str(0),self.num_inputs,1))
self.neuron_layers[-1].create_layer()
#build the hidden layers
#- number of inputs equals the number of neurons from the previous layer
for i in xrange(self.num_hidden_layers):
self.neuron_layers.append(NeuronLayer(str(i+1),self.num_neurons_per_hidden_layer,self.neuron_layers[-1].num_neurons))
self.neuron_layers[-1].create_layer()
#build the output layer
#- number of inputs equals the number of neurons from the previous layer
#- number of neurons equals the number of outputs
self.neuron_layers.append(NeuronLayer(str(self.num_hidden_layers + 1),self.num_outputs,self.neuron_layers[-1].num_neurons))
self.neuron_layers[-1].create_layer()
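        # Illustrative shape: NeuralNet(8, 3, 4, 8) yields layer sizes
        # 8 (input) -> 8 x 4 (hidden) -> 3 (output), as in __main__ below.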
return
def get_neuron_ids(self):
        #each ID is formatted as layer_neuron_inputs: 1_2_3 is layer 1, neuron 2 which takes 3 inputs
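        # Illustrative: id '001_002_003' names layer 1, neuron 2 with 3 inputs;
        # get_weighting_params expands it to '001_002_000'..'001_002_003'
        # (three input weights plus the bias).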
nids = []
for nl in self.neuron_layers:
nids += nl.get_neuron_ids()
return nids
def get_weighting_params(self):
ids = self.get_neuron_ids()
params = []
for aid in ids:
layer,neuron,inp = aid.split('_')
for i in xrange(int(inp) + 1):#the plus one is for the bias weighting for the neuron itself
params.append('_'.join((layer,neuron,str(i).zfill(3))))
return params
def get_num_weights(self):
num_weights = 0
ids = self.get_neuron_ids()
for aid in ids:
layer,neuron,inp = aid.split('_')
num_weights += int(inp) + 1
return num_weights
def get_weights(self):
weights = []
for layer in self.neuron_layers:
for neuron in layer.neurons:
weights += neuron.get_weights()
return weights
def set_weights(self,weights):
if len(weights) != self.get_num_weights():
print "Error: NeuralNet:set_weights incorrect number if weights supplied"
return
for layer in self.neuron_layers:
for neuron in layer.neurons:
num = neuron.get_num_weights()
neuron.set_weights(weights[:num])
weights = weights[num:]
return
def set_inputs(self,inputs):
#if len(inputs) != self.num_inputs:
# print "Error: wrong number of inputs"
outputs = []
#set the input layer (one neuron for each input)
for i in xrange(len(inputs)):
outputs.append(self.neuron_layers[0].neurons[i].set_inputs([inputs[i]]))
inputs = outputs
#sets the inputs and returns the outputs for intermediate & final layers
for layer in self.neuron_layers[1:]:
outputs = []
for neuron in layer.neurons:
outputs.append(neuron.set_inputs(inputs))
            #set the intermediate layer outputs as the next layer's inputs
inputs = outputs
#return the final layer outputs
return outputs
#ga-bitbot specific NN utils
def generate_gene_def_template(nn,filename):
template = """{
"name":"REPLACE THIS WITH A NAME",
"version":"0.9",
"description":"REPLACE THIS WITH A DESCRIPTION",
"fitness_script":"%{FITNESS_NAME}",
"fitness_config":
{
"set" :
{
"input_file_name": "./datafeed/bcfeed_mtgoxUSD_1min.csv",
"nlsf": 1.0,
"stbf": 1.025,
"commision": 0.006,
"atr_depth": 60,
"max_length" : 300000,
"enable_flash_crash_protection" : 1,
"flash_crash_protection_delay" : 240
}
},
"set" :
{
"prune_threshold" : 0.30,
"max_prune_threshold" : 0.20,
"min_prune_threshold" : 0.03,
"step_prune_threshold_rate" : 0.03,
"mutate" : 0.10,
"max_mutate" : 0.20,
"min_mutate" : 0.00,
"step_mutate_rate" : 0.0001,
"multiple_parent" : 0.05,
"max_multiple_parents" : 7,
"niche_trigger" : 3,
"niche_min_iteration" : 7,
"bit_sweep_rate" : 0.40,
"bit_sweep_min_iteration" : 5,
"pool_size" : 50,
"pool_family_ratio" : 0.99,
"pool_max_survivor_ratio" : 0.3,
"kill_score" : -10000,
"max_iteration" : 60000,
"local_optima_trigger" : 8
},
"call" :
{
"add_numvar":
[
%{NUMVARS}
]
}
}"""
params = nn.get_weighting_params()
numvars = ""
#num var config of : 11,3,-1.0235,1.0
#gives:
#resolution: 2048 values
#step size: 0.001
#min value: -1.0235
#max value: 1.0235
for p in params:
numvars += '\t\t\t["_NN_'+p+'",11,3,-1.0235,1.0],\n'
numvars = numvars.rstrip(',\n')
template = template.replace('%{FITNESS_NAME}',filename.split('.')[0])
template = template.replace('%{NUMVARS}',numvars)
template = template.replace('\t',' ')
#f = open(filename,'w')
#f.write(template)
#f.close()
return template
def generate_fitness_template(nn,filename):
import neural_network_fitness_template
#f = open('neural_network_fitness_template.pyt','r')
template = neural_network_fitness_template.template #f.read()
#f.close()
params = nn.get_weighting_params()
output = ""
for item in params:
output += "\t\tself._NN_"+item+" = 1\n"
output = output.replace('\t',' ')
template = template.replace('#%{NN_WEIGHTS}',output)
template = template.replace('#%{NN_NUM_INPUTS}',str(nn.num_inputs))
template = template.replace('#%{NN_NUM_OUTPUTS}',str(nn.num_outputs))
template = template.replace('#%{NN_NUM_HIDDEN_LAYERS}',str(nn.num_hidden_layers))
template = template.replace('#%{NN_NUM_NEURONS_PER_HIDDEN_LAYER}',str(nn.num_neurons_per_hidden_layer))
#f = open(filename,'w')
#f.write(template)
#f.close()
return template
if __name__ == "__main__":
num_inputs = 8
num_outputs = 3
num_hidden_layers = 4
num_neurons_per_hidden_layer = 8
nn = NeuralNet(num_inputs,num_outputs,num_hidden_layers,num_neurons_per_hidden_layer)
nn.create_network()
print "Neuron IDs:"
print nn.get_neuron_ids()
print "\n\nNeuron weighting & bias parameters:"
params = nn.get_weighting_params()
#print params
print "\n\nNeuron weighting & bias parameter count:"
num_weights = nn.get_num_weights()
print num_weights
print "\n\nNeuron initalized weights & bias parameters:"
#print nn.get_weights()
print "\n\nNeuron set get weights & bias parameters test:"
weights = list(range(num_weights))
nn.set_weights(weights)
got_weights = nn.get_weights()
if got_weights == weights:
print "passed."
else:
print "failed."
#print got_weights
print "\n\nNeuron set inputs test:"
print "outputs: " + str(nn.set_inputs(range(num_inputs)))
print "\n\nNeuron speed test:"
import time
num_runs = 3000
inputs = range(num_inputs)
start = time.time()
for i in xrange(num_runs):
nn.set_inputs(inputs)
t = time.time() - start
print "updates/sec :",num_runs/t
#build a gene_def template
generate_gene_def_template(nn,'test_scafold.json')
    #build scaffolding for a NN fitness class
generate_fitness_template(nn,'test_fitness.py')
|
OndroNR/ga-bitbot
|
libs/neural_network.py
|
Python
|
gpl-3.0
| 10,579
|
[
"Brian",
"NEURON"
] |
bc5e7c16d49ce303d3268c08999e0a93da8d69d1175737e1a9cbc91521f34727
|
__RCSID__ = "$Id: UsersAndGroups.py 34413 2011-02-19 06:10:18Z rgracian $"
"""
Update Users and Groups from VOMS on CS
"""
import os
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.Core.Security.VOMSService import VOMSService
from DIRAC.Core.Security import Locations, X509Chain
from DIRAC.Core.Utilities import List, Subprocess
from DIRAC import S_OK, S_ERROR, gConfig
class UsersAndGroups( AgentModule ):
def initialize( self ):
self.am_setOption( "PollingTime", 3600 * 6 ) # Every 6 hours
self.vomsSrv = VOMSService()
self.proxyLocation = os.path.join( self.am_getWorkDirectory(), ".volatileId" )
self.__adminMsgs = {}
# print self.getLFCRegisteredDNs()
return S_OK()
def __generateProxy( self ):
self.log.info( "Generating proxy..." )
certLoc = Locations.getHostCertificateAndKeyLocation()
if not certLoc:
self.log.error( "Can not find certificate!" )
return False
chain = X509Chain.X509Chain()
result = chain.loadChainFromFile( certLoc[0] )
if not result[ 'OK' ]:
self.log.error( "Can not load certificate file", "%s : %s" % ( certLoc[0], result[ 'Message' ] ) )
return False
result = chain.loadKeyFromFile( certLoc[1] )
if not result[ 'OK' ]:
self.log.error( "Can not load key file", "%s : %s" % ( certLoc[1], result[ 'Message' ] ) )
return False
result = chain.generateProxyToFile( self.proxyLocation, 3600 )
if not result[ 'OK' ]:
self.log.error( "Could not generate proxy file", result[ 'Message' ] )
return False
self.log.info( "Proxy generated" )
return True
def getLFCRegisteredDNs( self ):
#Request a proxy
if gConfig.useServerCertificate():
if not self.__generateProxy():
        return S_ERROR( "Could not generate proxy" )
#Execute the call
cmdEnv = dict( os.environ )
cmdEnv['LFC_HOST'] = 'lfc-egee.in2p3.fr'
if os.path.isfile( self.proxyLocation ):
cmdEnv[ 'X509_USER_PROXY' ] = self.proxyLocation
lfcDNs = []
try:
retlfc = Subprocess.systemCall( 30, ( 'lfc-listusrmap', ), env = cmdEnv )
if not retlfc['OK']:
self.log.fatal( 'Can not get LFC User List', retlfc['Message'] )
return retlfc
if retlfc['Value'][0]:
self.log.fatal( 'Can not get LFC User List', retlfc['Value'][2] )
return S_ERROR( "lfc-listusrmap failed" )
else:
for item in List.fromChar( retlfc['Value'][1], '\n' ):
dn = item.split( ' ', 1 )[1]
lfcDNs.append( dn )
return S_OK( lfcDNs )
finally:
if os.path.isfile( self.proxyLocation ):
self.log.info( "Destroying proxy..." )
os.unlink( self.proxyLocation )
def checkLFCRegisteredUsers( self, usersData ):
self.log.info( "Checking LFC registered users" )
usersToBeRegistered = {}
result = self.getLFCRegisteredDNs()
if not result[ 'OK' ]:
self.log.error( "Could not get a list of registered DNs from LFC", result[ 'Message' ] )
return result
lfcDNs = result[ 'Value' ]
for user in usersData:
for userDN in usersData[ user ][ 'DN' ]:
if userDN not in lfcDNs:
self.log.info( 'DN "%s" need to be registered in LFC for user %s' % ( userDN, user ) )
if user not in usersToBeRegistered:
usersToBeRegistered[ user ] = []
usersToBeRegistered[ user ].append( userDN )
address = self.am_getOption( 'MailTo', 'graciani@ecm.ub.es' )
fromAddress = self.am_getOption( 'mailFrom', 'graciani@ecm.ub.es' )
if usersToBeRegistered:
subject = 'New LFC Users found'
self.log.info( subject, ", ".join( usersToBeRegistered ) )
body = 'Command to add new entries into LFC: \n'
body += 'login to volhcbXX and run : \n'
body += 'source /afs/cern.ch/lhcb/software/releases/LBSCRIPTS/prod/InstallArea/scripts/LbLogin.csh \n\n'
for lfcuser in usersToBeRegistered:
for lfc_dn in usersToBeRegistered[lfcuser]:
print lfc_dn
body += 'add_DN_LFC --userDN="' + lfc_dn.strip() + '" --nickname=' + lfcuser + '\n'
NotificationClient().sendMail( address, 'UsersAndGroupsAgent: %s' % subject, body, fromAddress )
return S_OK()
def execute( self ):
result = self.__syncCSWithVOMS()
mailMsg = ""
if self.__adminMsgs[ 'Errors' ]:
mailMsg += "\nErrors list:\n %s" % "\n ".join( self.__adminMsgs[ 'Errors' ] )
if self.__adminMsgs[ 'Info' ]:
mailMsg += "\nRun result:\n %s" % "\n ".join( self.__adminMsgs[ 'Info' ] )
NotificationClient().sendMail( self.am_getOption( 'MailTo', 'graciani@ecm.ub.es' ),
"UsersAndGroupsAgent run log", mailMsg,
self.am_getOption( 'mailFrom', 'graciani@ecm.ub.es' ) )
return result
def __syncCSWithVOMS( self ):
self.__adminMsgs = { 'Errors' : [], 'Info' : [] }
#Get DIRAC VOMS Mapping
self.log.info( "Getting DIRAC VOMS mapping" )
mappingSection = '/Registry/VOMS/Mapping'
ret = gConfig.getOptionsDict( mappingSection )
if not ret['OK']:
self.log.fatal( 'No VOMS to DIRAC Group Mapping Available' )
return ret
vomsMapping = ret['Value']
self.log.info( "There are %s registered voms mappings in DIRAC" % len( vomsMapping ) )
#Get VOMS VO name
self.log.info( "Getting VOMS VO name" )
result = self.vomsSrv.admGetVOName()
    if not result['OK']:
      self.log.fatal( 'Could not retrieve VOMS VO name' )
      return result
voNameInVOMS = result[ 'Value' ]
self.log.info( "VOMS VO Name is %s" % voNameInVOMS )
#Get VOMS roles
self.log.info( "Getting the list of registered roles in VOMS" )
result = self.vomsSrv.admListRoles()
    if not result['OK']:
      self.log.fatal( 'Could not retrieve registered roles in VOMS' )
      return result
rolesInVOMS = result[ 'Value' ]
self.log.info( "There are %s registered roles in VOMS" % len( rolesInVOMS ) )
print rolesInVOMS
rolesInVOMS.append( '' )
#Map VOMS roles
vomsRoles = {}
for role in rolesInVOMS:
if role:
role = "%s/%s" % ( voNameInVOMS, role )
else:
role = voNameInVOMS
groupsForRole = []
for group in vomsMapping:
if vomsMapping[ group ] == role:
groupsForRole.append( group )
if groupsForRole:
vomsRoles[ role ] = { 'Groups' : groupsForRole, 'Users' : [] }
self.log.info( "DIRAC valid VOMS roles are:\n\t", "\n\t ".join( vomsRoles.keys() ) )
#Get DIRAC users
self.log.info( "Getting the list of registered users in DIRAC" )
csapi = CSAPI()
ret = csapi.listUsers()
if not ret['OK']:
self.log.fatal( 'Could not retrieve current list of Users' )
return ret
currentUsers = ret['Value']
ret = csapi.describeUsers( currentUsers )
if not ret['OK']:
self.log.fatal( 'Could not retrieve current User description' )
return ret
currentUsers = ret['Value']
self.__adminMsgs[ 'Info' ].append( "There are %s registered users in DIRAC" % len( currentUsers ) )
self.log.info( "There are %s registered users in DIRAC" % len( currentUsers ) )
#Get VOMS user entries
self.log.info( "Getting the list of registered user entries in VOMS" )
result = self.vomsSrv.admListMembers()
    if not result['OK']:
      self.log.fatal( 'Could not retrieve registered user entries in VOMS' )
      return result
usersInVOMS = result[ 'Value' ]
self.__adminMsgs[ 'Info' ].append( "There are %s registered user entries in VOMS" % len( usersInVOMS ) )
self.log.info( "There are %s registered user entries in VOMS" % len( usersInVOMS ) )
#Consolidate users by nickname
usersData = {}
newUserNames = []
knownUserNames = []
obsoleteUserNames = []
self.log.info( "Retrieving usernames..." )
usersInVOMS.sort()
for iUPos in range( len( usersInVOMS ) ):
userName = ''
user = usersInVOMS[ iUPos ]
for oldUser in currentUsers:
if user[ 'DN' ].strip() in List.fromChar( currentUsers[oldUser][ 'DN' ] ):
userName = oldUser
if not userName:
result = self.vomsSrv.attGetUserNickname( user[ 'DN' ], user[ 'CA' ] )
if result[ 'OK' ]:
userName = result[ 'Value' ]
else:
self.__adminMsgs[ 'Errors' ].append( "Could not retrieve nickname for DN %s" % user[ 'DN' ] )
self.log.error( "Could not get nickname for DN %s" % user[ 'DN' ] )
userName = user[ 'mail' ][:user[ 'mail' ].find( '@' )]
if not userName:
self.log.error( "Empty nickname for DN %s" % user[ 'DN' ] )
self.__adminMsgs[ 'Errors' ].append( "Empty nickname for DN %s" % user[ 'DN' ] )
continue
self.log.info( " (%02d%%) Found username %s : %s " % ( ( iUPos * 100 / len( usersInVOMS ) ), userName, user[ 'DN' ] ) )
if userName not in usersData:
usersData[ userName ] = { 'DN': [], 'CA': [], 'Email': [], 'Groups' : ['user'] }
for key in ( 'DN', 'CA', 'mail' ):
value = user[ key ]
if value:
if key == "mail":
List.appendUnique( usersData[ userName ][ 'Email' ], value )
else:
usersData[ userName ][ key ].append( value.strip() )
if userName not in currentUsers:
List.appendUnique( newUserNames, userName )
else:
List.appendUnique( knownUserNames, userName )
self.log.info( "Finished retrieving usernames" )
if newUserNames:
self.log.info( "There are %s new users" % len( newUserNames ) )
else:
self.log.info( "There are no new users" )
#Get the list of users for each group
result = csapi.listGroups()
if not result[ 'OK' ]:
self.log.error( "Could not get the list of groups in DIRAC", result[ 'Message' ] )
return result
staticGroups = result[ 'Value' ]
vomsGroups = []
self.log.info( "Mapping users in VOMS to groups" )
for vomsRole in vomsRoles:
self.log.info( " Getting users for role %s" % vomsRole )
groupsForRole = vomsRoles[ vomsRole ][ 'Groups' ]
vomsMap = vomsRole.split( "Role=" )
for g in groupsForRole:
if g in staticGroups:
staticGroups.pop( staticGroups.index( g ) )
else:
vomsGroups.append( g )
if len( vomsMap ) == 1:
# no Role
users = usersInVOMS
else:
vomsGroup = "Role=".join( vomsMap[:-1] )
if vomsGroup[-1] == "/":
vomsGroup = vomsGroup[:-1]
vomsRole = "Role=%s" % vomsMap[-1]
result = self.vomsSrv.admListUsersWithRole( vomsGroup, vomsRole )
if not result[ 'OK' ]:
errorMsg = "Could not get list of users for VOMS %s" % ( vomsMapping[ group ] )
self.__adminMsgs[ 'Errors' ].append( errorMsg )
self.log.error( errorMsg, result[ 'Message' ] )
return result
users = result['Value']
numUsersInGroup = 0
for vomsUser in users:
for userName in usersData:
if vomsUser[ 'DN' ] in usersData[ userName ][ 'DN' ]:
numUsersInGroup += 1
usersData[ userName ][ 'Groups' ].extend( groupsForRole )
infoMsg = "There are %s users in group(s) %s for VOMS Role %s" % ( numUsersInGroup, ",".join( groupsForRole ), vomsRole )
self.__adminMsgs[ 'Info' ].append( infoMsg )
self.log.info( " %s" % infoMsg )
self.log.info( "Checking static groups" )
staticUsers = []
for group in staticGroups:
self.log.info( " Checking static group %s" % group )
numUsersInGroup = 0
result = csapi.listUsers( group )
if not result[ 'OK' ]:
self.log.error( "Could not get the list of users in DIRAC group %s" % group , result[ 'Message' ] )
return result
for userName in result[ 'Value' ]:
if userName in usersData:
numUsersInGroup += 1
usersData[ userName ][ 'Groups' ].append( group )
else:
if group not in vomsGroups and userName not in staticUsers:
staticUsers.append( userName )
infoMsg = "There are %s users in group %s" % ( numUsersInGroup, group )
self.__adminMsgs[ 'Info' ].append( infoMsg )
self.log.info( " %s" % infoMsg )
if staticUsers:
infoMsg = "There are %s static users: %s" % ( len( staticUsers ) , ', '.join( staticUsers ) )
self.__adminMsgs[ 'Info' ].append( infoMsg )
self.log.info( "%s" % infoMsg )
for user in currentUsers:
if user not in usersData and user not in staticUsers:
self.log.info( 'User %s is no longer valid' % user )
obsoleteUserNames.append( user )
#Do the CS Sync
self.log.info( "Updating CS..." )
ret = csapi.downloadCSData()
if not ret['OK']:
self.log.fatal( 'Can not update from CS', ret['Message'] )
return ret
usersWithMoreThanOneDN = {}
for user in usersData:
csUserData = dict( usersData[ user ] )
if len( csUserData[ 'DN' ] ) > 1:
usersWithMoreThanOneDN[ user ] = csUserData[ 'DN' ]
result = csapi.describeUsers( [ user ] )
if result[ 'OK' ]:
if result[ 'Value' ]:
prevUser = result[ 'Value' ][ user ]
prevDNs = List.fromChar( prevUser[ 'DN' ] )
newDNs = csUserData[ 'DN' ]
for DN in newDNs:
if DN not in prevDNs:
self.__adminMsgs[ 'Info' ].append( "User %s has new DN %s" % ( user, DN ) )
for DN in prevDNs:
if DN not in newDNs:
self.__adminMsgs[ 'Info' ].append( "User %s has lost a DN %s" % ( user, DN ) )
else:
newDNs = csUserData[ 'DN' ]
for DN in newDNs:
self.__adminMsgs[ 'Info' ].append( "New user %s has new DN %s" % ( user, DN ) )
for k in ( 'DN', 'CA', 'Email' ):
csUserData[ k ] = ", ".join( csUserData[ k ] )
result = csapi.modifyUser( user, csUserData, createIfNonExistant = True )
if not result[ 'OK' ]:
        self.__adminMsgs[ 'Errors' ].append( "Cannot modify user %s: %s" % ( user, result[ 'Message' ] ) )
self.log.error( "Cannot modify user %s" % user )
if usersWithMoreThanOneDN:
self.__adminMsgs[ 'Info' ].append( "\nUsers with more than one DN:" )
for uwmtod in sorted( usersWithMoreThanOneDN ):
self.__adminMsgs[ 'Info' ].append( " %s" % uwmtod )
self.__adminMsgs[ 'Info' ].append( " + DN list:" )
for DN in usersWithMoreThanOneDN[uwmtod]:
self.__adminMsgs[ 'Info' ].append( " - %s" % DN )
if obsoleteUserNames:
self.__adminMsgs[ 'Info' ].append( "\nObsolete users:" )
address = self.am_getOption( 'MailTo', 'graciani@ecm.ub.es' )
fromAddress = self.am_getOption( 'mailFrom', 'graciani@ecm.ub.es' )
subject = 'Obsolete LFC Users found'
body = 'Delete entries into LFC: \n'
for obsoleteUser in obsoleteUserNames:
self.log.info( subject, ", ".join( obsoleteUserNames ) )
body += 'for ' + obsoleteUser + '\n'
self.__adminMsgs[ 'Info' ].append( " %s" % obsoleteUser )
self.log.info( "Deleting %s users" % len( obsoleteUserNames ) )
NotificationClient().sendMail( address, 'UsersAndGroupsAgent: %s' % subject, body, fromAddress )
csapi.deleteUsers( obsoleteUserNames )
if newUserNames:
self.__adminMsgs[ 'Info' ].append( "\nNew users:" )
for newUser in newUserNames:
self.__adminMsgs[ 'Info' ].append( " %s" % newUser )
self.__adminMsgs[ 'Info' ].append( " + DN list:" )
for DN in usersData[newUser][ 'DN' ]:
self.__adminMsgs[ 'Info' ].append( " - %s" % DN )
self.__adminMsgs[ 'Info' ].append( " + EMail: %s" % usersData[newUser][ 'Email' ] )
result = csapi.commitChanges()
if not result[ 'OK' ]:
self.log.error( "Could not commit configuration changes", result[ 'Message' ] )
return result
self.log.info( "Configuration committed" )
#LFC Check
if self.am_getOption( "LFCCheckEnabled", True ):
result = self.checkLFCRegisteredUsers( usersData )
if not result[ 'OK' ]:
return result
return S_OK()
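  # A minimal sketch of the usersData layout consumed by the sync above; the
  # field names come from the code, the values are hypothetical:
  #
  #   usersData = { 'jdoe': { 'DN': [ '/DC=org/DC=example/CN=John Doe' ],
  #                           'CA': [ '/DC=org/DC=example/CN=Example CA' ],
  #                           'Email': [ 'jdoe@example.org' ],
  #                           'Groups': [ 'dirac_user' ] } }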
|
sposs/DIRAC
|
ConfigurationSystem/Agent/UsersAndGroups.py
|
Python
|
gpl-3.0
| 16,314
|
[
"DIRAC"
] |
3d1f5480a6e7585e373e8d42d712cfbb3b9937afc319caf8960771b26bc24026
|
import glob
import os
import pickle
import sys
import numpy as np
import subprocess as sp
from morphct.definitions import SINGLE_ORCA_RUN_FILE
from morphct.code import helper_functions as hf
class orcaError(Exception):
def __init__(self, file_name):
self.string = "".join(["No molecular orbital data present for ", file_name])
def __str__(self):
return self.string
def load_orca_output(file_name):
with open(file_name, "r") as orca_file:
data_file = orca_file.readlines()
record_MO_data = False
orbital_data = []
for line in data_file:
if "ORBITAL ENERGIES" in line:
# Next line begins the MO data
record_MO_data = True
continue
if record_MO_data is True:
if "MOLECULAR ORBITALS" in line:
# Don't need anything else from the output file
break
data_in_line = []
for element in line.split(" "):
if len(element) > 1:
try:
data_in_line.append(float(element))
except ValueError:
continue
if len(data_in_line) == 4:
orbital_data.append(data_in_line)
for i in range(len(orbital_data)):
if orbital_data[i][1] == 0:
# This line is the first unoccupied orbital - i.e. LUMO
LUMO = orbital_data[i][3]
HOMO = orbital_data[i - 1][3]
HOMO_1 = orbital_data[i - 2][3]
LUMO_1 = orbital_data[i + 1][3]
# Don't need any other orbitals
break
if record_MO_data is False:
# Molecular orbital data not present in this file
raise orcaError(file_name)
return [HOMO_1, HOMO, LUMO, LUMO_1]
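# A minimal usage sketch for load_orca_output; the path below is hypothetical
# and assumes a completed ORCA single-point output file:
#
#     HOMO_1, HOMO, LUMO, LUMO_1 = load_orca_output("output_orca/single/00000.out")
#     gap = LUMO - HOMO  # the fourth ORBITAL ENERGIES column is tabulated in eV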
def modify_orca_files(file_name, failed_file, failed_count, chromophore_list):
if failed_count == 3:
# Three lots of reruns without any successes, try to turn off SOSCF
print(
"".join(
[
file_name,
": Three lots of reruns without any success -"
" turning off SOSCF to see if that helps...",
]
)
)
turn_off_soscf(failed_file)
elif failed_count == 6:
# Still no joy - increase the number of SCF iterations and see if
# convergence was just slow
print(
"".join(
[
file_name,
": Six lots of reruns without any success -"
" increasing the number of SCF iterations to 500...",
]
)
)
increase_iterations(failed_file)
elif failed_count == 9:
# Finally, turn down the SCF tolerance
print(
"".join(
[
file_name,
": Nine lots of reruns without any success -"
" decreasing SCF tolerance (sloppySCF)...",
]
)
)
reduce_tolerance(failed_file)
elif failed_count == 12:
print(
"".join(
[
file_name,
": Failed to rerun orca 12 times, one final thing"
" that can be done is to change the numerical accuracy...",
]
)
)
revert_orca_files(failed_file)
increase_grid(failed_file)
elif failed_count == 15:
print(
"".join(
[
file_name,
": Failed to rerun orca 15 times. Will try high"
" numerical accuracy with no SOSCF as a last-ditch effort...",
]
)
)
increase_grid_no_soscf(failed_file)
elif failed_count == 18:
# SERIOUS PROBLEM
print(
"".join(
[
file_name,
": Failed to rerun orca 18 times, even with all"
" the input file tweaks. Examine the geometry - it is most likely"
" unreasonable.",
]
)
)
file_string = os.path.splitext(os.path.split(file_name)[1])[0]
for chromo_string in file_string.split("-"):
chromo_ID = int(chromo_string)
print("AAIDs for chromophore", chromo_ID)
print(chromophore_list[chromo_ID].AAIDs)
print("Reverting {:s} back to its original state...".format(file_name))
revert_orca_files(failed_file)
return 1
return 0
def turn_off_soscf(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines[3] = "!ZINDO/S NOSOSCF\n"
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
def reduce_tolerance(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines[3] = "!ZINDO/S NoSOSCF SloppySCF\n"
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
def increase_iterations(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines.append("\n%scf MaxIter 500 end")
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
def increase_grid(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines[3] = "!ZINDO/S SlowConv Grid7 NoFinalGrid\n"
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
def increase_grid_no_soscf(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines[3] = "!ZINDO/S SlowConv Grid7 NoFinalGrid NoSOSCF SloppySCF\n"
original_lines.append("\n%scf MaxIter 500 end")
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
def revert_orca_files(input_file):
with open(input_file, "r") as file_name:
original_lines = file_name.readlines()
original_lines[3] = "! ZINDO/S\n"
for line_no in range(len(original_lines)):
# REMOVE THE SCF ITER
if "%scf MaxIter" in original_lines[line_no]:
original_lines.pop(line_no)
break
with open(input_file, "w+") as file_name:
file_name.writelines(original_lines)
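# All of the rewrite helpers above assume the standard MorphCT ORCA input
# layout, in which the "!" keyword line is the fourth line (index 3) of the
# .inp file. A hedged sketch of the region they touch:
#
#     ...                    <- header/comment lines (indices 0-2)
#     ! ZINDO/S              <- original_lines[3], swapped by the helpers above
#     %scf MaxIter 500 end   <- appended by increase_iterations()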
def rerun_fails(failed_chromo_files, parameter_dict, chromophore_list):
print("")
print(failed_chromo_files)
    number_of_fails = len(failed_chromo_files)
if number_of_fails == 1:
print("There was 1 failed job.")
else:
print("There were {:d} failed jobs.".format(number_of_fails))
proc_IDs = parameter_dict["proc_IDs"]
pop_list = []
permanently_failed = {}
# Firstly, modify the input files to see if numerical tweaks make orca
# happier
for failed_file, failed_data in failed_chromo_files.items():
failed_count = failed_data[0]
error_code = modify_orca_files(
failed_file,
os.path.join(
parameter_dict["output_orca_directory"],
"chromophores",
"input_orca",
failed_file.replace(".out", ".inp"),
),
failed_count,
chromophore_list,
)
if error_code == 1:
# Don't delete the elements from the list here because we're still
# trying to iterate over this dict and it cannot change length!
pop_list.append(failed_file)
permanently_failed[failed_file] = failed_data
# Now pop the correct elements from the failed_chromo_files dict
for failed_file in pop_list:
failed_chromo_files.pop(failed_file)
# If there are no files left, then everything has failed so this function
# has completed its task
if len(failed_chromo_files) == 0:
return failed_chromo_files, permanently_failed
# Otherwise, rerun those failed files.
# First, find the correct locations of the input Files
input_files = [
os.path.join(
parameter_dict["output_orca_directory"],
"chromophores",
"input_orca",
file_name.replace(".out", ".inp"),
)
for file_name in list(failed_chromo_files.keys())
]
# As before, split the list of reruns based on the number of processors
jobs_list = [
input_files[i : i + (int(np.ceil(len(input_files) / len(proc_IDs)))) + 1]
for i in range(
0, len(input_files), int(np.ceil(len(input_files) / float(len(proc_IDs))))
)
]
print(jobs_list)
# Write the jobs pickle for single_core_run_orca to obtain
with open(
os.path.join(
parameter_dict["output_orca_directory"], "chromophores", "orca_jobs.pickle"
),
"wb+",
) as pickle_file:
pickle.dump(jobs_list, pickle_file)
# Now rerun orca
if len(jobs_list) <= len(proc_IDs):
proc_IDs = proc_IDs[: len(jobs_list)]
running_jobs = []
for CPU_rank in proc_IDs:
# The final argument here tells orca to ignore the presence of the
# output file and recalculate
running_jobs.append(
sp.Popen(
[
"python",
SINGLE_ORCA_RUN_FILE,
parameter_dict["output_orca_directory"],
parameter_dict["output_morphology_directory"],
str(CPU_rank),
"1",
"0",
]
)
)
# Wait for running jobs to finish
[p.wait() for p in running_jobs]
# Finally, return the failed files list to the main failure handler to see
# if we need to iterate
return failed_chromo_files, permanently_failed
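# A worked sketch of the job chunking in rerun_fails (hypothetical numbers):
# 10 input files over 4 processors gives ceil(10/4) = 3, so the slicing above
# produces input_files[0:4], [3:7], [6:10] and [9:13]; note that with this
# slicing the boundary files (indices 3, 6 and 9) land in two chunks.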
def calculate_delta_E(chromophore_list, chromo1_ID, chromo2_ID):
chromo1 = chromophore_list[chromo1_ID]
chromo2 = chromophore_list[chromo2_ID]
# NOTE: SANITY CHECK
if (chromo1.ID != chromo1_ID) or (chromo2.ID != chromo2_ID):
raise SystemError(
"chromo1.ID ({0:d}) != chromo1_ID ({1:d}), or chromo2.ID ({2:d}) != chromo2_ID ({3:d})! CHECK CODE!".format(
chromo1.ID, chromo1_ID, chromo2.ID, chromo2_ID
)
)
# END OF SANITY CHECK
if chromo1.species.lower() == "donor":
# Hole transporter
chromo1_E = chromo1.HOMO
elif chromo1.species.lower() == "acceptor":
# Electron transporter
chromo1_E = chromo1.LUMO
if chromo2.species.lower() == "donor":
# Hole transporter
chromo2_E = chromo2.HOMO
elif chromo2.species.lower() == "acceptor":
# Electron transporter
chromo2_E = chromo2.LUMO
return chromo2_E - chromo1_E
def calculate_TI(orbital_splitting, delta_E):
# Use the energy splitting in dimer method to calculate the electronic
# transfer integral in eV
if delta_E ** 2 > orbital_splitting ** 2:
# Avoid an imaginary TI by returning zero.
# (Could use KOOPMAN'S APPROXIMATION here if desired)
TI = 0
else:
TI = 0.5 * np.sqrt((orbital_splitting ** 2) - (delta_E ** 2))
return TI
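# A worked example of the ESD formula above (hypothetical values): for a dimer
# HOMO splitting of 0.10 eV and delta_E = 0.06 eV,
# TI = 0.5 * sqrt(0.10**2 - 0.06**2) = 0.04 eV; if |delta_E| exceeded the
# splitting, calculate_TI would return 0 rather than an imaginary number.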
def update_single_chromophore_list(chromophore_list, parameter_dict):
orca_output_dir = os.path.join(
parameter_dict["output_orca_directory"], "chromophores", "output_orca"
)
# NOTE: This can possibly be done by recursively iterating through the
# neighbourlist of each chromophore, but I imagine Python will whinge about
# the levels of recursion, so for now I'll just go through every
# chromophore twice.
# Firstly, set the energy levels for each single chromophore, rerunning
# them if they fail.
# failed_single_chromos has the form {'file_name': [fail_count,
# location_in_chromophore_list]}
failed_single_chromos = {}
for chromo_location, chromophore in enumerate(chromophore_list):
file_name = "single/{:05d}.out".format(chromophore.ID)
print("\rDetermining energy levels for", file_name, end=" ")
if sys.stdout is not None:
sys.stdout.flush()
# Update the chromophores in the chromophore_list with their
# energy_levels
try:
energy_levels = load_orca_output(os.path.join(orca_output_dir, file_name))
chromophore.HOMO_1 = energy_levels[0]
chromophore.HOMO = energy_levels[1]
chromophore.LUMO = energy_levels[2]
chromophore.LUMO_1 = energy_levels[3]
if parameter_dict["remove_orca_outputs"] is True:
try:
os.remove(os.path.join(orca_output_dir, file_name))
except FileNotFoundError:
# Already deleted
pass
# If this file had originally failed, then we can safely remove it
# from the fail list.
if file_name in failed_single_chromos.keys():
failed_single_chromos.pop(file_name)
except orcaError:
failed_single_chromos[file_name] = [1, chromo_location]
continue
print("")
# Rerun any failed orca jobs
while len(failed_single_chromos) > 0:
failed_single_chromos, permanently_failed = rerun_fails(
failed_single_chromos, parameter_dict, chromophore_list
)
if len(permanently_failed) > 0:
print(permanently_failed)
print("--== CRITICAL ERROR ==--")
print(
"THE ABOVE SINGLE-CHROMOPHORE SYSTEMS FAILED PERMANENTLY. THESE NEED"
" FIXING/REMOVING FROM THE SYSTEM BEFORE ANY FURTHER DATA CAN BE"
" OBTAINED."
)
exit()
successful_reruns = []
# Now check all of the files to see if we can update the
# chromophore_list
for chromo_name, chromo_data in failed_single_chromos.items():
print("Checking previously failed", chromo_name)
chromo_ID = chromo_data[1]
try:
# Update the chromophore data in the chromophore_list
energy_levels = load_orca_output(
                    os.path.join(orca_output_dir, chromo_name)
)
chromophore_list[chromo_ID].HOMO_1 = energy_levels[0]
chromophore_list[chromo_ID].HOMO = energy_levels[1]
chromophore_list[chromo_ID].LUMO = energy_levels[2]
chromophore_list[chromo_ID].LUMO_1 = energy_levels[3]
# This chromophore didn't fail, so remove it from the failed
# list
successful_reruns.append(chromo_name)
if parameter_dict["remove_orca_outputs"] is True:
try:
                    os.remove(os.path.join(orca_output_dir, chromo_name))
except FileNotFoundError:
# Already deleted
pass
except orcaError:
# This chromophore failed so increment its fail counter
failed_single_chromos[chromo_name][0] += 1
continue
for chromo_name in successful_reruns:
failed_single_chromos.pop(chromo_name)
print("")
return chromophore_list
def update_pair_chromophore_list(chromophore_list, parameter_dict):
# Now that all the single chromophore energy levels are done, iterate
# through again and check the neighbours, rerunning the pair file if it
# failed (which it won't have done because all my chromophores are
# delicious now).
orca_output_dir = os.path.join(
parameter_dict["output_orca_directory"], "chromophores", "output_orca"
)
failed_pair_chromos = {}
for chromo_location, chromophore in enumerate(chromophore_list):
neighbour_IDs = [neighbour_data[0] for neighbour_data in chromophore.neighbours]
for neighbour_loc, neighbour_ID in enumerate(neighbour_IDs):
if chromophore.ID > neighbour_ID:
continue
file_name = "pair/{0:05d}-{1:05d}.out".format(chromophore.ID, neighbour_ID)
print("\rDetermining energy levels for", file_name, end=" ")
if sys.stdout is not None:
sys.stdout.flush()
try:
energy_levels = load_orca_output(
os.path.join(orca_output_dir, file_name)
)
dimer_HOMO_1 = energy_levels[0]
dimer_HOMO = energy_levels[1]
dimer_LUMO = energy_levels[2]
dimer_LUMO_1 = energy_levels[3]
if parameter_dict["remove_orca_outputs"] is True:
try:
os.remove(os.path.join(orca_output_dir, file_name))
except FileNotFoundError:
# Already deleted
pass
# If this file had originally failed, then we can safely remove it
# from the fail list.
if file_name in failed_pair_chromos.keys():
failed_pair_chromos.pop(file_name)
except orcaError:
failed_pair_chromos[file_name] = [1, chromo_location, neighbour_ID]
continue
# Calculate the delta_E between the two single chromophores
try:
if parameter_dict["use_koopmans_approximation"]:
delta_E = 0.0
else:
# Calculate Delta_E normally
raise KeyError
except KeyError:
delta_E = calculate_delta_E(
chromophore_list, chromophore.ID, neighbour_ID
)
# Check the chromophore species
assert (
chromophore_list[chromophore.ID].species
== chromophore_list[neighbour_ID].species
)
species = chromophore_list[chromophore.ID].species
# Calculate the TI using the ESD method
if species.lower() == "donor":
TI = calculate_TI(dimer_HOMO - dimer_HOMO_1, delta_E)
elif species.lower() == "acceptor":
TI = calculate_TI(dimer_LUMO - dimer_LUMO_1, delta_E)
# Get the location of the current chromophore.ID in the neighbour's
# neighbourList
reverse_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[neighbour_ID].neighbours
].index(chromophore.ID)
# Update both the current chromophore and the neighbour (for the
# reverse hop)
chromophore.neighbours_delta_E[neighbour_loc] = delta_E
chromophore_list[neighbour_ID].neighbours_delta_E[reverse_loc] = -delta_E
chromophore.neighbours_TI[neighbour_loc] = TI
chromophore_list[neighbour_ID].neighbours_TI[reverse_loc] = TI
# DEBUG ASSERTIONS
# Check list index corresponds to chromophore ID
assert chromo_location == chromophore_list[chromo_location].ID
assert chromo_location == chromophore.ID
# Check the neighbourLoc and reverseLoc give the correct
# chromophoreIDs
assert (
chromophore_list[chromophore.ID].neighbours[neighbour_loc][0]
== chromophore_list[neighbour_ID].ID
)
assert (
chromophore_list[neighbour_ID].neighbours[reverse_loc][0]
== chromophore_list[chromophore.ID].ID
)
# Check the chromophoreList has been updated after updating the
# chromophore instance
assert (
chromophore_list[chromophore.ID].neighbours_TI[neighbour_loc]
== chromophore.neighbours_TI[neighbour_loc]
)
# Check the TI of the forward and backward hops are the same
assert (
chromophore_list[chromophore.ID].neighbours_TI[neighbour_loc]
== chromophore_list[neighbour_ID].neighbours_TI[reverse_loc]
)
# Check the chromophoreList has been updated after updating the
# chromophore instance
assert (
chromophore_list[chromophore.ID].neighbours_delta_E[neighbour_loc]
== chromophore.neighbours_delta_E[neighbour_loc]
)
# Check the Delta_E of the forward and backward hops are *= -1
assert (
chromophore_list[chromophore.ID].neighbours_delta_E[neighbour_loc]
== -chromophore_list[neighbour_ID].neighbours_delta_E[reverse_loc]
)
# END DEBUG ASSERTIONS
print("")
while len(failed_pair_chromos) > 0:
failed_pair_chromos, permanently_failed = rerun_fails(
failed_pair_chromos, parameter_dict, chromophore_list
)
if len(permanently_failed) > 0:
print("--== WARNING ==--")
print(
"The above chromophore-pair systems failed permanently. Setting their"
" transfer integrals to zero, preventing these hops from ever taking"
" place in the KMC."
)
for file_name, chromo_data in permanently_failed.items():
chromo1_ID = chromo_data[1]
chromo2_ID = chromo_data[2]
TI = 0.0
delta_E = 0.0
# Get the location of the neighbour's ID in the current
            # chromophore's neighbourList
neighbour_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[chromo1_ID].neighbours
].index(chromo2_ID)
# Get the location of the current chromophore's ID in the
# neighbour's neighbourList
reverse_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[chromo2_ID].neighbours
].index(chromo1_ID)
# Update both the current chromophore and the neighbour (for the reverse
# hop)
chromophore_list[chromo1_ID].neighbours_delta_E[neighbour_loc] = delta_E
chromophore_list[chromo2_ID].neighbours_delta_E[reverse_loc] = -delta_E
chromophore_list[chromo1_ID].neighbours_TI[neighbour_loc] = TI
chromophore_list[chromo2_ID].neighbours_TI[reverse_loc] = TI
successful_reruns = []
for file_name, chromo_data in failed_pair_chromos.items():
print("Checking previously failed", file_name)
chromo1_ID = chromo_data[1]
chromo2_ID = chromo_data[2]
try:
energy_levels = load_orca_output(
os.path.join(orca_output_dir, file_name)
)
dimer_HOMO_1 = energy_levels[0]
dimer_HOMO = energy_levels[1]
dimer_LUMO = energy_levels[2]
dimer_LUMO_1 = energy_levels[3]
except orcaError:
# This dimer failed so increment its fail counter
failed_pair_chromos[file_name][0] += 1
print(file_name, "still failed, incrementing counter")
continue
# Calculate the delta_E between the two single chromophores
try:
if parameter_dict["use_koopmans_approximation"]:
delta_E = 0.0
else:
# Calculate Delta_E normally
raise KeyError
except KeyError:
                delta_E = calculate_delta_E(
                    chromophore_list, chromo1_ID, chromo2_ID
                )
            # Check the chromophore species
            assert (
                chromophore_list[chromo1_ID].species
                == chromophore_list[chromo2_ID].species
            )
            species = chromophore_list[chromo1_ID].species
# Calculate the TI using the ESD method
if species.lower() == "donor":
TI = calculate_TI(dimer_HOMO - dimer_HOMO_1, delta_E)
elif species.lower() == "acceptor":
TI = calculate_TI(dimer_LUMO - dimer_LUMO_1, delta_E)
# Get the location of the neighbour's ID in the current
        # chromophore's neighbourList
neighbour_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[chromo1_ID].neighbours
].index(chromo2_ID)
# Get the location of the current chromophore's ID in the
# neighbour's neighbourList
reverse_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[chromo2_ID].neighbours
].index(chromo1_ID)
# Update both the current chromophore and the neighbour (for the
# reverse hop)
chromophore_list[chromo1_ID].neighbours_delta_E[neighbour_loc] = delta_E
chromophore_list[chromo2_ID].neighbours_delta_E[reverse_loc] = -delta_E
chromophore_list[chromo1_ID].neighbours_TI[neighbour_loc] = TI
chromophore_list[chromo2_ID].neighbours_TI[reverse_loc] = TI
# This rerun was successful so remove this chromophore from the
# rerun list
successful_reruns.append(file_name)
print(file_name, "was successful!")
# DEBUG ASSERTIONS
# Check the neighbourLoc and reverseLoc give the correct
# chromophoreIDs
assert (
chromophore_list[chromo1_ID].neighbours[neighbour_loc][0]
== chromophore_list[chromo2_ID].ID
)
assert (
chromophore_list[chromo2_ID].neighbours[reverse_loc][0]
== chromophore_list[chromo1_ID].ID
)
# Check the TI of the forward and backward hops are the same
assert (
chromophore_list[chromo1_ID].neighbours_TI[neighbour_loc]
== chromophore_list[chromo2_ID].neighbours_TI[reverse_loc]
)
# Check the Delta_E of the forward and backward hops are *= -1
assert (
chromophore_list[chromo1_ID].neighbours_delta_E[neighbour_loc]
== -chromophore_list[chromo2_ID].neighbours_delta_E[reverse_loc]
)
# END DEBUG ASSERTIONS
for file_name in successful_reruns:
failed_pair_chromos.pop(file_name)
print("")
return chromophore_list
def scale_energies(chromophore_list, parameter_dict):
# Shorter chromophores have significantly deeper HOMOs because they are
# treated as small molecules instead of chain segments. To rectify this,
# find the average energy level for each chromophore and then map that
# average to the literature value.
# First, get the energy level data
chromophore_species = {k: [] for k in parameter_dict["chromophore_species"].keys()}
chromophore_MO_info = {k: {} for k in parameter_dict["chromophore_species"].keys()}
for chromo in chromophore_list:
chromophore_species[chromo.sub_species].append(chromo.get_MO_energy())
for sub_species, chromo_energy in chromophore_species.items():
lit_DOS_std = parameter_dict["chromophore_species"][sub_species][
"target_DOS_std"
]
lit_MO = parameter_dict["chromophore_species"][sub_species]["literature_MO"]
chromophore_MO_info[sub_species]["target_DOS_std"] = lit_DOS_std
chromophore_MO_info[sub_species]["av_MO"] = np.average(chromo_energy)
chromophore_MO_info[sub_species]["std_MO"] = np.std(chromo_energy)
chromophore_MO_info[sub_species]["E_shift"] = (
lit_MO - chromophore_MO_info[sub_species]["av_MO"]
)
for chromo in chromophore_list:
E_shift = chromophore_MO_info[chromo.sub_species]["E_shift"]
target_DOS_std = chromophore_MO_info[chromo.sub_species]["target_DOS_std"]
std_MO = chromophore_MO_info[chromo.sub_species]["std_MO"]
av_MO = chromophore_MO_info[chromo.sub_species]["av_MO"]
chromo.HOMO_1 += E_shift
chromo.HOMO += E_shift
chromo.LUMO += E_shift
chromo.LUMO_1 += E_shift
if (target_DOS_std is not None) and (target_DOS_std > std_MO):
# Determine how many sigmas away from the mean this datapoint is
sigma = (chromo.get_MO_energy() - av_MO) / std_MO
# Calculate the new deviation from the mean based on the target
# STD and sigma
newDeviation = target_DOS_std * sigma
# Work out the change in energy to be applied to meet this target
# energy level
delta_E = (av_MO + newDeviation) - chromo.get_MO_energy()
# Apply the energy level displacement
chromo.HOMO_1 += delta_E
chromo.HOMO += delta_E
chromo.LUMO += delta_E
chromo.LUMO_1 += delta_E
return chromophore_list
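# A numerical sketch of the rescaling branch above (hypothetical values): with
# av_MO = -5.0 eV, std_MO = 0.1 eV and target_DOS_std = 0.2 eV, a chromophore
# at -4.8 eV sits at sigma = 2, so newDeviation = 0.4 eV and
# delta_E = (-5.0 + 0.4) - (-4.8) = +0.2 eV is added to all four energy levels.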
def main(
AA_morphology_dict,
CG_morphology_dict,
CG_to_AAID_master,
parameter_dict,
chromophore_list,
):
pickle_name = os.path.join(
parameter_dict["output_morphology_directory"],
"code",
"".join([os.path.splitext(parameter_dict["morphology"])[0], ".pickle"]),
)
# First, check that we need to examine the single chromophores
run_singles = False
if parameter_dict["overwrite_current_data"] is False:
# Only perform this check if the user hasn't already specified to
# overwrite the data (in which case it runs anyway)
# Run all singles if any of the single's data is missing (i.e. the
# HOMO level should suffice because all energy levels are updated at
# the same time, so we don't need to check all of them individually)
for chromophore in chromophore_list:
if chromophore.HOMO is None:
run_singles = True
if (run_singles is True) or (parameter_dict["overwrite_current_data"] is True):
print("Beginning analysis of single chromophores...")
chromophore_list = update_single_chromophore_list(
chromophore_list, parameter_dict
)
# Now include any scaling to narrow the DoS or modulate the mean to
# match the literature HOMO/LUMO levels (which helps to negate the
# effect of short chromophores with additional hydrogens/terminating
# groups)
print("Scaling energies...")
chromophore_list = scale_energies(chromophore_list, parameter_dict)
print("Single chromophore calculations completed. Saving...")
hf.write_pickle(
(
AA_morphology_dict,
CG_morphology_dict,
CG_to_AAID_master,
parameter_dict,
chromophore_list,
),
pickle_name,
)
else:
print("All single chromophore calculations already performed. Skipping...")
# Then, check the pairs
run_pairs = False
if parameter_dict["overwrite_current_data"] is False:
for chromophore in chromophore_list:
# Just check the first neighbour for each chromophore
for neighbour in chromophore.neighbours_TI:
if neighbour is None:
run_pairs = True
break
if (run_pairs is True) or (parameter_dict["overwrite_current_data"] is True):
print("Beginning analysis of chromophore pairs...")
chromophore_list = update_pair_chromophore_list(
chromophore_list, parameter_dict
)
# DEBUG Testing - you can remove these as the assertions in
# update_pair_chromophore_list should already cover them, however they
# are fast and will ensure that there are no errors in the
# chromophore_list after calculating the T_ij and Delta_E_ijs
T_ij_error = check_forward_backward_hop_T_ij(chromophore_list)
delta_E_error = check_forward_backward_hop_E_ij(chromophore_list)
if T_ij_error or delta_E_error:
raise SystemError("assertions failed, please address in code.")
# END OF DEBUG Testing
print("Pair chromophore calculations completed. Saving...")
hf.write_pickle(
(
AA_morphology_dict,
CG_morphology_dict,
CG_to_AAID_master,
parameter_dict,
chromophore_list,
),
pickle_name,
)
else:
print("All pair chromophore calculations already performed. Skipping...")
return (
AA_morphology_dict,
CG_morphology_dict,
CG_to_AAID_master,
parameter_dict,
chromophore_list,
)
def check_forward_backward_hop_T_ij(chromophore_list):
# Check reverse lookup: T_ij === T_ji
donor_errors = 0
acceptor_errors = 0
for chromo1 in chromophore_list:
chromo1_ID = chromo1.ID
for neighbour_index, chromo2_details in enumerate(chromo1.neighbours):
chromo2_ID = chromo2_details[0]
chromo1_to_2_TI = chromo1.neighbours_TI[neighbour_index]
# Sanity check
assert chromo2_ID == chromophore_list[chromo2_ID].ID
chromo2 = chromophore_list[chromo2_ID]
neighbour2_index = 0
for neighbour2_index, chromo1_details in enumerate(chromo2.neighbours):
if chromo1_details[0] != chromo1_ID:
continue
chromo2_to_1_TI = chromo2.neighbours_TI[neighbour2_index]
break
assert chromo1.species == chromo2.species
try:
assert chromo1_to_2_TI == chromo2_to_1_TI
# Put other assertions in here
except AssertionError:
print("\n<ERROR FOUND>")
print("1 to 2", chromo1_to_2_TI)
print("2 to 1", chromo2_to_1_TI)
print("Chromo 1 ID =", chromo1_ID, "Chromo 2 ID =", chromo2_ID)
print(
"Chromo1 Neighbours: Look for index =",
neighbour_index,
"in",
chromo1.neighbours,
)
print(
"Chromo2 Neighbours: Look for index =",
neighbour2_index,
"in",
chromo2.neighbours,
)
print(
"Chromo1 TIs: Look for index =",
neighbour_index,
"in",
                chromo1.neighbours_TI,
)
print(
"Chromo2 TIs: Look for index =",
neighbour2_index,
"in",
                chromo2.neighbours_TI,
)
if chromo1.species.lower() == "donor":
donor_errors += 1
elif chromo1.species.lower() == "acceptor":
acceptor_errors += 1
if (donor_errors > 0) or (acceptor_errors > 0):
print("--== CRITICAL ERROR ==--")
print(
"\nThere were",
donor_errors,
"cases where T_ij != T_ji in the donor chromophores.",
)
print(
"\nThere were",
acceptor_errors,
"cases where T_ij != T_ji in the acceptor chromophores.",
)
return 1
return 0
def check_forward_backward_hop_E_ij(chromophore_list):
# Check reverse lookup: Delta E_ij === -Delta E_ji
donor_errors = 0
acceptor_errors = 0
for chromophore in chromophore_list:
chromo_ID = chromophore.ID
for neighbour_loc, neighbour_deets in enumerate(chromophore.neighbours):
neighbour_ID = neighbour_deets[0]
assert chromo_ID == chromophore_list[chromo_ID].ID
assert neighbour_ID == chromophore_list[neighbour_ID].ID
# Get the location of the current chromophore.ID in the neighbour's
# neighbourList
reverse_loc = [
neighbour_data[0]
for neighbour_data in chromophore_list[neighbour_ID].neighbours
].index(chromophore.ID)
assert (
neighbour_ID == chromophore_list[chromo_ID].neighbours[neighbour_loc][0]
)
assert (
chromo_ID == chromophore_list[neighbour_ID].neighbours[reverse_loc][0]
)
# Update both the current chromophore and the neighbour (for the
# reverse hop)
try:
assert (
chromophore_list[chromo_ID].neighbours_delta_E[neighbour_loc]
== -chromophore_list[neighbour_ID].neighbours_delta_E[reverse_loc]
)
except AssertionError:
print("\nHOP FROM", chromo_ID, "TO", neighbour_ID)
print(
neighbour_ID,
"should be here",
chromophore.neighbours[neighbour_loc],
)
print(
chromo_ID,
"should be here",
chromophore_list[neighbour_ID].neighbours[reverse_loc],
)
print("--== Transfer Integrals ==--")
print(
"FORWARD:",
chromophore_list[chromo_ID].neighbours_TI[neighbour_loc],
"backward:",
chromophore_list[neighbour_ID].neighbours_TI[reverse_loc],
)
print("--== Delta E_ij ==--")
print(
"FORWARD:",
chromophore_list[chromo_ID].neighbours_delta_E[neighbour_loc],
"backward:",
chromophore_list[neighbour_ID].neighbours_delta_E[reverse_loc],
)
if chromophore.species.lower() == "donor":
donor_errors += 1
elif chromophore.species.lower() == "acceptor":
acceptor_errors += 1
if (donor_errors > 0) or (acceptor_errors > 0):
print("--== CRITICAL ERROR ==--")
print(
"\nThere were",
donor_errors,
"cases where E_ij != -E_ji in the donor chromophores.",
)
print(
"\nThere were",
acceptor_errors,
"cases where E_ij != -E_ji in the acceptor chromophores.",
)
return 1
return 0
if __name__ == "__main__":
try:
pickle_file = sys.argv[1]
    except IndexError:
        print(
            "Please specify the pickle file to load to continue the pipeline from this"
            " point."
        )
        sys.exit(1)
pickle_data = hf.load_pickle(pickle_file)
AA_morphology_dict = pickle_data[0]
CG_morphology_dict = pickle_data[1]
CG_to_AAID_master = pickle_data[2]
parameter_dict = pickle_data[3]
chromophore_list = pickle_data[4]
main(
AA_morphology_dict,
CG_morphology_dict,
CG_to_AAID_master,
parameter_dict,
chromophore_list,
)
|
matty-jones/MorphCT
|
morphct/code/transfer_integrals.py
|
Python
|
gpl-3.0
| 39,487
|
[
"ORCA"
] |
2556d13535983c461d8a67ce58fe1a917d39519d7237971d903c536a8ea94942
|
##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing PSI, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
from distutils.version import LooseVersion
import glob
import os
import shutil
import tempfile
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import BUILD
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_PSI(CMakeMake):
"""
Support for building and installing PSI
"""
def __init__(self, *args, **kwargs):
"""Initialize class variables custom to PSI."""
super(EB_PSI, self).__init__(*args, **kwargs)
self.psi_srcdir = None
self.install_psi_objdir = None
self.install_psi_srcdir = None
@staticmethod
def extra_options():
"""Extra easyconfig parameters specific to PSI."""
extra_vars = CMakeMake.extra_options()
extra_vars.update({
# always include running PSI unit tests (takes about 2h or less)
'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD],
})
# Doesn't work with out-of-source build
extra_vars['separate_build_dir'][0] = False
return extra_vars
def configure_step(self):
"""
Configure build outside of source directory.
"""
try:
objdir = os.path.join(self.builddir, 'obj')
os.makedirs(objdir)
os.chdir(objdir)
except OSError as err:
raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err)
env.setvar('F77FLAGS', os.getenv('F90FLAGS'))
# In order to create new plugins with PSI, it needs to know the location of the source
# and the obj dir after install. These env vars give that information to the configure script.
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep))
self.install_psi_objdir = os.path.join(self.installdir, 'obj')
self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir)
env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir)
env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir)
        # explicitly specify the Python binary to use
pythonroot = get_software_root('Python')
if not pythonroot:
raise EasyBuildError("Python module not loaded.")
# pre 4.0b5, they were using autotools, on newer it's CMake
if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI":
# Use EB Boost
boostroot = get_software_root('Boost')
if not boostroot:
raise EasyBuildError("Boost module not loaded.")
self.log.info("Using configure based build")
env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python'))
env.setvar('USE_SYSTEM_BOOST', 'TRUE')
if self.toolchain.options.get('usempi', None):
# PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly
# we should always specify the sequential Fortran compiler,
# to avoid problems with -lmpi vs -lmpi_mt during linking
fcompvar = 'F77_SEQ'
else:
fcompvar = 'F77'
# update configure options
# using multi-threaded BLAS/LAPACK is important for performance,
# cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii
opt_vars = [
('cc', 'CC'),
('cxx', 'CXX'),
('fc', fcompvar),
('libdirs', 'LDFLAGS'),
('blas', 'LIBBLAS_MT'),
('lapack', 'LIBLAPACK_MT'),
]
for (opt, var) in opt_vars:
self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var)))
# -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers
# both define SEEK_SET, this makes the one for MPI be ignored
self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS'))
# specify location of Boost
self.cfg.update('configopts', "--with-boost=%s" % boostroot)
# enable support for plugins
self.cfg.update('configopts', "--with-plugins")
ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir'])
else:
self.log.info("Using CMake based build")
self.cfg.update('configopts', ' -DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python'))
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
self.log.info("Remove the CMAKE_BUILD_TYPE test in PSI4 source and the downloaded dependencies!")
self.log.info("Use PATCH_COMMAND in the corresponding CMakeLists.txt")
self.cfg['build_type'] = 'EasyBuildRelease'
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', " -DENABLE_MPI=ON")
if get_software_root('imkl'):
self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL")
if self.name == 'PSI4':
pcmsolverroot = get_software_root('PCMSolver')
if pcmsolverroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
pcmsolver = 'PCMSolver'
else:
pcmsolver = 'PCMSOLVER'
self.cfg.update('configopts', " -DENABLE_%s=ON" % pcmsolver)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DPCMSOLVER_ROOT=%s" % pcmsolverroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_PCMSolver=ON "
"-DPCMSolver_DIR=%s/share/cmake/PCMSolver" % pcmsolverroot)
chempsroot = get_software_root('CheMPS2')
if chempsroot:
if LooseVersion(self.version) >= LooseVersion("1.1"):
chemps2 = 'CheMPS2'
else:
chemps2 = 'CHEMPS2'
self.cfg.update('configopts', " -DENABLE_%s=ON" % chemps2)
if LooseVersion(self.version) < LooseVersion("1.2"):
self.cfg.update('configopts', " -DCHEMPS2_ROOT=%s" % chempsroot)
else:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_CheMPS2=ON "
"-DCheMPS2_DIR=%s/share/cmake/CheMPS2" % chempsroot)
            # Be aware: PSI4 wants exact versions of the following deps, built with CMake!
            # If you want to use non-CMake build versions, then you have to provide the
            # corresponding Find<library-name>.cmake scripts
# In PSI4 version 1.2.1, you can check the corresponding CMakeLists.txt file
# in external/upstream/<library-name>/
if LooseVersion(self.version) >= LooseVersion("1.2"):
for dep in ['libxc', 'Libint', 'pybind11', 'gau2grid']:
deproot = get_software_root(dep)
if deproot:
self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_%s=ON" % dep)
dep_dir = os.path.join(deproot, 'share', 'cmake', dep)
self.cfg.update('configopts', " -D%s_DIR=%s " % (dep, dep_dir))
CMakeMake.configure_step(self, srcdir=self.cfg['start_dir'])
def install_step(self):
"""Custom install procedure for PSI."""
super(EB_PSI, self).install_step()
# the obj and unpacked sources must remain available for working with plugins
try:
for subdir in ['obj', self.psi_srcdir]:
# copy symlinks as symlinks to work around broken symlinks
shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir),
symlinks=True)
except OSError as err:
raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err)
def test_step(self):
"""
Run the testsuite of PSI4
"""
testdir = tempfile.mkdtemp()
env.setvar('PSI_SCRATCH', testdir)
if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"):
if self.cfg['runtest']:
paracmd = ''
# Run ctest parallel, but limit to maximum 4 jobs (in case of slow disks)
if self.cfg['parallel']:
if self.cfg['parallel'] > 4:
paracmd = '-j 4'
else:
paracmd = "-j %s" % self.cfg['parallel']
cmd = "ctest %s %s" % (paracmd, self.cfg['runtest'])
run_cmd(cmd, log_all=True, simple=False)
else:
super(EB_PSI, self).test_step()
try:
shutil.rmtree(testdir)
except OSError as err:
raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err)
def sanity_check_step(self):
"""Custom sanity check for PSI."""
custom_paths = {
'files': ['bin/psi4'],
'dirs': ['include', ('share/psi', 'share/psi4')],
}
super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom variables for PSI module."""
txt = super(EB_PSI, self).make_module_extra()
share_dir = os.path.join(self.installdir, 'share')
if os.path.exists(share_dir):
psi4datadir = glob.glob(os.path.join(share_dir, 'psi*'))
if len(psi4datadir) == 1:
txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0])
else:
raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir)
return txt
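# A minimal easyconfig sketch that would drive this easyblock (names and
# versions are hypothetical; software-specific easyblocks such as EB_PSI are
# selected automatically from the software name):
#
#     name = 'PSI4'
#     version = '1.2.1'
#     toolchain = {'name': 'foss', 'version': '2021a'}
#     dependencies = [
#         ('Python', '3.9.5'),
#         ('PCMSolver', '1.2.3'),
#         ('CheMPS2', '1.8.9'),
#     ]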
|
boegel/easybuild-easyblocks
|
easybuild/easyblocks/p/psi.py
|
Python
|
gpl-2.0
| 11,600
|
[
"Psi4"
] |
5a9cda75e0ed2d9a1b37812136d83e5bfaee9eda20e6232b7f3c46b3eb85d885
|
#!/usr/bin/env python
"""Download Stx1a gene sequences from Genbank
Example:
$ python blast_stx1a_genes.py <fasta_file> <output_directory>
"""
import argparse
import csv
import logging
import os
import re
from Bio import SeqIO
from utils import DownloadUtils, SubtypeParser, GeneFilter
from blast import Blast
__author__ = "Matthew Whiteside"
__copyright__ = "Copyright Government of Canada 2012-2015. Funded by the Government of Canada Genomics Research and Development Initiative"
__license__ = "APL"
__version__ = "2.0"
__maintainer__ = "Matthew Whiteside"
__email__ = "mdwhitesi@gmail.com"
logging.basicConfig(
filename='blast_stx1a_genes.log',
level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filemode='w')
logger = logging.getLogger(__name__)
if __name__ == "__main__":
"""Run
"""
# Parse command-line args
parser = argparse.ArgumentParser(description='Download and store NCBI blast results')
parser.add_argument('fasta_file', action="store")
parser.add_argument('output_directory', action="store")
args = parser.parse_args()
tmp_fasta_file = os.path.join(args.output_directory, 'tmp.fasta')
# blast = Blast("Escherichia coli")
# blast.run(args.fasta_file, tmp_fasta_file)
# Initialize serotype parser
seq_tests = [lambda x: len(x) > 900]
gfilter = GeneFilter(sequence_tests=seq_tests)
# Initialize Subtype parser
subtype_names = '(1a|1b|1c|1d)\b'
pattern1 = "(?:stx)?[-_\s]?%s" % subtype_names
parser = SubtypeParser([re.compile(pattern1, flags=re.IGNORECASE)], source_fields=['organism','strain'],
annotation_fields=['source','organism'])
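    # A sketch of what the subtype pattern above matches (hypothetical
    # identifiers): "stx1a", "Stx-1a" and "stx_1a" all capture "1a", while the
    # trailing \b keeps a string such as "1ab" from matching.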
    # Initialize Download object
    dutil = DownloadUtils(args.output_directory, 'Escherichia coli', ['stx1a'], parser, gfilter)
    # Perform Downloads
    dutil.download_by_accession(tmp_fasta_file, fasta_format=True)
    # Parse genbank files for H-types
    dutil.parse_subtype()
    # Generate final output
    invalid = set([])
    subtypes = {}
    for row in csv.reader(open(dutil.subtypefile,'r'),delimiter='\t'):
        name = row[0]
        subt = row[1]
        if name not in subtypes:
            # Never seen before
            subtypes[name] = subt
        elif subtypes[name] == subt:
            # Non-conflicting duplicates
            logger.info("Duplicate instances of {}, subtype:{} in subtype file: {}".format(name,subt,dutil.subtypefile))
        else:
            # Conflict
            logger.warning("Duplicate instances of {} with conflicting subtypes: {} vs {} in subtype file: {}. Removing entry.".format(name,subt,
                subtypes[name],dutil.subtypefile))
            invalid.add(name)
    seqs = {}
    invalid2 = set([])
    with open(tmp_fasta_file, 'r') as infh:
        for seqrecord in SeqIO.parse(infh, 'fasta'):
            name = seqrecord.id
            seq = seqrecord.seq
            if name not in invalid:
                if name in subtypes:
                    # Has matching subtype
                    if name in seqs:
                        # Duplicate in fasta file
                        if seqs[name] == seq:
                            logger.info("Duplicate instances of {} in fasta file: {}".format(name, dutil.fastafile))
                        else:
                            logger.warning("Duplicate instances of {} with conflicting sequences in fasta file: {}".format(name, dutil.fastafile))
                            invalid2.add(name)
                    else:
                        # novel sequence with subtype
                        seqs[name] = seq
                else:
                    # No matching subtype
                    logger.info("No subtype for sequence {} in fasta file: {}".format(name, dutil.fastafile))
    with open(dutil.fastafile, 'w') as outfh, open(dutil.subtypefile, 'w') as subtfh:
        for name in seqs:
            outfh.write('>{}\n{}\n'.format(name, seqs[name]))
            subtfh.write('{}\t{}\n'.format(name, subtypes[name]))
|
superphy/insilico-subtyping
|
phylotyper/data/download/blast_stx1a_genes.py
|
Python
|
apache-2.0
| 4,135
|
[
"BLAST"
] |
670dfa6ac33c5e5ec49d16c7ba4c1036bcde7fbe8b18605052f9372fe8409127
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Roms(MakefilePackage):
"""ROMS is a free-surface, terrain-following,
primitive equations ocean model widely used by
the scientific community for a diverse range of applications"""
homepage = "https://www.myroms.org/"
url = "file://{0}/roms_3.8_source.tar.gz".format(os.getcwd())
manual_download = True
    # TODO: ROMS v3.8 (svn revision 986) requires credentials to download and use.
    # This Spack recipe expects the ROMS source code in .tar.gz format; the
    # checksum may differ from what is provided here. Users can skip checksum
    # verification by passing "--no-checksum" to "spack install".
version('3.8', sha256='5da7a61b69bd3e1f84f33f894a9f418971f3ba61cf9f5ef0a806a722161e2c9a')
variant("openmp", default=False, description="Turn on shared-memory parallelization in ROMS")
variant("mpi", default=True, description="Turn on distributed-memory parallelization in ROMS")
variant(
'roms_application', default='benchmark',
description='Makefile to include its associated header file',
values=('upwelling', 'benchmark'), multi=False
)
variant("debug", default=False, description="Turn on symbolic debug information with no optimization")
depends_on("mpi", when="+mpi")
depends_on("netcdf-fortran")
depends_on("netcdf-c")
depends_on("hdf5+fortran")
depends_on("zlib")
depends_on("curl")
depends_on("amdlibm", when="%aocc")
# Note: you cannot set USE_OpenMP and USE_MPI at the same time
conflicts("+mpi+openmp")
def _copy_arch_file(self, lib):
"""AOCC compiler takes gfortran's makefile as reference"""
copy(
join_path('Compilers', 'Linux-gfortran.mk'),
join_path('Compilers', '{0}-{1}.mk'.format(self.arch, lib))
)
@property
def selected_roms_application(self):
"""
Application type that have been selected in this build
"""
return self.spec.variants['roms_application'].value
@property
def arch(self):
"""return target platform"""
plat = sys.platform
if plat.startswith("linux"):
plat = "Linux"
return plat
def _edit_arch(self, spec, prefix, lib):
"""
Edit Linux-flang.mk makefile to support AOCC compiler
"""
fflags = ['-fveclib=AMDLIBM', '-O3', '-ffast-math',
'-funroll-loops', '-Mstack_arrays', '-std=f2008']
make_aocc = join_path('Compilers',
'{0}-{1}.mk'.format(self.arch, lib))
filter_file(r'\sFC := gfortran*$', 'FC := {0}'.format(lib), make_aocc)
filter_file(r'\sFFLAGS\s:=.*$',
'FFLAGS := {0}'.format(' '.join(fflags)),
make_aocc)
filter_file(r'\sLIBS\s:= [$]',
'LIBS := {0} $'.format(spec['amdlibm'].libs.ld_flags),
make_aocc)
filter_file(r'\sFREEFLAGS\s:=.*',
'FREEFLAGS := -ffree-form',
make_aocc)
def edit(self, spec, prefix):
        # ROMS doesn't support AOCC out of the box;
        # support is extended to AOCC via the steps below
if '%aocc' in self.spec:
lib_info = os.path.basename(spack_fc)
self._copy_arch_file(lib_info)
self._edit_arch(spec, prefix, lib_info)
makefile = FileFilter('makefile')
app_type = self.selected_roms_application
makefile.filter(r'ROMS_APPLICATION.*?=.*',
'ROMS_APPLICATION = {0}'.format(app_type.upper()))
makefile.filter(r'\sFORT\s[?]=.*',
'FORT = {0}'.format(os.path.basename(spack_fc)))
makefile.filter(r'\sUSE_NETCDF4\s[?]=.*', 'USE_NETCDF4 = on')
# Build MPI variant of ROMS
if "+mpi" in self.spec:
makefile.filter(r'\sUSE_MPI\s[?]=.*', 'USE_MPI = on')
makefile.filter(r'\sUSE_MPIF90\s[?]=.*', 'USE_MPIF90 = on')
makefile.filter(r'\sUSE_OpenMP\s[?]=.*', 'USE_OpenMP =')
# Build OpenMP variant of ROMS
if "+openmp" in self.spec:
makefile.filter(r'\sUSE_OpenMP\s[?]=.*', 'USE_OpenMP = on')
makefile.filter(r'\sUSE_MPI\s[?]=.*', 'USE_MPI =')
makefile.filter(r'\sUSE_MPIF90\s[?]=.*', 'USE_MPIF90 =')
# Build Debug variant of ROMS
if "+debug" in self.spec:
makefile.filter(r'\sUSE_DEBUG\s[?]=.*', 'USE_DEBUG = on')
def setup_build_environment(self, spack_env):
spec = self.spec
netcdf_include = spec['netcdf-fortran'].prefix.include
nf_config = join_path(spec['netcdf-fortran'].prefix.bin, 'nf-config')
spack_env.set('NF_CONFIG', nf_config)
spack_env.set('NETCDF_INCDIR', netcdf_include)
spack_env.set('HDF5_INCDIR', spec['hdf5'].prefix.include)
spack_env.set('HDF5_LIBDIR', spec['hdf5'].prefix.libs)
def build(self, spec, prefix):
make(parallel=False)
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('roms*', prefix.bin)
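# A usage sketch (hypothetical command line; the source tarball must be placed
# in the current directory first, per the manual_download note above):
#
#     spack install --no-checksum roms@3.8 +mpi roms_application=upwelling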
|
LLNL/spack
|
var/spack/repos/builtin/packages/roms/package.py
|
Python
|
lgpl-2.1
| 5,319
|
[
"NetCDF"
] |
ac3e3e5f12530870a3f27034017d26a2cbecfba75c550592ab11870557816aa1
|
'''
Broken (piecewise continuous) random field generation using rft1d.randn1d
Note:
When FWHM gets large (2FWHM>nNodes), the data should be padded
using the *pad* keyword.
'''
import numpy as np
from matplotlib import pyplot
import rft1d
#(0) Set parameters:
np.random.seed(12345)
nResponses = 5
nNodes = 101
FWHM = 20.0
### create a boolean mask:
nodes = np.array([True]*nNodes) #nothing masked out
nodes[20:30] = False #this region will be masked out
nodes[60:80] = False #this region will be masked out
#(1) Generate Gaussian 1D fields:
y = rft1d.randn1d(nResponses, nodes, FWHM)
#(2) Plot:
pyplot.close('all')
pyplot.plot(y.T)
pyplot.plot([0,100], [0,0], 'k:')
pyplot.xlabel('Field position', size=16)
pyplot.ylabel('z', size=20)
pyplot.title('Broken (piecewise continuous) random fields', size=20)
pyplot.show()
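#(3) A padded variant (sketch): per the note in the docstring, when 2*FWHM
#    exceeds nNodes the field should be generated with the *pad* keyword, e.g.:
# y = rft1d.randn1d(nResponses, nodes, FWHM, pad=True)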
|
0todd0000/rft1d
|
rft1d/examples/random_fields_broken_1.py
|
Python
|
gpl-3.0
| 862
|
[
"Gaussian"
] |
ecf0f83a202f9a375f3ca2939a0d2de30dcb4d24bb419905bdee0622bc81bc64
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in activation functions."""
import tensorflow.compat.v2 as tf
from keras import backend
import keras.layers.activation as activation_layers
from keras.utils.generic_utils import deserialize_keras_object
from keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.util.tf_export import keras_export
# b/123041942
# In TF 2.x, if the `tf.nn.softmax` is used as an activation function in Keras
# layers, it gets serialized as 'softmax_v2' instead of 'softmax' as the
# internal method name is returned in serialization. This results in errors in
# model exporting and loading as Keras can't find any activation function with
# the name of `softmax_v2`.
# This dict maps the activation function name from its v2 version to its
# canonical name.
_TF_ACTIVATIONS_V2 = {
'softmax_v2': 'softmax',
}
@keras_export('keras.activations.softmax')
@tf.__internal__.dispatch.add_dispatch_support
def softmax(x, axis=-1):
"""Softmax converts a vector of values to a probability distribution.
The elements of the output vector are in range (0, 1) and sum to 1.
Each vector is handled independently. The `axis` argument sets which axis
of the input the function is applied along.
Softmax is often used as the activation for the last
layer of a classification network because the result could be interpreted as
a probability distribution.
The softmax of each vector x is computed as
`exp(x) / tf.reduce_sum(exp(x))`.
  The input values are the log-odds of the resulting probability.
Args:
x : Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
Returns:
Tensor, output of softmax transformation (all values are non-negative
and sum to 1).
Examples:
**Example 1: standalone usage**
>>> inputs = tf.random.normal(shape=(32, 10))
>>> outputs = tf.keras.activations.softmax(inputs)
>>> tf.reduce_sum(outputs[0, :]) # Each sample in the batch now sums to 1
<tf.Tensor: shape=(), dtype=float32, numpy=1.0000001>
**Example 2: usage in a `Dense` layer**
>>> layer = tf.keras.layers.Dense(32, activation=tf.keras.activations.softmax)
"""
if x.shape.rank > 1:
if isinstance(axis, int):
output = tf.nn.softmax(x, axis=axis)
else:
# nn.softmax does not support tuple axis.
e = tf.exp(x - tf.reduce_max(x, axis=axis, keepdims=True))
s = tf.reduce_sum(e, axis=axis, keepdims=True)
output = e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D. '
f'Received input: {x}')
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
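# A quick numerical check of the tuple-axis branch above (hypothetical shapes):
# for x of shape (2, 3, 4) and axis=(1, 2), exp(x - max) / sum normalizes each
# 3x4 slice, so tf.reduce_sum(output, axis=(1, 2)) is ~1.0 per sample;
# subtracting the per-slice max first keeps exp() from overflowing.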
@keras_export('keras.activations.elu')
@tf.__internal__.dispatch.add_dispatch_support
def elu(x, alpha=1.0):
"""Exponential Linear Unit.
The exponential linear unit (ELU) with `alpha > 0` is:
`x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`
The ELU hyperparameter `alpha` controls the value to which an
ELU saturates for negative net inputs. ELUs diminish the
vanishing gradient effect.
  ELUs have negative values, which push the mean of the activations
  closer to zero.
Mean activations that are closer to zero enable faster learning as they
bring the gradient closer to the natural gradient.
ELUs saturate to a negative value when the argument gets smaller.
Saturation means a small derivative which decreases the variation
and the information that is propagated to the next layer.
Example Usage:
>>> import tensorflow as tf
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',
... input_shape=(28, 28, 1)))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
>>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))
>>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))
<tensorflow.python.keras.engine.sequential.Sequential object ...>
Args:
x: Input tensor.
alpha: A scalar, slope of negative section. `alpha` controls the value to
which an ELU saturates for negative net inputs.
Returns:
The exponential linear unit (ELU) activation function: `x` if `x > 0` and
`alpha * (exp(x) - 1)` if `x < 0`.
Reference:
[Fast and Accurate Deep Network Learning by Exponential Linear Units
(ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)
"""
return backend.elu(x, alpha)
@keras_export('keras.activations.selu')
@tf.__internal__.dispatch.add_dispatch_support
def selu(x):
"""Scaled Exponential Linear Unit (SELU).
The Scaled Exponential Linear Unit (SELU) activation function is defined as:
- `if x > 0: return scale * x`
- `if x < 0: return scale * alpha * (exp(x) - 1)`
where `alpha` and `scale` are pre-defined constants
(`alpha=1.67326324` and `scale=1.05070098`).
Basically, the SELU activation function multiplies `scale` (> 1) with the
output of the `tf.keras.activations.elu` function to ensure a slope larger
than one for positive inputs.
The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers as long as the weights are initialized
correctly (see `tf.keras.initializers.LecunNormal` initializer)
and the number of input units is "large enough"
(see reference paper for more information).
Example Usage:
>>> num_classes = 10 # 10-class problem
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal',
... activation='selu'))
>>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
Args:
x: A tensor or variable to compute the activation function for.
Returns:
The scaled exponential unit activation: `scale * elu(x, alpha)`.
Notes:
- To be used together with the
`tf.keras.initializers.LecunNormal` initializer.
- To be used together with the dropout variant
`tf.keras.layers.AlphaDropout` (not regular dropout).
References:
- [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
"""
return tf.nn.selu(x)
@keras_export('keras.activations.softplus')
@tf.__internal__.dispatch.add_dispatch_support
def softplus(x):
"""Softplus activation function, `softplus(x) = log(exp(x) + 1)`.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.softplus(a)
>>> b.numpy()
array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The softplus activation: `log(exp(x) + 1)`.
"""
return tf.math.softplus(x)
@keras_export('keras.activations.softsign')
@tf.__internal__.dispatch.add_dispatch_support
def softsign(x):
"""Softsign activation function, `softsign(x) = x / (abs(x) + 1)`.
Example Usage:
>>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32)
>>> b = tf.keras.activations.softsign(a)
>>> b.numpy()
array([-0.5, 0. , 0.5], dtype=float32)
Args:
x: Input tensor.
Returns:
The softsign activation: `x / (abs(x) + 1)`.
"""
return tf.math.softsign(x)
@keras_export('keras.activations.swish')
@tf.__internal__.dispatch.add_dispatch_support
def swish(x):
"""Swish activation function, `swish(x) = x * sigmoid(x)`.
Swish activation function which returns `x*sigmoid(x)`.
It is a smooth, non-monotonic function that consistently matches
or outperforms ReLU on deep networks, it is unbounded above and
bounded below.
Example Usage:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.swish(a)
>>> b.numpy()
array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,
2.0000000e+01], dtype=float32)
Args:
x: Input tensor.
Returns:
The swish activation applied to `x` (see reference paper for details).
Reference:
- [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
"""
return tf.nn.silu(x)
@keras_export('keras.activations.relu')
@tf.__internal__.dispatch.add_dispatch_support
def relu(x, alpha=0., max_value=None, threshold=0.):
"""Applies the rectified linear unit activation function.
With default values, this returns the standard ReLU activation:
`max(x, 0)`, the element-wise maximum of 0 and the input tensor.
Modifying default parameters allows you to use non-zero thresholds,
change the max value of the activation,
and to use a non-zero multiple of the input for values below the threshold.
For example:
>>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32)
>>> tf.keras.activations.relu(foo).numpy()
array([ 0., 0., 0., 5., 10.], dtype=float32)
>>> tf.keras.activations.relu(foo, alpha=0.5).numpy()
array([-5. , -2.5, 0. , 5. , 10. ], dtype=float32)
>>> tf.keras.activations.relu(foo, max_value=5.).numpy()
array([0., 0., 0., 5., 5.], dtype=float32)
>>> tf.keras.activations.relu(foo, threshold=5.).numpy()
array([-0., -0., 0., 0., 10.], dtype=float32)
Args:
x: Input `tensor` or `variable`.
alpha: A `float` that governs the slope for values lower than the
threshold.
max_value: A `float` that sets the saturation threshold (the largest value
the function will return).
threshold: A `float` giving the threshold value of the activation function
below which values will be damped or set to zero.
Returns:
A `Tensor` representing the input tensor,
transformed by the relu activation function.
Tensor will be of the same shape and dtype of input `x`.
"""
return backend.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@keras_export('keras.activations.gelu', v1=[])
@tf.__internal__.dispatch.add_dispatch_support
def gelu(x, approximate=False):
"""Applies the Gaussian error linear unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.keras.activations.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.keras.activations.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
x: Input tensor.
approximate: A `bool`, whether to enable approximation.
Returns:
The gaussian error linear activation:
`0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
if `approximate` is `True` or
`x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`,
if `approximate` is `False`.
Reference:
- [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
"""
return tf.nn.gelu(x, approximate)
@keras_export('keras.activations.tanh')
@tf.__internal__.dispatch.add_dispatch_support
def tanh(x):
"""Hyperbolic tangent activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.tanh(a)
>>> b.numpy()
array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor of same shape and dtype of input `x`, with tanh activation:
`tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`.
"""
return tf.tanh(x)
@keras_export('keras.activations.sigmoid')
@tf.__internal__.dispatch.add_dispatch_support
def sigmoid(x):
"""Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`.
Applies the sigmoid activation function. For small values (<-5),
`sigmoid` returns a value close to zero, and for large values (>5)
the result of the function gets close to 1.
Sigmoid is equivalent to a 2-element Softmax, where the second element is
assumed to be zero. The sigmoid function always returns a value between
0 and 1.
For example:
>>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)
>>> b = tf.keras.activations.sigmoid(a)
>>> b.numpy()
array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01,
1.0000000e+00], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with the sigmoid activation: `1 / (1 + exp(-x))`.
"""
output = tf.sigmoid(x)
# Cache the logits to use for crossentropy loss.
output._keras_logits = x # pylint: disable=protected-access
return output
@keras_export('keras.activations.exponential')
@tf.__internal__.dispatch.add_dispatch_support
def exponential(x):
"""Exponential activation function.
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.exponential(a)
>>> b.numpy()
array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32)
Args:
x: Input tensor.
Returns:
Tensor with exponential activation: `exp(x)`.
"""
return tf.exp(x)
@keras_export('keras.activations.hard_sigmoid')
@tf.__internal__.dispatch.add_dispatch_support
def hard_sigmoid(x):
"""Hard sigmoid activation function.
A faster approximation of the sigmoid activation.
Piecewise linear approximation of the sigmoid function.
Ref: 'https://en.wikipedia.org/wiki/Hard_sigmoid'
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.hard_sigmoid(a)
>>> b.numpy()
array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32)
Args:
x: Input tensor.
Returns:
The hard sigmoid activation, defined as:
- `if x < -2.5: return 0`
- `if x > 2.5: return 1`
- `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`
"""
return backend.hard_sigmoid(x)
@keras_export('keras.activations.linear')
@tf.__internal__.dispatch.add_dispatch_support
def linear(x):
"""Linear activation function (pass-through).
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1., 0., 1., 3.], dtype=float32)
Args:
x: Input tensor.
Returns:
The input, unmodified.
"""
return x
@keras_export('keras.activations.serialize')
@tf.__internal__.dispatch.add_dispatch_support
def serialize(activation):
"""Returns the string identifier of an activation function.
Args:
activation : Function object.
Returns:
String denoting the name attribute of the input function
For example:
>>> tf.keras.activations.serialize(tf.keras.activations.tanh)
'tanh'
>>> tf.keras.activations.serialize(tf.keras.activations.sigmoid)
'sigmoid'
>>> tf.keras.activations.serialize('abcd')
Traceback (most recent call last):
...
ValueError: ('Cannot serialize', 'abcd')
Raises:
ValueError: The input function is not a valid one.
"""
if (hasattr(activation, '__name__') and
activation.__name__ in _TF_ACTIVATIONS_V2):
return _TF_ACTIVATIONS_V2[activation.__name__]
return serialize_keras_object(activation)
# Add additional globals so that deserialize can find these common activation
# functions
leaky_relu = tf.nn.leaky_relu
log_softmax = tf.nn.log_softmax
relu6 = tf.nn.relu6
silu = tf.nn.silu
@keras_export('keras.activations.deserialize')
@tf.__internal__.dispatch.add_dispatch_support
def deserialize(name, custom_objects=None):
"""Returns activation function given a string identifier.
Args:
name: The name of the activation function.
custom_objects: Optional `{function_name: function_obj}`
dictionary listing user-provided activation functions.
Returns:
Corresponding activation function.
For example:
>>> tf.keras.activations.deserialize('linear')
<function linear at 0x1239596a8>
>>> tf.keras.activations.deserialize('sigmoid')
<function sigmoid at 0x123959510>
>>> tf.keras.activations.deserialize('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: `Unknown activation function` if the input string does not
denote any defined TensorFlow activation function.
"""
globs = globals()
# only replace missing activations
activation_globs = activation_layers.get_globals()
for key, val in activation_globs.items():
if key not in globs:
globs[key] = val
return deserialize_keras_object(
name,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='activation function')
@keras_export('keras.activations.get')
@tf.__internal__.dispatch.add_dispatch_support
def get(identifier):
"""Returns function.
Args:
identifier: Function or string
Returns:
Function corresponding to the input string or input function.
For example:
>>> tf.keras.activations.get('softmax')
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(tf.keras.activations.softmax)
<function softmax at 0x1222a3d90>
>>> tf.keras.activations.get(None)
<function linear at 0x1239596a8>
>>> tf.keras.activations.get(abs)
<built-in function abs>
>>> tf.keras.activations.get('abcd')
Traceback (most recent call last):
...
ValueError: Unknown activation function:abcd
Raises:
ValueError: Input is an unknown function or string, i.e., the input does
not denote any defined function.
"""
if identifier is None:
return linear
if isinstance(identifier, (str, dict)):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise TypeError(
f'Could not interpret activation function identifier: {identifier}')
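# A minimal round-trip sketch (not part of the original module), using only the
# public API defined above:
#
#   act = tf.keras.activations.get('gelu')         # string -> function
#   name = tf.keras.activations.serialize(act)     # function -> 'gelu'
#   same = tf.keras.activations.deserialize(name)  # 'gelu' -> function
#   assert same is act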
|
keras-team/keras
|
keras/activations.py
|
Python
|
apache-2.0
| 18,941
|
[
"Gaussian"
] |
a2c5e27b2d1570c2bb8a93d8ac98af3d6b0f53584beca67f8720e907e3b7a42f
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
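# Illustrative (hedged) usage, not in the original script: GetRGBColor('peacock')
# returns the named color's components as doubles in [0, 1]; an unknown name
# appears to leave the default black [0.0, 0.0, 0.0] unchanged.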
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create cutting planes
planes = vtk.vtkPlanes()
points = vtk.vtkPoints()
norms = vtk.vtkFloatArray()
norms.SetNumberOfComponents(3)
points.InsertPoint(0, 0.0, 0.0, 0.0)
norms.InsertTuple3(0, 0.0, 0.0, 1.0)
points.InsertPoint(1, 0.0, 0.0, 0.0)
norms.InsertTuple3(1, -1.0, 0.0, 0.0)
planes.SetPoints(points)
planes.SetNormals(norms)
# texture
texReader = vtk.vtkStructuredPointsReader()
texReader.SetFileName(VTK_DATA_ROOT + "/Data/texThres2.vtk")
texture = vtk.vtkTexture()
texture.SetInputConnection(texReader.GetOutputPort())
texture.InterpolateOff()
texture.RepeatOff()
# read motor parts...each part colored separately
#
byu = vtk.vtkBYUReader()
byu.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
byu.SetPartNumber(1)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(byu.GetOutputPort())
tex1 = vtk.vtkImplicitTextureCoords()
tex1.SetInputConnection(normals.GetOutputPort())
tex1.SetRFunction(planes)
# tex1.FlipTextureOn()
byuMapper = vtk.vtkDataSetMapper()
byuMapper.SetInputConnection(tex1.GetOutputPort())
byuActor = vtk.vtkActor()
byuActor.SetMapper(byuMapper)
byuActor.SetTexture(texture)
byuActor.GetProperty().SetColor(GetRGBColor('cold_grey'))
byu2 = vtk.vtkBYUReader()
byu2.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
byu2.SetPartNumber(2)
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(byu2.GetOutputPort())
tex2 = vtk.vtkImplicitTextureCoords()
tex2.SetInputConnection(normals2.GetOutputPort())
tex2.SetRFunction(planes)
# tex2.FlipTextureOn()
byuMapper2 = vtk.vtkDataSetMapper()
byuMapper2.SetInputConnection(tex2.GetOutputPort())
byuActor2 = vtk.vtkActor()
byuActor2.SetMapper(byuMapper2)
byuActor2.SetTexture(texture)
byuActor2.GetProperty().SetColor(GetRGBColor('peacock'))
byu3 = vtk.vtkBYUReader()
byu3.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
byu3.SetPartNumber(3)
triangle3 = vtk.vtkTriangleFilter()
triangle3.SetInputConnection(byu3.GetOutputPort())
normals3 = vtk.vtkPolyDataNormals()
normals3.SetInputConnection(triangle3.GetOutputPort())
tex3 = vtk.vtkImplicitTextureCoords()
tex3.SetInputConnection(normals3.GetOutputPort())
tex3.SetRFunction(planes)
# tex3.FlipTextureOn()
byuMapper3 = vtk.vtkDataSetMapper()
byuMapper3.SetInputConnection(tex3.GetOutputPort())
byuActor3 = vtk.vtkActor()
byuActor3.SetMapper(byuMapper3)
byuActor3.SetTexture(texture)
byuActor3.GetProperty().SetColor(GetRGBColor('raw_sienna'))
byu4 = vtk.vtkBYUReader()
byu4.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
byu4.SetPartNumber(4)
normals4 = vtk.vtkPolyDataNormals()
normals4.SetInputConnection(byu4.GetOutputPort())
tex4 = vtk.vtkImplicitTextureCoords()
tex4.SetInputConnection(normals4.GetOutputPort())
tex4.SetRFunction(planes)
# tex4.FlipTextureOn()
byuMapper4 = vtk.vtkDataSetMapper()
byuMapper4.SetInputConnection(tex4.GetOutputPort())
byuActor4 = vtk.vtkActor()
byuActor4.SetMapper(byuMapper4)
byuActor4.SetTexture(texture)
byuActor4.GetProperty().SetColor(GetRGBColor('banana'))
byu5 = vtk.vtkBYUReader()
byu5.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
byu5.SetPartNumber(5)
normals5 = vtk.vtkPolyDataNormals()
normals5.SetInputConnection(byu5.GetOutputPort())
tex5 = vtk.vtkImplicitTextureCoords()
tex5.SetInputConnection(normals5.GetOutputPort())
tex5.SetRFunction(planes)
# tex5.FlipTextureOn()
byuMapper5 = vtk.vtkDataSetMapper()
byuMapper5.SetInputConnection(tex5.GetOutputPort())
byuActor5 = vtk.vtkActor()
byuActor5.SetMapper(byuMapper5)
byuActor5.SetTexture(texture)
byuActor5.GetProperty().SetColor(GetRGBColor('peach_puff'))
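# A more compact equivalent of the five per-part pipelines above (a hedged
# sketch, not part of the original test; part 3 would additionally need its
# vtkTriangleFilter inserted before the normals):
#
#   part_colors = {1: 'cold_grey', 2: 'peacock', 3: 'raw_sienna',
#                  4: 'banana', 5: 'peach_puff'}
#   for part, color in part_colors.items():
#       reader = vtk.vtkBYUReader()
#       reader.SetGeometryFileName(VTK_DATA_ROOT + "/Data/motor.g")
#       reader.SetPartNumber(part)
#       norms_f = vtk.vtkPolyDataNormals()
#       norms_f.SetInputConnection(reader.GetOutputPort())
#       tex_f = vtk.vtkImplicitTextureCoords()
#       tex_f.SetInputConnection(norms_f.GetOutputPort())
#       tex_f.SetRFunction(planes)
#       mapper = vtk.vtkDataSetMapper()
#       mapper.SetInputConnection(tex_f.GetOutputPort())
#       actor = vtk.vtkActor()
#       actor.SetMapper(mapper)
#       actor.SetTexture(texture)
#       actor.GetProperty().SetColor(GetRGBColor(color))
#       ren1.AddActor(actor)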
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(byuActor)
ren1.AddActor(byuActor2)
ren1.AddActor(byuActor3)
byuActor3.VisibilityOff()
ren1.AddActor(byuActor4)
ren1.AddActor(byuActor5)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
camera = vtk.vtkCamera()
camera.SetFocalPoint(0.0286334, 0.0362996, 0.0379685)
camera.SetPosition(1.37067, 1.08629, -1.30349)
camera.SetViewAngle(17.673)
camera.SetClippingRange(1, 10)
camera.SetViewUp(-0.376306, -0.5085, -0.774482)
ren1.SetActiveCamera(camera)
# render the image
iren.Initialize()
#iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/IO/Geometry/Testing/Python/motor.py
|
Python
|
bsd-3-clause
| 4,880
|
[
"VTK"
] |
25f402d6dd6cd1ac432aad05b3b71bb726cf3fdf828c00f9116653894c895109
|
# Copyright (c) 2017 Brian Ginsburg, Julie Rutherford-Fields
# This work is available under the "3-clause ('new') BSD License".
# Please see the file COPYING in this distribution
# for license terms.
import os
from app import db
from app.models import User, Message
import nltk
# create the database and create the teergrube user
db.create_all()
teergrube = User(name = 'Tony Hope',
email = os.environ['SEND_EMAIL_ACCOUNT'],
is_teergrube = True)
db.session.add(teergrube)
db.session.commit()
# make nltk_data directory and download stopwords corpus
if not os.path.exists('nltk_data'):
os.makedirs('nltk_data')
nltk.download(info_or_id="stopwords", download_dir="nltk_data")
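# Note (not in the original script): os.environ['SEND_EMAIL_ACCOUNT'] above
# raises KeyError if the variable is unset, so export it before running this.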
|
thuselem/teergrube
|
init_app.py
|
Python
|
bsd-3-clause
| 728
|
[
"Brian"
] |
800639f3d3070fe1b08b04737057fe934311c563010fd5984eb7b09342a29f90
|
import csv
from datetime import datetime
import time
DATE_FORMAT="%Y-%m-%dT%H:%M:%S"
OFFSET = float(_Offset.GetValue(0))
DATE = _Date.GetValue(0)
NDate = _Date.GetValue(1)
if (NDate):
DATE = NDate
POINTFILENAME = _PointFile.GetValue(0)
def ReadPointFile(pointFile):
#we have finished reading the triangle file, now onto the point file
points = vtk.vtkPoints()
try:
pointCSV = csv.reader(pointFile)
#need a parsed header line
headerLine = pointCSV.next()
#add the lowercase/trimmed lines to the header list
header = list()
for key in headerLine:
header.append(key.lower().strip())
#create a dictionary of each line in the file
parseFile = csv.DictReader(pointFile,header)
#have to add a point at zero since the eps files start at point index 1
points.InsertNextPoint(0,0,0)
for line in parseFile:
try:
x = float(line['xp'])
y = float(line['yp'])
z = float(line['zp'])
except:
#ignore bad lines
pass
else:
points.InsertNextPoint(x,y,z)
except:
print 'Failed to parse point file'
return None
else:
return points
def ReadTriangleFile(triangleFile):
#attempt to read the file
cells = vtk.vtkCellArray()
properties = dict()
try:
triangleFile.next()
triangleFile.next()
triangleCSV = csv.reader(triangleFile)
#need a parsed header line
headerLine = triangleCSV.next()
#add the lowercase/trimmed lines to the header list
header = list()
for key in headerLine:
header.append(key.lower().strip())
#create a dictionary of each line in the file
parseFile = csv.DictReader(triangleFile,header)
TRI = 3 #number of points in a triangle
triKeys = set(['pid1', 'pid2', 'pid3']) #keys used in the csv to be point id's
#keys for time component of the file
startKey = "start"
endKey = "duration"
for line in parseFile:
#add the cell for that line, plus the properties
cells.InsertNextCell(TRI)
for key in line.keys():
if (key in triKeys):
p = int ( line[ key ] )
cells.InsertCellPoint(p)
elif startKey == key:
#add the start and end value properties
addTimeValues(properties,startKey,endKey,line[startKey],line[endKey])
elif not (endKey == key): #we need to ignore the end key, as it is handled in startkey
addProperty(properties,key, line[key])
except ValueError:
print 'failed to load triangle File'
return None,None
else:
return cells,properties
def addTimeValues(properties,startKey,endKey,startValue,endValue):
addProperty(properties,startKey,startValue)
#endValue is in seconds, divide by the offset to get the number of timeblocks
duration = float(endValue)/OFFSET
addProperty(properties,endKey, duration)
#I now need to use the DATE and the start and end dates to add a property
#that is in unix seconds that is equal to the start and end absolute times
absStart = "JulianTime Start"
absEnd = "JulianTime End"
startOffset = float(startValue) * OFFSET
#get the start date as a datetime object
startDate = datetime.strptime(DATE,DATE_FORMAT)
#need to make seconds equal to the absolute start time + the stope's start offset
seconds = startDate.second + startOffset
julianStart = findJulianTime(startDate.year,startDate.month, startDate.day, startDate.hour, startDate.minute, seconds)
seconds += float(endValue)
julianEnd = findJulianTime(startDate.year,startDate.month, startDate.day, startDate.hour, startDate.minute, seconds)
addProperty(properties, absStart, julianStart)
addProperty(properties, absEnd, julianEnd)
def findJulianTime(year,month,day,hour,min,sec):
UT=hour+min/60.0+sec/3600.0
total_seconds=hour*3600+min*60+sec
fracday=total_seconds/86400.0
# print dd,month,year, hour,min,sec, UT
if (100*year+month-190002.5)>0:
sig=1
else:
sig=-1
JD = 367*year - int(7*(year+int((month+9)/12))/4) + int(275*month/9) + day + 1721013.5 + UT/24 - 0.5*sig +0.5
return JD
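# Hedged sanity check (not part of the original filter): with the float seconds
# that addTimeValues() passes in, the J2000.0 epoch comes out as expected:
#   findJulianTime(2000, 1, 1, 12, 0, 0.0) -> 2451545.0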
def addProperty(properties,key,value):
if not (key in properties):
#property does not exist, add it
try:
prop = float(value)
except:
#has to be a string
prop = value
properties[key]= vtk.vtkStringArray()
else:
#has to be a float
properties[key] = vtk.vtkDoubleArray()
#set the name
properties[key].SetName(key)
properties[key].InsertNextValue(prop)
else:
try:
prop = float(value)
except:
prop = value
properties[key].InsertNextValue(prop)
#get output reference
output = self.GetPolyDataOutput()
#grab files
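#(note: FILENAME, like _Offset/_Date/_PointFile above, is assumed here to be
# injected by the ParaView programmable-filter wrapper that executes this script)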
try:
triangleFile = file( FILENAME )
pointFile = file( POINTFILENAME )
except:
print "failed to load data, since one of the paths was incorrect"
else:
#try to load the Triangle file
cells,properties = ReadTriangleFile(triangleFile)
points = ReadPointFile(pointFile)
if (cells and points):
#also read properties in
if (properties):
for prop in properties.values():
output.GetCellData().AddArray(prop)
output.SetPoints(points)
output.SetPolys(cells)
triangleFile.close()
pointFile.close()
|
sherizadeh/PVGPlugins
|
Readers/EPSReader/EPSRequestData.py
|
Python
|
bsd-2-clause
| 5,366
|
[
"VTK"
] |
ad0713adf8a8d4dda07e43d3f59ab7aafcff9342ee0754013db3a49448840757
|
"""
@name: Modules/Core/occupancy.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2020-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Feb 2, 2020
@summary:
Mode: vacation, Away, Home
Guests: True
Sleeping: False
"""
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/occupancy.py
|
Python
|
mit
| 296
|
[
"Brian"
] |
eaadf2f345d4f8ec8b8994013c13aad5ab42bde5ddb7bc5f347bd12aa435ec0d
|
#!/usr/bin/env python
from __future__ import unicode_literals, division, print_function, absolute_import
import re
import os
import sys
from abirules_tools import find_abinit_toplevel_directory
re_markers = re.compile("^(<<<<<<< TREE|=======|>>>>>>> MERGE-SOURCE)$")
re_fbktop = re.compile("fallbacks$")
re_fbkdir = re.compile("(exports|sources|stamps)")
re_tmpdir = re.compile("^tmp")
re_tmpfile = re.compile("\.(orig|rej)$")
re_rstfile = re.compile("\.rst$")
# TODO: Should look at .gitignore
exclude_exts = set([
".gz", ".tgz", ".png", ".nc", ".jar", ".xcf", ".pyc",
".gif", ".pct", ".jpg", ".jpeg", ".pdf", ".svg",
".a", ".o", ".mod", ".cpkl", ".pickle", ".tar",
".swp", ".swo", ".odt",
])
exclude_bins = set([
"abinit", "anaddb", "mrgddb", "aim", "fftprof", "mrgdv", "mrggkk", "ujdet",
"band2eps", "abitk", "cut3d", "fold2Bloch", "conducti", "ioprof", "lapackprof",
"macroave", "optic", "vdw_kernelgen", "mrgscr",
])
def check_item(item):
"True if item has to be analyzed."
if re_tmpfile.search(item): return False
if re_rstfile.search(item): return False
if item in exclude_bins: return False
# check extension
_, ext = os.path.splitext(item)
if ext and ext.lower() in exclude_exts: return False
return True
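# Hedged examples of check_item() (not in the original script):
#   check_item("notes.orig") -> False (temporary file)
#   check_item("abinit") -> False (excluded binary)
#   check_item("image.pyc") -> False (excluded extension)
#   check_item("m_errors.F90") -> True (will be scanned)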
def main():
retval = 0
top = find_abinit_toplevel_directory()
assert os.path.exists(top)
for root, dirs, files in os.walk(top):
# Ignore Makefiles
if "Makefile.am" in files: files.remove("Makefile.am")
if "Makefile.in" in files: files.remove("Makefile.in")
if "Makefile" in files: files.remove("Makefile")
# Ignore Autotools subdirs
if "autom4te.cache" in dirs: dirs.remove("autom4te.cache")
# Ignore temporary dirs
garb_dirs = [item for item in dirs if re_tmpdir.match(item)]
for d in garb_dirs: dirs.remove(d)
# Ignore installed fallbacks
if re_fbktop.search(root):
garb_dirs = [item for item in dirs if re_fbkdir.match(item)]
for d in garb_dirs: dirs.remove(d)
# Display conflict markers found
for item in files:
path = os.path.join(root, item)
if not check_item(item): continue
try:
if sys.version_info >= (3, 0):
with open(os.path.join(root, item), "rt", encoding="ISO-8859-1") as fh:
chk_data = fh.readlines()
else:
with open(path, "r") as fh:
chk_data = fh.readlines()
chk_stat = False
for line in chk_data:
if re_markers.match(line):
chk_stat = True
retval += 1
break
if chk_stat:
sys.stderr.write("Found conflict markers in:\n" % path)
except Exception as exc:
retval += 1
sys.stderr.write("Exception while testing: %s\n%s\n" % (path, str(exc)))
return retval
if __name__ == "__main__":
sys.exit(main())
|
abinit/abinit
|
abichecks/scripts/check-conflict-markers.py
|
Python
|
gpl-3.0
| 2,893
|
[
"ABINIT"
] |
3512b29587f23da1e9294caa9b706c838bb10bfbd14e3e7f1b92e5224f247e78
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import math
import pysam
import breakmer.utils as utils
__author__ = "Ryan Abo"
__copyright__ = "Copyright 2015, Ryan Abo"
__email__ = "ryanabo@gmail.com"
__license__ = "MIT"
class FilterValues:
"""
"""
def __init__(self):
self.maxEventSize = None
self.realignFreq = None
self.brkptCoverages = None
self.flankMatchPercents = None
self.minSegmentLen = None
self.minBrkptKmers = None
self.seqComplexity = None
self.startEndMissingQueryCoverage = None
self.missingQueryCoverage = None
self.maxSegmentOverlap = None
# self.maxMeanCoverage = None
self.nReadStrands = None
self.maxRealignmentGap = None
self.deletedSeqs = None
self.insertedSeqs = None
def set_indel_values(self, blatResult, brkptCoverages):
""" """
self.realignFreq = blatResult.alignFreq
self.maxEventSize = blatResult.indel_maxevent_size[0]
self.deletedSeqs = blatResult.get_indel_seqs('del')
self.insertedSeqs = blatResult.get_indel_seqs('ins')
self.brkptCoverages = [min(brkptCoverages), max(brkptCoverages)]
self.flankMatchPercents = []
for flankMatch in blatResult.indel_flank_match:
self.flankMatchPercents.append(round((float(flankMatch) / float(blatResult.get_seq_size('query'))) * 100, 2))
def set_trl_values(self, svEvent):
""" """
blatResult = svEvent.blatResultsSorted[0][0]
breakpoints = svEvent.brkpts
self.minSegmentLen = blatResult.get_nmatch_total()
# Min is taken from read counts around the breakpoints; max from counts at the exact breakpoints
self.brkptCoverages = [min(breakpoints.counts['n']), max(breakpoints.counts['d'])]
self.minBrkptKmers = min(breakpoints.kmers)
# Sequence complexity of the shortest blat aligned sequence
self.seqComplexity = svEvent.get_seq_complexity()
self.startEndMissingQueryCoverage = svEvent.get_startend_missing_query_coverage()
self.missingQueryCoverage = svEvent.get_missing_query_coverage()
self.maxSegmentOverlap = max(blatResult.seg_overlap)
self.nReadStrands = svEvent.check_read_strands()
self.maxRealignmentGap = max(blatResult.gaps.get_gap_sizes())
# Use this value to determine the uniqueness of the realignment
self.realignFreq = svEvent.get_realign_freq()
def set_rearr_values(self, svEvent):
""" """
breakpoints = svEvent.brkpts
blatResult = svEvent.blatResultsSorted[0][0]
self.brkptCoverages = [min(breakpoints.counts['n']), max(breakpoints.counts['d'])]
self.minBrkptKmers = min(breakpoints.kmers)
self.minSegmentLen = blatResult.get_nmatch_total()
self.missingQueryCoverage = svEvent.get_missing_query_coverage()
self.maxSegmentOverlap = max(blatResult.seg_overlap)
self.realignFreq = svEvent.get_realign_freq()
def get_formatted_output_values(self, svType, svSubtype):
""" """
outputValues = {}
if svType == 'indel':
outputValues['maxeventSize'] = self.maxEventSize
outputValues['realignFreq'] = self.realignFreq
# Store the minimum value.
outputValues['breakpointCoverages'] = self.brkptCoverages[0]
outputValues['minSeqEdgeRealignmentPercent'] = min(self.flankMatchPercents)
outputValues['deletedSequences'] = self.deletedSeqs
outputValues['insertedSequences'] = self.insertedSeqs
elif svType == 'rearrangement':
outputValues['minBrkptKmers'] = self.minBrkptKmers
outputValues['minSegmentLen'] = self.minSegmentLen
outputValues['missingQueryCoverage'] = self.missingQueryCoverage
outputValues['maxSegmentOverlap'] = self.maxSegmentOverlap
outputValues['realignFreq'] = ",".join([str(x) for x in self.realignFreq])
if svSubtype == 'trl':
outputValues['breakpointCoverages'] = ",".join([str(x) for x in self.brkptCoverages])
outputValues['sequenceComplexity'] = self.seqComplexity
outputValues['startEndMissingQueryCoverage'] = self.startEndMissingQueryCoverage
outputValues['nReadStrands'] = self.nReadStrands
outputValues['maxRealignmentGapSize'] = self.maxRealignmentGap
outputList = []
for key, value in outputValues.items():
outputList.append(key + '=' + str(value))
return ';'.join(outputList)
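# Illustrative only (not in the original): the string built above has the shape
# 'key1=value1;key2=value2;...', e.g. for an indel something like
# 'maxeventSize=12;realignFreq=1;breakpointCoverages=8;...'; key order is not
# guaranteed, since it follows Python 2 dict iteration order.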
class SVResult:
"""
"""
def __init__(self):
self.loggingName = 'breakmer.caller.sv_caller'
self.fullBreakpointStr = None
self.targetBreakpointStr = None
self.alignCigar = None
self.totalMismatches = None
self.strands = None
self.totalMatching = None
self.svType = ''
self.svSubtype = None
self.splitReadCount = None
self.nKmers = None
self.discReadCount = None
self.contigId = None
self.contigSeq = None
self.targetName = None
self.breakpointCoverageDepth = None
self.description = None
self.genes = None
self.repeatOverlapPercent = None
self.realignmentUniqueness = None
self.filtered = {'status': False, 'reason': []}
self.filterValues = FilterValues()
def format_indel_values(self, svEvent):
"""
"""
self.targetName = svEvent.contig.get_target_name()
self.contigSeq = svEvent.get_contig_seq()
self.contigId = svEvent.get_contig_id()
blatResult = svEvent.blatResults[0][1]
self.genes = blatResult.get_gene_anno()
self.repeatOverlapPercent = 0.0
self.totalMatching = blatResult.get_nmatch_total()
self.realignmentUniqueness = blatResult.alignFreq
self.totalMismatches = blatResult.get_nmatches('mismatch')
self.strands = blatResult.strand
self.fullBreakpointStr = svEvent.get_brkpt_str('target')
self.targetBreakpointStr = svEvent.get_brkpt_str('target')
self.breakpointCoverageDepth = svEvent.get_brkpt_depths()
# List of insertion or deletion sizes that correspond with the breakpoints
self.description = blatResult.indel_sizes
self.alignCigar = blatResult.cigar
self.svType = 'indel'
contigCountTracker = svEvent.contig.get_contig_count_tracker()
contigBrkpts = []
for x in blatResult.breakpts.contigBreakpoints:
for bp in x:
contigBrkpts.append(bp)
self.splitReadCount = [contigCountTracker.get_counts(x, x, 'indel') for x in contigBrkpts]
self.filterValues.set_indel_values(blatResult, self.splitReadCount)
def format_rearrangement_values(self, svEvent):
""" """
utils.log(self.loggingName, 'info', 'Resolving SV calls from blat results')
# Sort the stored blat results by the number of matches to the reference sequence.
blatResSorted = sorted(svEvent.blatResults, key=lambda x: x[0])
resultValid = {'valid': True, 'repeatValid': True}
maxRepeat = 0.0
self.totalMatching = []
self.repeatOverlapPercent = []
self.realignmentUniqueness = []
self.genes = []
self.alignCigar = []
self.strands = []
self.totalMismatches = []
for i, blatResultTuple in enumerate(blatResSorted):
blatResult = blatResultTuple[1]
resultValid['valid'] = resultValid['valid'] and blatResult.valid
maxRepeat = max(maxRepeat, blatResult.repeat_overlap)
self.repeatOverlapPercent.append(blatResult.repeat_overlap)
self.realignmentUniqueness.append(blatResult.alignFreq)
self.totalMatching.append(blatResult.get_nmatch_total())
self.genes.append(blatResult.get_gene_anno())
self.alignCigar.append(blatResult.cigar)
self.strands.append(blatResult.strand)
self.totalMismatches.append(blatResult.get_nmatches('mismatch'))
svEvent.brkpts.update_brkpt_info(blatResult, i, i == (len(blatResSorted) - 1))
# Sort the blatResultsSorted list by the lowest matching result to the highest matching result
svEvent.blatResultsSorted = sorted(svEvent.blatResultsSorted, key=lambda x: x[1])
if svEvent.brkpts.diff_chr():
# translocation event
# print 'sv_caller.py format_rearrangement_values(), set trl values', svEvent.contig.meta.id, svEvent.contig.seq
svEvent.set_brkpt_counts('trl')
self.discReadCount = svEvent.get_disc_read_count()
self.svType = 'rearrangement'
self.svSubtype = 'trl'
self.filterValues.set_trl_values(svEvent)
else:
svEvent.set_brkpt_counts('rearr')
self.svType, self.svSubtype, self.discReadCount = svEvent.define_rearr()
self.genes = list(set(self.genes))
self.description = svEvent.rearrDesc
self.filterValues.set_rearr_values(svEvent)
self.realignmentUniqueness = self.filterValues.realignFreq
self.targetName = svEvent.contig.get_target_name()
self.fullBreakpointStr = svEvent.get_brkpt_str('all')
self.targetBreakpointStr = svEvent.get_brkpt_str('target')
self.breakpointCoverageDepth = svEvent.get_brkpt_depths()
self.splitReadCount = svEvent.get_splitread_count()
self.contigSeq = svEvent.get_contig_seq()
self.contigId = svEvent.get_contig_id()
def set_filtered(self, filterReason):
""" """
self.filtered['status'] = True
self.filtered['reason'].append(filterReason)
def get_old_formatted_output_values(self):
""" """
headerStr = ['genes',
'target_breakpoints',
'align_cigar',
'mismatches',
'strands',
'rep_overlap_segment_len',
'sv_type',
'split_read_count',
'nkmers',
'disc_read_count',
'breakpoint_coverages',
'contig_id',
'contig_seq'
]
brkptStr = self.targetBreakpointStr
if self.svType == 'indel':
brkptStr += ' (' + ','.join([str(x) for x in self.description]) + ')'
repOverlap_segLen_hitFreq = []
for matchLen in self.totalMatching:
repOverlap_segLen_hitFreq.append('0.0:' + str(matchLen) + ':0.0')
nkmers = '0'
outList = [self.targetName,
brkptStr,
self.alignCigar,
self.totalMismatches,
self.strands,
repOverlap_segLen_hitFreq,
self.svType,
self.splitReadCount,
nkmers,
self.discReadCount,
self.breakpointCoverageDepth,
self.contigId,
self.contigSeq,
]
outListStr = []
for item in outList:
if not isinstance(item, list):
outListStr.append(str(item))
else:
outListStr.append(','.join([str(x) for x in item]))
formattedFilterValsStr = self.filterValues.get_formatted_output_values(self.svType, self.svSubtype)
outListStr.append(formattedFilterValsStr)
return ('\t'.join(headerStr), '\t'.join(outListStr))
def get_formatted_output_values(self):
""" """
headerStr = ['Target_Name',
'SV_type',
'SV_subtype',
'Description',
'All_genomic_breakpoints',
'Target_genomic_breakpoints',
'Split_read_counts',
'Discordant_read_counts',
'Read_depth_at_genomic_breakpoints',
'Align_cigar',
'Strands',
'Total_mismatches',
'Total_matching',
'Realignment_uniqueness',
'Contig_ID',
'Contig_length',
'Contig_sequence',
'Filtered',
'Filtered_reason',
'Filter_values'
]
outList = [self.targetName,
self.svType,
self.svSubtype,
self.description,
self.fullBreakpointStr,
self.targetBreakpointStr,
self.splitReadCount,
self.discReadCount,
self.breakpointCoverageDepth,
self.alignCigar,
self.strands,
self.totalMismatches,
self.totalMatching,
self.realignmentUniqueness,
self.contigId,
len(self.contigSeq),
self.contigSeq,
self.filtered['status'],
','.join(self.filtered['reason'])
]
outListStr = []
for item in outList:
if not isinstance(item, list):
outListStr.append(str(item))
else:
outListStr.append(','.join([str(x) for x in item]))
formattedFilterValsStr = self.filterValues.get_formatted_output_values(self.svType, self.svSubtype)
outListStr.append(formattedFilterValsStr)
return ('\t'.join(headerStr), '\t'.join(outListStr))
def is_filtered(self):
""" """
return self.filtered['status']
class SVBreakpoints:
def __init__(self):
self.loggingName = 'breakmer.caller.sv_caller'
self.t = {'target': None, 'other': None}
self.formatted = []
self.r = []
self.q = [[0, 0], []]
self.chrs = []
self.brkptStr = []
self.tcoords = []
self.qcoords = []
self.f = []
self.counts = {'n': [], 'd': [], 'b': []}
self.kmers = []
# Standard format for storing genomic breakpoints for outputting results.
# List of tuples containing ('chr#', bp1, bp2); there will be multiple bps for deletions and
# only one bp for insertions or rearrangement breakpoints.
self.genomicBrkpts = {'target': [], 'other': [], 'all': []}
def update_brkpt_info(self, br, i, last_iter):
"""Infer the breakpoint information from the blat result for rearrangments.
"""
chrom = 'chr' + br.get_seq_name('ref')
ts, te = br.get_coords('ref')
qs, qe = br.get_coords('query')
targetKey = 'target' if br.in_target else 'other'
self.chrs.append(br.get_seq_name('ref'))
self.tcoords.append((ts, te))
self.qcoords.append((qs, qe))
tbrkpt = []
filt_rep_start = None
if i == 0:
self.q[0] = [max(0, qs - 1), qe]
self.q[1].append([qe, qe - self.q[0][0], None])
tbrkpt = [te]
filt_rep_start = br.filter_reps_edges[0]
if br.strand == '-':
tbrkpt = [ts]
filt_rep_start = br.filter_reps_edges[0]
self.genomicBrkpts[targetKey].append((chrom, tbrkpt[0]))
self.genomicBrkpts['all'].append((chrom, tbrkpt[0]))
br.set_sv_brkpt((chrom, tbrkpt[0]), 'rearrangement', targetKey)
elif last_iter:
self.q[1][-1][2] = qe - self.q[1][-1][0]
self.q[1].append([qs, qs - self.q[0][0], qe - qs])
tbrkpt = [ts]
filt_rep_start = br.filter_reps_edges[0]
if br.strand == '-':
tbrkpt = [te]
filt_rep_start = br.filter_reps_edges[1]
self.genomicBrkpts[targetKey].append((chrom, tbrkpt[0]))
self.genomicBrkpts['all'].append((chrom, tbrkpt[0]))
br.set_sv_brkpt((chrom, tbrkpt[0]), 'rearrangement', targetKey)
else:
self.q[1][-1][2] = qe - self.q[1][-1][1]
self.q[1].append([qs, qs - self.q[0][0], qe - qs])
self.q[1].append([qe, qe - qs, None])
self.q[0] = [qs, qe]
tbrkpt = [ts, te]
self.genomicBrkpts[targetKey].append((chrom, ts, te))
self.genomicBrkpts['all'].append((chrom, ts, te))
if br.strand == '+':
br.set_sv_brkpt((chrom, ts, te), 'rearrangement', targetKey)
if br.strand == '-':
filt_rep_start = br.filter_reps_edges[1]
tbrkpt = [te, ts]
self.genomicBrkpts[targetKey].append((chrom, te, ts))
self.genomicBrkpts['all'].append((chrom, te, ts))
br.set_sv_brkpt((chrom, te, ts), 'rearrangement', targetKey)
self.brkptStr.append('chr' + str(br.get_seq_name('ref')) + ":" + "-".join([str(x) for x in tbrkpt]))
self.r.extend(tbrkpt)
self.f.append(filt_rep_start)
self.t[targetKey] = (br.get_seq_name('ref'), tbrkpt[0])
self.formatted.append('chr' + str(br.get_seq_name('ref')) + ":" + "-".join([str(x) for x in tbrkpt]))
def set_indel_brkpts(self, blatResult):
""" """
# List of tuples for indel breakpoints parsed from the blat result ('chr#', bp1, bp2)
self.genomicBrkpts['target'] = blatResult.get_genomic_brkpts()
for brkpt in self.genomicBrkpts['target']:
blatResult.set_sv_brkpt(brkpt, 'indel', 'target')
def diff_chr(self):
"""Determine if the stored realignment results are on multiple chromosomes - indicating a
translocation event.
"""
# print 'Rearr chrs', self.chrs, len(set(self.chrs))
return len(set(self.chrs)) > 1
def get_target_brkpt(self, key):
""" """
return self.genomicBrkpts['target'] # target[key]
def get_brkpt_str(self, targetKey):
""" """
if targetKey is None:
brkptStr = ','.join(self.brkptStr) # self.genomicBrkpts['all']
# for key in self.genomicBrkpts:
# outStr = self.get_brkpt_str(key)
# if brkptStr == '':
# brkptStr = outStr
# elif outStr != '':
# brkptStr += ',' + outStr
return brkptStr
else:
brkptStr = []
for genomicBrkpts in self.genomicBrkpts[targetKey]:
chrom = genomicBrkpts[0]
bps = genomicBrkpts[1:]
brkptStr.append(chrom + ':' + '-'.join([str(x) for x in bps]))
return ','.join(brkptStr)
def get_brkpt_depths(self, sampleBamFn):
""" """
depths = []
bamfile = pysam.Samfile(sampleBamFn, 'rb')
for genomicBrkpt in self.genomicBrkpts['all']:
chrom = genomicBrkpt[0].strip('chr')
bps = genomicBrkpt[1:]
for bp in bps:
alignedDepth = 0
alignedReads = bamfile.fetch(str(chrom), int(bp), int(bp) + 1)
for alignedRead in alignedReads:
if alignedRead.is_duplicate or alignedRead.is_qcfail or alignedRead.is_unmapped or alignedRead.mapq < 10:
continue
alignedDepth += 1
depths.append(alignedDepth)
return depths
def get_splitread_count(self):
""" """
return self.counts['b']
def set_counts(self, svType, contig):
""" """
contigCountTracker = contig.get_contig_count_tracker()
# print 'SV Breakpoint object set_counts(), self.q', self.q
# print 'self.tcoords', self.tcoords
# print 'self.qcoords', self.qcoords
# print 'self.counts', self.counts
for qb in self.q[1]:
left_idx = qb[0] - min(qb[1], 5)
right_idx = qb[0] + min(qb[2], 5)
# print 'qb', qb
# print 'left idx', left_idx
# print 'right idx', right_idx
bc = contigCountTracker.get_counts(left_idx, right_idx, svType)
self.counts['n'].append(min(bc))
self.counts['d'].append(min(contigCountTracker.get_counts((qb[0] - 1), (qb[0] + 1), svType)))
self.counts['b'].append(contigCountTracker.get_counts(qb[0], qb[0], svType))
self.kmers.append(contig.get_kmer_locs()[qb[0]])
utils.log(self.loggingName, 'debug', 'Read count around breakpoint %d : %s' % (qb[0], ",".join([str(x) for x in bc])))
# print 'Stored counts', self.counts
utils.log(self.loggingName, 'debug', 'Kmer count around breakpoints %s' % (",".join([str(x) for x in self.kmers])))
class SVEvent:
def __init__(self, blatResult, contig, svType):
self.loggingName = 'breakmer.caller.sv_caller'
self.svType = svType
self.svSubtype = ''
self.events = []
self.blatResults = []
self.blatResultsSorted = []
self.annotated = False
self.failed_annotation = False
self.qlen = 0
self.nmatch = 0
self.in_target = False
self.contig = contig
self.valid = True
self.in_rep = True
self.querySize = None
self.queryCoverage = [0] * len(contig.seq)
self.brkpts = SVBreakpoints()
self.rearrDesc = None
self.resultValues = SVResult()
self.add(blatResult)
def add(self, blatResult):
queryStartCoord = blatResult.alignVals.get_coords('query', 0)
queryEndCoord = blatResult.alignVals.get_coords('query', 1)
self.blatResults.append((queryStartCoord, blatResult))
# Add the number of hits to the query region
for i in range(queryStartCoord, queryEndCoord):
self.queryCoverage[i] += 1
if not self.querySize:
self.querySize = blatResult.get_seq_size('query')
self.qlen += blatResult.get_query_span()
self.nmatch += blatResult.get_nmatch_total()
self.in_target = self.in_target or blatResult.in_target
self.in_rep = self.in_rep and (blatResult.repeat_overlap > 75.0)
self.valid = self.valid and blatResult.valid
self.blatResultsSorted.append((blatResult, blatResult.get_nmatch_total()))
def result_valid(self):
valid = False
if (len(self.blatResults) > 1) and self.in_target:
valid = True
return valid
def check_annotated(self):
""" """
return self.annotated and not self.failed_annotation
def has_annotations(self):
""" """
return self.annotated
def get_genomic_brkpts(self):
""" """
return self.brkpts.genomicBrkpts
def check_previous_add(self, br):
ncoords = br.get_coords('query')
prev_br, prev_nmatch = self.blatResultsSorted[-1]
prev_coords = prev_br.get_coords('query')
if ncoords[0] == prev_coords[0] and ncoords[1] == prev_coords[1]:
n_nmatch = br.get_nmatch_total()
if abs(prev_nmatch - n_nmatch) < 10:
if not prev_br.in_target and br.in_target:
self.blatResultsSorted[-1] = (br, n_nmatch)
self.blatResults[-1] = (ncoords[0], br)
self.in_target = True
def format_indel_values(self):
"""
"""
self.brkpts.set_indel_brkpts(self.blatResults[0][1])
self.resultValues.format_indel_values(self)
def format_rearr_values(self):
"""
"""
self.resultValues.format_rearrangement_values(self)
def get_disc_read_count(self):
"""Get the number of discordant read pairs that contribute evidence to a detected translocation
event between a target region and another genomic location.
It calls the check_inter_readcounts in breakmer.processor.bam_handler module with the target and
'other' breakpoints.
Args:
None
Returns:
discReadCount (int): The number of discordant read pairs that support a detected event with
specified breakpoints.
This needs to deal with the situation below where there are more than two realignment results.
In this general scenario, the target breakpoint nearest the non-target breakpoint needs to be
passed to the check_inter_readcounts function.
Example 1:
[blatResult1 (target), blatResult2 (non-target)] - most common scenario.
Example 2:
[blatResult1 (target), blatResult2 (target), blatResult3 (non-target)]
"""
# Sort the blat results by lowest to highest query coordinate value.
querySortedResults = sorted(self.blatResults, key=lambda x: x[0])
inTarget = [None, None] # Tracks the in_target state of the last realignment result and the breakpoint of that result.
targetBrkpt = None # Track the target breakpoint nearest the non-target breakpoint result.
# Iterate through realignment results starting with the lowest query coordinate hit.
# If there is a state change for in_target status between the last result and the current result,
# then store the in_target breakpoint.
for resultTuple in querySortedResults:
result = resultTuple[1]
if inTarget[0] is None:
inTarget = [result.in_target, result.tend()]
else:
if result.in_target != inTarget[0]:
targetBrkpt = inTarget[1]
if result.in_target:
targetBrkpt = result.tstart()
break
varReads = self.contig.get_var_reads('sv')
discReadCount = 0
# print self.get_genomic_brkpts()['target'][0]
targetBrkptValues = self.get_genomic_brkpts()['target'][0]
discReadCount = varReads.check_inter_readcounts(targetBrkptValues[0], targetBrkpt, self.get_genomic_brkpts()['other'])
return discReadCount
def get_brkpt_str(self, targetKey=None):
""" """
return self.brkpts.get_brkpt_str(targetKey)
def get_brkpt_depths(self):
"""
"""
return self.brkpts.get_brkpt_depths(self.contig.get_sample_bam_fn())
def get_splitread_count(self):
""" """
return self.brkpts.get_splitread_count()
def set_filtered(self, filterReason):
""" """
self.resultValues.set_filtered(filterReason)
def get_missing_query_coverage(self):
""" """
return len(filter(lambda y: y, map(lambda x: x == 0, self.queryCoverage)))
def get_formatted_output_values(self):
""" """
return self.resultValues.get_formatted_output_values()
def get_contig_seq(self):
""" """
return self.contig.seq
def get_contig_id(self):
""" """
return self.contig.get_id()
def set_brkpt_counts(self, svType):
""" """
self.brkpts.set_counts(svType, self.contig)
def check_overlap(self, coord1, coord2):
contained = False
if coord1[0] >= coord2[0] and coord1[1] <= coord2[1]:
contained = True
elif coord2[0] >= coord1[0] and coord2[1] <= coord1[1]:
contained = True
return contained
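# Hedged examples (not in the original): check_overlap((5, 10), (0, 20)) and
# check_overlap((0, 20), (5, 10)) both return True (one interval contains the
# other); partial overlaps such as (0, 10) vs (5, 15) return False.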
def which_rearr(self, varReads, tcoords, qcoords, strands, brkpts):
rearrValues = {'discReadCount': None, 'svType': 'rearrangement', 'svSubType': None, 'hit': False}
if not self.check_overlap(tcoords[0], tcoords[1]):
utils.log(self.loggingName, 'debug', 'Checking rearrangement svType, strand1 %s, strand2 %s, breakpt1 %d, breakpt %d' % (strands[0], strands[1], brkpts[0], brkpts[1]))
if (strands[0] != strands[1]): # and (brkpts[0] < brkpts[1]):
# Inversion
# Get discordantly mapped read-pairs
utils.log(self.loggingName, 'debug', 'Inversion event identified.')
rearrValues['hit'] = True
rearrValues['svSubType'] = 'inversion'
rearrValues['discReadCount'] = varReads.check_inv_readcounts(brkpts)
elif (strands[0] == strands[1]):
tgap = brkpts[1] - brkpts[0]
qgap = qcoords[1][0] - qcoords[0][1]
if tgap < 0:
utils.log(self.loggingName, 'debug', 'Tandem duplication event identified.')
rearrValues['hit'] = True
rearrValues['svSubType'] = 'tandem_dup'
rearrValues['discReadCount'] = varReads.check_td_readcounts(brkpts)
elif tgap > qgap:
# Gapped deletion from Blast result
utils.log(self.loggingName, 'debug', 'Deletion event identified.')
rearrValues['hit'] = True
rearrValues['svType'] = 'indel'
rearrValues['indelSize'] = 'D' + str(tgap)
else:
# Gapped insertion from Blast result
utils.log(self.loggingName, 'debug', 'Insertion event identified.')
rearrValues['hit'] = True
rearrValues['svType'] = 'indel'
rearrValues['indelSize'] = 'I' + str(qgap)
return rearrValues
def define_rearr(self):
""" """
varReads = self.contig.get_var_reads('sv')
strands = self.resultValues.strands
brkpts = self.brkpts.r
tcoords = self.brkpts.tcoords
qcoords = self.brkpts.qcoords
svType = 'rearrangement'
svSubType = None
rs = 0
hit = False
rearrHits = {}
for i in range(1, len(self.blatResults)):
vals = self.which_rearr(varReads, tcoords[(i - 1):(i + 1)], qcoords[(i - 1):(i + 1)], strands[(i - 1):(i + 1)], brkpts[(i - 1):(i + 1)])
if vals['hit']:
if vals['svType'] not in rearrHits:
rearrHits[vals['svType']] = []
rearrHits[vals['svType']].append(vals)
if 'rearrangement' not in rearrHits:
utils.log(self.loggingName, 'debug', 'Error in realignment parsing. Indel found without rearrangement event.')
rearrHit = False
for rearr in rearrHits:
for i, rr in enumerate(rearrHits[rearr]):
if rearr == 'rearrangement':
if not rearrHit:
svSubType = rearrHits[rearr][i]['svSubType']
rs = int(rearrHits[rearr][i]['discReadCount'])
rearrHit = True
else:
svSubType = None
if self.rearrDesc is None:
self.rearrDesc = [svSubType]
self.rearrDesc.append(rearrHits[rearr][i]['svSubType'])
else:
if self.rearrDesc is None:
self.rearrDesc = []
self.rearrDesc.append(rearrHits[rearr][i]['indelSize'])
if svSubType is None:
utils.log(self.loggingName, 'debug', 'Not inversion or tandem dup, checking for odd read pairs around breakpoints')
rs = varReads.check_other_readcounts(brkpts)
return svType, svSubType, rs
def get_max_meanCoverage(self):
"""Return the highest mean hit frequency among all blat results stored.
"""
maxAlignFreq = 0
for blatResult, nBasesAligned in self.blatResultsSorted:
if int(blatResult.alignFreq) > int(maxAlignFreq):
maxAlignFreq = int(blatResult.alignFreq)
return maxAlignFreq
def get_realign_freq(self):
"""
"""
realignFreqs = []
for blatResult, nBasesAligned in self.blatResultsSorted:
realignFreqs.append(int(blatResult.alignFreq))
return realignFreqs
def check_read_strands(self):
"""
"""
same_strand = False
strands = []
for read in self.contig.reads:
strand = read.id.split("/")[1]
strands.append(strand)
if len(set(strands)) == 1:
same_strand = True
utils.log(self.loggingName, 'debug', 'Checking read strands for contig reads %s' % (",".join([read.id for read in self.contig.reads])))
utils.log(self.loggingName, 'debug', 'Reads are on same strand: %r' % same_strand)
return len(set(strands))
def get_seq_complexity(self):
"""Get the 3-mer complexity of the shortest aligned blat sequence.
"""
blatResult, nBasesAligned = self.blatResultsSorted[0]
alignedSeq = self.contig.seq[blatResult.qstart():blatResult.qend()]
merSize = 3
utils.log(self.loggingName, 'debug', 'Checking sequence complexity of blat result segment %s using %d-mers' % (alignedSeq, merSize))
nmers = {}
totalMersPossible = len(alignedSeq) - 2
for i in range(len(alignedSeq) - (merSize - 1)):
nmers[str(alignedSeq[i:i + merSize]).upper()] = True
complexity = round((float(len(nmers)) / float(totalMersPossible)) * 100, 4)
utils.log(self.loggingName, 'debug', 'Complexity measure %f, based on %d unique %d-mers observed out of a total of %d %d-mers possible' % (complexity, len(nmers), merSize, totalMersPossible, merSize))
return complexity
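# Worked example (hedged, not in the original): for an aligned segment 'AAAA'
# there are 4 - 2 = 2 possible 3-mers and only 1 unique one ('AAA'), so the
# complexity returned above would be 50.0.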
def get_startend_missing_query_coverage(self):
"""Calculate the percentage of the contig sequence that is not realigned to the reference, only examining the
beginning and end of the contig sequence.
"""
missingCov = 0
for i in self.queryCoverage:
if i == 0:
missingCov += 1
else:
break
for i in reversed(self.queryCoverage):
if i == 0:
missingCov += 1
else:
break
percentMissing = round((float(missingCov) / float(len(self.contig.seq))) * 100, 4)
utils.log(self.loggingName, 'debug', 'Calculated %f missing coverage of blat query sequence at beginning and end' % percentMissing)
return percentMissing
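# Worked example (hedged): queryCoverage = [0, 0, 1, 2, 1, 0] over a 6-base
# contig gives 2 uncovered bases at the start plus 1 at the end -> 50.0.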
def is_filtered(self):
""""""
return self.resultValues.is_filtered()
def set_filtered(self, filterReason):
""" """
self.resultValues.set_filtered(filterReason)
def set_annotations(self):
""" """
self.annotated = True
def set_failed_annotation(self):
""" """
self.failed_annotation = True
class ContigCaller:
"""
"""
def __init__(self, realignment, contig, params):
self.realignment = realignment
self.contig = contig
self.params = params
self.clippedQs = []
self.svEvent = None
self.loggingName = 'breakmer.caller.sv_caller'
def call_svs(self):
""" """
if not self.realignment.has_results():
utils.log(self.loggingName, 'info', 'No blat results file exists, no calls for %s.' % self.contig.get_id())
else:
utils.log(self.loggingName, 'info', 'Making variant calls from blat results %s' % self.realignment.get_result_fn())
if self.check_indels():
self.svEvent.format_indel_values()
elif self.check_svs():
self.svEvent.format_rearr_values()
return self.svEvent
def check_indels(self):
""" """
hasIndel = False
blatResults = self.realignment.get_blat_results()
for i, blatResult in enumerate(blatResults):
if i == 0 and blatResult.check_indel(len(blatResults)):
hasIndel = True
utils.log(self.loggingName, 'info', 'Contig has indel, returning %r' % hasIndel)
self.svEvent = SVEvent(blatResult, self.contig, 'indel')
return hasIndel
else:
utils.log(self.loggingName, 'debug', 'Storing clipped blat result start %d, end %d' % (blatResult.qstart(), blatResult.qend()))
self.clippedQs.append((blatResult.qstart(), blatResult.qend(), blatResult, i))
utils.log(self.loggingName, 'info', 'Contig does not have indel, return %r' % hasIndel)
return hasIndel
def check_svs(self):
""" """
utils.log(self.loggingName, 'info', 'Checking for SVs')
gaps = [(0, self.realignment.get_qsize())]
if len(self.clippedQs) > 1:
utils.log(self.loggingName, 'debug', 'Iterating through %d clipped blat results.' % len(self.clippedQs))
mergedClip = [0, None]
for i, clippedQs in enumerate(self.clippedQs):
qs, qe, blatResult, idx = clippedQs
utils.log(self.loggingName, 'debug', 'Blat result with start %d, end %d, chrom %s' % (qs, qe, blatResult.get_seq_name('ref')))
gaps = self.iter_gaps(gaps, self.clippedQs[i], i)
if self.svEvent.qlen > mergedClip[0]:
mergedClip = [self.svEvent.qlen, self.svEvent]
self.svEvent = mergedClip[1]
else:
utils.log(self.loggingName, 'info', 'There is no more than 1 clipped blat result, not continuing with SV calling.')
if self.svEvent and self.svEvent.result_valid():
return True
else:
self.svEvent = None
return False
def iter_gaps(self, gaps, clippedQuerySeqVals, iterIdx):
""" """
new_gaps = []
qs, qe, blatResult, idx = clippedQuerySeqVals
hit = False
for gap in gaps:
gs, ge = gap
utils.log(self.loggingName, 'debug', 'Gap coords %d, %d' % (gs, ge))
startWithinGap = (qs >= gs and qs <= ge)
endWithinGap = (qe <= ge and qe >= gs)
gapEdgeDistStart = (qs <= gs) and ((gs - qs) < 15)
gapEdgeDistEnd = (qe >= ge) and ((qe - ge) < 15)
if startWithinGap or endWithinGap or (gapEdgeDistStart and (endWithinGap or gapEdgeDistEnd)) or (gapEdgeDistEnd and (startWithinGap or gapEdgeDistStart)):
ngap = []
if qs > gs:
if (qs - 1 - gs) > 10:
ngap.append((gs, qs - 1))
if qe < ge:
if (ge - qe + 1) > 10:
ngap.append((qe + 1, ge))
if iterIdx == 0:
utils.log(self.loggingName, 'debug', 'Creating SV event from blat result with start %d, end %d' % (qs, qe))
self.svEvent = SVEvent(blatResult, self.contig, 'rearrangement')
new_gaps.extend(ngap)
hit = True
elif self.check_add_br(qs, qe, gs, ge, blatResult):
utils.log(self.loggingName, 'debug', 'Adding blat result to event')
new_gaps.extend(ngap)
self.svEvent.add(blatResult)
hit = True
else:
new_gaps.append(gap)
else:
new_gaps.append(gap)
utils.log(self.loggingName, 'debug', 'New gap coords %s' % (",".join([str(x) for x in new_gaps])))
if not hit:
self.svEvent.check_previous_add(blatResult)
return new_gaps
def check_add_br(self, qs, qe, gs, ge, blatResult):
""" """
utils.log(self.loggingName, 'info', 'Checking to add blat result with start %d, end %d' % (qs, qe))
add = False
# Calc % of segment overlaps with gap
over_perc = round((float(min(qe, ge) - max(qs, gs)) / float(qe - qs)) * 100)
# Check overlap with other aligned segments
ov_right = 0
if qe > ge:
ov_right = abs(qe - ge)
ov_left = 0
if qs < gs:
ov_left = abs(qs - gs)
blatResult.set_segment_overlap(ov_left, ov_right)
max_seg_overlap = max(ov_right, ov_left)
utils.log(self.loggingName, 'debug', 'Blat query segment overlaps gap by %f' % over_perc)
utils.log(self.loggingName, 'debug', 'Max segment overlap %f' % max_seg_overlap)
utils.log(self.loggingName, 'debug', 'Event in target %r and blat result in target %r' % (self.svEvent.in_target, blatResult.in_target))
if over_perc >= 50 and (max_seg_overlap < 15 or (blatResult.in_target and self.svEvent.in_target)):
add = True
utils.log(self.loggingName, 'debug', 'Add blat result to SV event %r' % add)
return add
|
a-bioinformatician/BreaKmer
|
breakmer/caller/sv_caller.py
|
Python
|
mit
| 40,683
|
[
"BLAST",
"pysam"
] |
560c8b49e13db1b22ab02b29fad18a67cdbae3cb8fb1f69d2da21c0c45651c07
|
import numpy as np
import scipy.ndimage.interpolation
import scipy.ndimage.filters
try:
import seaborn as sns
sns.set()
from matplotlib import pyplot as plt
except ImportError:
pass
from utils import RNG, plot_greyscale_image
from utils.dataset import load_mnist
def get_transformation(transformation_name, **params):
for k, v in globals().items():
if k.lower() == transformation_name.lower():
return v(**params)
raise ValueError("invalid transformation name '{0}'".format(transformation_name))
class RandomTransformation(object):
def __init__(self, random_seed=None):
self.random_seed = random_seed
self.rng = RNG(self.random_seed)
def __call__(self, x):
self.rng = RNG(self.random_seed)
return self._call(x)
def _call(self, x):
raise NotImplementedError()
def shift(x, shift_=(0, 0)):
y = scipy.ndimage.interpolation.shift(x, shift=shift_, mode='nearest')
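# rescale the interpolated result back onto the original intensity range
# [x.min(), x.max()]; rotate() and gaussian() below reuse the same normalization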
y = (y - y.min()) * (x.max() - x.min()) / (y.max() - y.min()) + x.min()
return y
class RandomShift(RandomTransformation):
def __init__(self, x_shift=(0, 0), y_shift=(0, 0), random_seed=None):
self.x_shift = x_shift
self.y_shift = y_shift
super(RandomShift, self).__init__(random_seed=random_seed)
def _call(self, x):
x_shift = self.rng.randint(self.x_shift[0], self.x_shift[1] + 1)
y_shift = self.rng.randint(self.y_shift[0], self.y_shift[1] + 1)
return shift(x, shift_=(y_shift, x_shift))
def rotate(x, angle=0.):
y = scipy.ndimage.interpolation.rotate(x, angle=angle, mode='nearest', reshape=False)
y = (y - y.min()) * (x.max() - x.min()) / (y.max() - y.min()) + x.min()
return y
class RandomRotate(RandomTransformation):
def __init__(self, angle=(0., 0.), random_seed=None):
self.angle = angle
super(RandomRotate, self).__init__(random_seed=random_seed)
def _call(self, x):
angle = self.rng.uniform(self.angle[0], self.angle[1])
return rotate(x, angle=angle)
def subsample(x, pos=(0, 0), new_shape=None):
new_shape = new_shape or x.shape
y = x[pos[0]:(pos[0] + new_shape[0]), pos[1]:(pos[1] + new_shape[1])]
return np.copy(y)
class RandomSubsample(RandomTransformation):
def __init__(self, new_shape=None, random_seed=None):
self.new_shape = new_shape
super(RandomSubsample, self).__init__(random_seed=random_seed)
def _call(self, x):
new_shape = self.new_shape or x.shape
x_pos = self.rng.randint(0, x.shape[0] - new_shape[0] + 1)
y_pos = self.rng.randint(0, x.shape[1] - new_shape[1] + 1)
return subsample(x, pos=(x_pos, y_pos), new_shape=new_shape)
def gaussian(x, sigma=0.):
y = scipy.ndimage.filters.gaussian_filter(x, sigma=sigma, mode='nearest')
y = (y - y.min()) * (x.max() - x.min()) / (y.max() - y.min()) + x.min()
return y
class RandomGaussian(RandomTransformation):
def __init__(self, sigma=(0., 0.), random_seed=None):
self.sigma = sigma
super(RandomGaussian, self).__init__(random_seed=random_seed)
def _call(self, x):
sigma = self.rng.uniform(self.sigma[0], self.sigma[1])
return gaussian(x, sigma)
class Dropout(RandomTransformation):
def __init__(self, p=(0., 0.), random_seed=None):
self.p = p
super(Dropout, self).__init__(random_seed=random_seed)
def _call(self, x):
p = self.rng.uniform(self.p[0], self.p[1])
mask = self.rng.uniform(size=x.shape) > p
mask = mask * (x.max() - x.min()) + x.min()
return np.minimum(x, mask)
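# The boolean keep-mask is scaled into the image's value range, so
# np.minimum() leaves kept pixels unchanged (mask entry == x.max()) and
# clamps dropped pixels to the background (mask entry == x.min()). Note that
# p is the expected *dropped* fraction: uniform(...) > p keeps a pixel with
# probability 1 - p. Hypothetical example:
#   >>> x = np.array([[0.2, 0.8], [0.5, 1.0]])
#   >>> mask = np.array([[1., 0.], [0., 1.]]) * (1.0 - 0.2) + 0.2
#   >>> np.minimum(x, mask)
#   array([[ 0.2,  0.2],
#          [ 0.2,  1. ]])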
class RandomAugmentator(RandomTransformation):
def __init__(self, transform_shape=None, out_shape=None, random_seed=None):
self.transform_shape = transform_shape
self.out_shape = out_shape
self.transforms = []
self._inner_seed = None
super(RandomAugmentator, self).__init__(random_seed=random_seed)
    def _update_inner_seed(self):
        # Draw one fresh seed per registered transform so every augmented
        # sample differs while the whole sequence stays reproducible.
        self._inner_seed = self.rng.randint(2 ** 20, size=len(self.transforms))
def add(self, transformation, **params):
self.transforms.append(get_transformation(transformation, **params))
return self
def transform_x(self, x, n_samples=3):
x = np.asarray(x)
        for _ in range(n_samples):
y = np.copy(x)
if self.transform_shape: y = y.reshape(self.transform_shape)
self._update_inner_seed()
for i, t in enumerate(self.transforms):
t.random_seed = self._inner_seed[i]
y = t(y)
if self.out_shape: y = y.reshape(self.out_shape)
yield y
def transform(self, X, n_samples=3):
X_new = []
for x in X:
X_new.append(x)
for y in self.transform_x(x, n_samples=n_samples):
if not self.out_shape:
y = y.reshape(x.shape)
X_new.append(y)
return np.asarray(X_new)
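# transform() interleaves each original sample with its n_samples augmented
# copies, so the result has len(X) * (1 + n_samples) rows. Sketch with
# hypothetical shapes and seed:
#   >>> aug = RandomAugmentator(transform_shape=(28, 28), random_seed=42)
#   >>> aug = aug.add('RandomShift', x_shift=(-2, 2), y_shift=(-2, 2))
#   >>> aug.transform(np.random.rand(5, 784), n_samples=3).shape
#   (20, 784)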
if __name__ == '__main__':
X, y = load_mnist(mode='train', path='../data/')
aug = RandomAugmentator(transform_shape=(28, 28), random_seed=1337)
aug.add('RandomRotate', angle=(-10., 15.))
aug.add('Dropout', p=(0.9, 1.))
aug.add('RandomGaussian', sigma=(0., 1.))
aug.add('RandomShift', x_shift=(-2, 2), y_shift=(-2, 2))
    for img in aug.transform(X[:3] / 255., 3):
        plot_greyscale_image(img)
        plt.show()
|
monsta-hd/ml-mnist
|
ml_mnist/augmentation.py
|
Python
|
mit
| 5,439
|
[
"Gaussian"
] |
144918b57cf01f82eeb52d99e4617830e722506d0c996588fcc0526b8ea96001
|
"""
Test Help links in LMS
"""
from __future__ import absolute_import
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.tests.discussion.helpers import CohortTestMixin
from common.test.acceptance.tests.lms.test_lms_instructor_dashboard import BaseInstructorDashboardTest
from common.test.acceptance.tests.studio.base_studio_test import ContainerBase
from common.test.acceptance.tests.helpers import (
assert_opened_help_link_is_correct,
url_for_help,
click_and_wait_for_window
)
from openedx.core.release import skip_unless_master
# @skip_unless_master is used throughout this file because on named release
# branches, most work happens leading up to the first release on the branch, and
# that is before the docs have been published. Tests that check readthedocs for
# the right doc page will fail during this time, and it's just a big
# distraction. Also, if we bork the docs, it's not the end of the world, and we
# can fix it easily, so this is a good tradeoff.
@skip_unless_master # See note at the top of the file.
class TestCohortHelp(ContainerBase, CohortTestMixin):
"""
Tests help links in Cohort page
"""
shard = 2
def setUp(self, is_staff=True):
super(TestCohortHelp, self).setUp(is_staff=is_staff)
self.enable_cohorting(self.course_fixture)
self.instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
self.instructor_dashboard_page.visit()
self.cohort_management = self.instructor_dashboard_page.select_cohort_management()
def verify_help_link(self, href):
"""
Verifies that help link is correct
Arguments:
href (str): Help url
"""
help_element = self.cohort_management.get_cohort_help_element()
self.assertEqual(help_element.text, "What does this mean?")
click_and_wait_for_window(self, help_element)
assert_opened_help_link_is_correct(self, href)
def test_manual_cohort_help(self):
"""
        Scenario: Help in 'What does this mean?' is correct when a cohort is created manually.
        Given that I am at the 'Cohort' tab of the LMS instructor dashboard
        And I check 'Enable Cohorts'
        And I add a cohort, name it, choose Manual for Cohort Assignment Method and
        No content group for Associated Content Group, and save the cohort
        Then I see the UI text "Learners are added to this cohort only when..."
        followed by the "What does this mean" link
        And when I click the "What does this mean" link, the help link should end with
        course_features/cohorts/cohort_config.html#assign-learners-to-cohorts-manually
"""
self.cohort_management.add_cohort('cohort_name')
href = url_for_help(
'course_author',
'/course_features/cohorts/cohort_config.html#assign-learners-to-cohorts-manually',
)
self.verify_help_link(href)
def test_automatic_cohort_help(self):
"""
        Scenario: Help in 'What does this mean?' is correct when a cohort is created automatically.
        Given that I am at the 'Cohort' tab of the LMS instructor dashboard
        And I check 'Enable Cohorts'
        And I add a cohort, name it, choose Automatic for Cohort Assignment Method and
        No content group for Associated Content Group, and save the cohort
        Then I see the UI text "Learners are added to this cohort automatically"
        followed by the "What does this mean" link
        And when I click the "What does this mean" link, the help link should end with
        course_features/cohorts/cohorts_overview.html#all-automated-assignment
"""
self.cohort_management.add_cohort('cohort_name', assignment_type='random')
href = url_for_help(
'course_author',
'/course_features/cohorts/cohorts_overview.html#all-automated-assignment',
)
self.verify_help_link(href)
@skip_unless_master # See note at the top of the file.
class InstructorDashboardHelp(BaseInstructorDashboardTest):
"""
Tests opening help from the general Help button in the instructor dashboard.
"""
shard = 2
def setUp(self):
super(InstructorDashboardHelp, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
def test_instructor_dashboard_help(self):
"""
Scenario: Help button opens staff help
Given that I am viewing the Instructor Dashboard
When I click "Help"
Then I see help about the instructor dashboard in a new tab
"""
href = url_for_help('course_author', '/CA_instructor_dash_help.html')
help_element = self.instructor_dashboard_page.get_help_element()
click_and_wait_for_window(self, help_element)
assert_opened_help_link_is_correct(self, href)
|
jolyonb/edx-platform
|
common/test/acceptance/tests/lms/test_lms_help.py
|
Python
|
agpl-3.0
| 5,071
|
[
"VisIt"
] |
0bd9af5703824477488ca0f1b35513c173f3b59c26195818edc4f50d5904c992
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.